repo_name
stringlengths 5
100
| path
stringlengths 4
231
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 6
947k
| score
float64 0
0.34
| prefix
stringlengths 0
8.16k
| middle
stringlengths 3
512
| suffix
stringlengths 0
8.17k
|
|---|---|---|---|---|---|---|---|---|
JoakimLindbom/ago
|
scheduler/scheduler.py
|
Python
|
gpl-3.0
| 7,085
| 0.005222
|
#!/usr/bin/python
AGO_SCHEDULER_VERSION = '0.0.1'
############################################
"""
Basic class for device and device group schedule
"""
__author__ = "Joakim Lindbom"
__copyright__ = "Copyright 2017, Joakim Lindbom"
__date__ = "2017-01-27"
__credits__ = ["Joakim Lindbom", "The ago control team"]
__license__ = "GPL Public License Version 3"
__maintainer__ = "Joakim Lindbom"
__email__ = 'Joakim.Lindbom@gmail.com'
__status__ = "Experimental"
__version__ = AGO_SCHEDULER_VERSION
############################################
import time
from datetime import date, datetime
import sys
import json
all_days = {"mo", "tu", "we", "th", "fr", "sa", "su"}
class Scheduler:
def __init__(self, app):
self.rules = None
self.schedules = []
self.log = None
self.app = app
try:
self.log = app.log
except AttributeError:
#We seem to be in test mode, need a local logger
self.log = llog()
def parseJSON(self, filename):
with open(filename) as schedule_file:
schedule = json.load(schedule_file)
self.log.info("JSON file: {}".format(schedule))
if "rules" in schedule:
self.rules = Rules(schedule["rules"])
if "items" in schedule:
self.schedules = Schedules(schedule["items"], self.rules)
def new_day(self, weekday):
""" Load the schedules for the new day
E.g. called when it's 00:00
"""
self.schedules.weekday = weekday
class Schedules:
def __init__(self, jsonstr, rules):
self.schedules = []
self.activities = []
self.weekday = None
for element in jsonstr:
# self.log.trace(element)
item = Schedule(element, rules)
self.schedules.append(item)
# print item
def find(self, uuid):
rule = None
for r in self.rules:
if r.uuid == uuid:
rule = r
return rule
@property
def weekday(self):
"""Weekday property."""
print "getter of weekday called"
return self._weekday
@weekday.setter
def weekday(self, day):
print "setter of weekday called"
if day not in all_days:
raise ValueError
if self._weekday != day:
self.new_day(day)
self._weekday = day
def new_day(self, weekday):
self.activities = []
for s in self.schedules:
if weekday in s.days:
#found a day to include
self.activities.append(s)
print self.activities
print " "
class Schedule:
def __init__(self, jsonstr, rules=None):
self.device = None
self.scenario = None
self.group = None
if "device" in jsonstr:
self.device= jsonstr["device"]
if "scenario" in jsonstr:
self.scenario = jsonstr["scenario"]
if "group-uuid" in jsonstr:
self.group = jsonstr["group"]
self.enabled = jsonstr["enabled"]
self.schedules = {}
seq = 0
for a in jsonstr["actions"]:
seq += 1
x = {"action": a["action"], # On/Off/Run etc
"time": a["time"],
"enabled": a["enabled"]}
if "days" in a:
if a["days"] == "weekdays":
x["days"] = ["mo", "tu", "we", "th", "fr"]
elif a["days"] == "weekends":
x["days"] = ["sa", "su"]
elif a["days"] == "all":
x["days"] = ["mo", "tu", "we", "th", "fr", "sa", "su"]
else:
x["days"] = a["days"]
if "level" in a:
x["level"] = a["level"]
if "tolevel" in a:
x["tolevel"] = a["tolevel"]
if "endtime" in a:
x["endtime"] = a["endtime"]
if "seq" in a:
x["seq"] = a["seq"]
if "rule" in a:
x["rule-uuid"] = a["rule"]
x["rule"] = rules.find(a["rule"])
#print x["rule"]
self.schedules[seq] = x
#print (seq, self.schedules[seq])
def __str__(self):
s = "Schedule: "
if self.device is not None:
s += "Device {}".format(self.device)
if self.scenario is not None:
s += "Scenario {}".format(self.scenario)
if self.group is not None:
s += "Group {}".format(self.group)
s += "Enaled" if self.enabled else "Disabled"
s += "# schedules: {}".format(len(self.schedules))
return s
class Rules:
def __init__(self, jsonstr):
self.rules = []
for element in jsonstr:
# self.log.trace(element)
rule = Rule(element)
self.rules.append(rule)
#print rule
def find(self, uuid):
rule = None
for r in self.rules:
if r.uuid == uuid:
rule = r
return rule
class Rule:
def __init__(self, jsonstr):
self.name = jsonstr["name"]
self.uuid = jsonstr["uuid"]
self.rules = {}
#print self.name
seq = 0
for r in jsonstr["rules"]:
seq += 1
x = {"type": r["type"],
"variable": r["variable"],
"operator": r["operator
|
"],
"value": r["value"]}
#print x
self.rules[seq] = x
#print (seq, self.rules[seq])
def __str__(self):
"""Return a string representing content f the Rule object"""
s = "na
|
me={}, uuid={}, type={}, # rules: {} ".format(self.name, self.uuid, self.type, len(self.rules))
return s
def execute(self):
results = []
for k, r in self.rules.iteritems():
if r["type"] == "variable check":
if r["variable"] == "HouseMode":
vv = "At home" # TODO: Get variable from inventory using r["variable"]
if r["variable"] == "test":
vv = "True"
if r["operator"] == 'eq':
if vv == r["value"]:
results.append(True)
else:
results.append(False)
return False
if r["operator"] == 'lt':
if vv < r["value"]:
results.append(True)
else:
results.append(False)
return False
return True
return True
def addlog(self, log):
self.log = log
class Days:
def __init__(self):
pass
class llog:
def __init__(self):
pass
def info(self, msg):
print ("INFO: %s" % msg)
def trace(self, msg):
print ("TRACE: %s" % msg)
def debug(self, msg):
print ("DEBUG: %s" % msg)
def error(self, msg):
print ("ERROR: %s" % msg)
|
nharrer/kodi-update-movie-dateadded
|
update_movie_dateadded.py
|
Python
|
mit
| 5,104
| 0.005094
|
import sys
import os
import re
import time
import datetime
from contextlib import closing
# ---------------------------------------------------
# Settings
# ---------------------------------------------------
# IMPORTANT: In the standard case (sqlite3) just point this to your own MyVideos database.
DATABASE_PATH = os.path.join(r"C:\Users\<Your User>\AppData\Roaming\Kodi\userdata\Database", 'MyVideos93.db')
# Or if you're using MySQL as a database, change MYSQL to True and change the other MySQL settings accordingly.
# Also make sure to install the MySQL python package: https://pypi.python.org/pypi/MySQL-python/1.2.5
MYSQL = False
MYSQL_USER = "kodi"
MYSQL_PASS = "kodi"
MYSQL_SERVER = "localhost"
MYSQL_DATABASE = "MyVideos93"
# Set this to True to get more verbose messages
VERBOSE = False
# ---------------------------------------------------
# Constants
# ---------------------------------------------------
stack_regex = re.compile("stack://(.*?)\s*[,$]")
date_format = '%Y-%m-%d %H:%M:%S'
col_id = 'idFile'
col_filename = 'strFileName'
col_path = 'strPath'
col_dateadded = 'dateAdded'
# ---------------------------------------------------
# Functions
# ---------------------------------------------------
def die(message):
sys.stderr.write(message)
sys.exit(-1)
def open_database():
if MYSQL:
import MySQLdb.cursors
connection = MySQLdb.connect(
host=MYSQL_SERVER, # name or ip of mysql server
user=MYSQL_USER, # your username
passwd=MYSQL_PASS, # your password
db=MYSQL_DATABASE, # name of the database
cursorclass=MySQLdb.cursors.DictCursor)
print "DB connection opened to {0}@{1}.".format(MYSQL_DATABASE, MYSQL_SERVER)
else:
try:
from sqlite3 import dbapi2 as sqlite
print "Loading sqlite3 as DB engine"
except:
from pysqlite2 import dbapi2 as sqlite
print "Loading pysqlite2 as DB engine"
connection = sqlite.connect(DATABASE_PATH)
#connection.text_factory = str
connection.text_factory = lambda x: unicode(x, 'utf-8', 'ignore')
connection.row_factory = sqlite.Row
print "DB connection opened to {0}.".format(DATABASE_PATH)
return connection
def check_column(columns, columnname):
if not columnname in columns:
die('Table does not contain column {0}!!'.format(columnname))
def process_row(conn, row):
keys = row.keys()
filename = row[col_filename]
path = row[col_path]
id = row[col_id]
filename = filename.encode('cp1252')
path = path.encode('cp1252')
stack = stack_regex.findall(filename)
if (len(stack) > 0):
fullpath = stack[0]
else:
fullpath = os.path.join(path, filename)
# potential samba fix (not tested)
if fullpath.startswith('smb:'):
fullpath = fullpath[4:]
fullpath = os.path.abspath(fullpath)
if not os.path.isfile(fullpath):
print('File {0} does not exist!'.format(fullpath))
return
lastmod = os.path.getmtime(fullpath)
#if lastmod < 0 or lastmod > 4102444800L:
# lastmod = os.path.getctime(fullpath)
if lastmod < 0 or lastmod > 4102444800L:
print("Ignoring File {0}. Date is out of range (lastmod={1})".format(fullpath, lastmod))
return
lt = time.localtime(lastmod)
dateadded_new = time.strftime(date_format, lt)
dateadded = str(row[col_dateadded])
if dateadded != dateadded_new:
print('idFile {0}: {1} -> {2} ({3})'.f
|
ormat(id, date
|
added, dateadded_new, fullpath))
with closing(conn.cursor()) as cursor:
cursor.execute("UPDATE files SET dateAdded = '{0}' WHERE idFile = {1}".format(dateadded_new, id))
conn.commit()
else:
if VERBOSE:
print('idFile {0}: Date OK. DB date {1} matches file date {2} ({3})'.format(id, dateadded, dateadded_new, fullpath))
# ---------------------------------------------------
# Main
# ---------------------------------------------------
conn = open_database()
viewname_movieview = "movieview"
with closing(conn.cursor()) as cursor:
cursor.execute('SELECT idVersion FROM version')
row = cursor.fetchone()
version = row['idVersion']
if version > 90:
# view name changed after version db 90
viewname_movieview = "movie_view"
with closing(conn.cursor()) as cursor:
cursor.execute('SELECT idMovie, idFile, strFileName, strPath, dateAdded FROM {0} ORDER BY idFile'.format(viewname_movieview))
columns = map(lambda x: x[0], cursor.description)
rows = cursor.fetchall()
check_column(columns, col_id)
check_column(columns, col_filename)
check_column(columns, col_path)
check_column(columns, col_dateadded)
print "Columns checked. They are ok."
for row in rows:
process_row(conn, row)
print "Processed {0} Rows.".format(len(rows))
|
Etzeitet/pythonjournal
|
pythonjournal/proptest.py
|
Python
|
gpl-2.0
| 953
| 0.012592
|
#!/usr/local/bin/python3
class TestClass(object):
def foo():
doc = "The foo property."
def fget(self):
return self._foo
def fset(self, value):
self._foo = value
def fdel(self):
del self._foo
return locals()
foo = property(**foo())
def bar():
doc = "The bar property."
def fget(self):
return self._bar
def fset(self, value):
self._bar = value
def fdel(self):
del self._bar
return locals()
bar = property(**bar())
def __init__(self, foo, bar):
|
self.foo = "foo"
self.bar = "bar"
def test_method(self, attr):
if attr == 1:
prop = self.foo
else:
prop = self.bar
print(prop)
prop = 'TADA!'
tc = TestClass(1,2
|
)
print(tc.foo)
print(tc.bar)
tc.test_method('foo')
#print(tc.foo)
#print(dir(tc))
|
rooi/CouchPotatoServer
|
couchpotato/core/notifications/trakt/main.py
|
Python
|
gpl-3.0
| 1,553
| 0.010947
|
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
log = CPLog(__name__)
class Trakt(Notification):
urls = {
'base': 'http://api.trakt.tv/%s',
'library': 'movie/library/%s',
'unwatchlist': 'movie/unwatchlist/%s',
}
listen_to = ['movie.downloaded']
def notify(self, message = '', data = None, listener = None):
if not data: data =
|
{}
post_data = {
'username': self.conf('automation_username'),
|
'password' : self.conf('automation_password'),
'movies': [{
'imdb_id': data['library']['identifier'],
'title': data['library']['titles'][0]['title'],
'year': data['library']['year']
}] if data else []
}
result = self.call((self.urls['library'] % self.conf('automation_api_key')), post_data)
if self.conf('remove_watchlist_enabled'):
result = result and self.call((self.urls['unwatchlist'] % self.conf('automation_api_key')), post_data)
return result
def call(self, method_url, post_data):
try:
response = self.getJsonData(self.urls['base'] % method_url, data = post_data, cache_timeout = 1)
if response:
if response.get('status') == "success":
log.info('Successfully called Trakt')
return True
except:
pass
log.error('Failed to call trakt, check your login.')
return False
|
jdegene/ArcGIS-scripts
|
SMOS.py
|
Python
|
mit
| 2,615
| 0.008413
|
"""
Uses a folder full of SMOS *.dbl files, converts them with the ESA snap command
line tool pconvert.exe to IMG
Uses then arcpy to to convert IMG to GeoTIFF
and crops them in the process to a specified extent and compresses them
"""
import os, subprocess, shutil
import arcpy
from arcpy import env
from arcpy.sa import *
# folder containing the DBL files
inFol = "D:/Test/SMOS/"
outFol = "D:/Test/SMOStif/"
# .img and tif output folder
imgFol = outFol + "IMGs/"
tifFol = outFol + "Tiffs/"
# ArcGIS Environmnent settings
arcpy.CheckOutExtension("Spatial")
arcpy.env.overwriteOutput = True
arcpy.env.pyramid = "NONE"
arcpy.env.extent = "85 40 125 55" #XMin, YMin, XMax, YMax
arcpy.env.rasterStatistics = 'STATISTICS 1 1'
# create a list of exisiting output Tiffs, these will be skipped
exList = []
for tiff in os.listdir(tifFol):
if tiff[-3:] == "tif":
exList.append(tiff[:-4])
for dblFile in os.listdir(inFol):
if dblFile[:-4] in exList:
continue
else:
#dblFile = "SM_OPER_MIR_SMUDP2_20150715T101051_20150715T110403_620_001_1.DBL"
dblPath = inFol + dblFile
# SNAP's pconvert.exe path
pcon = "C:/Progra~2/snap/bin/pconvert.exe"
# flags -f (format) -b (band) -o (output folder) for pcon
# converting directly to GeoTiff ('tifp' instead of 'dim') does not work with arcpy for whatever reason
options = ['dim', '1', imgFol]
# Start the subprocess with specified arguments
# creationflags=0x08000000 prevents windows from opening console window (goo.gl/vWf46a)
subP = subprocess.Popen([pcon, '-f', options[0], '-b', options[1], '-o', options[2], dblPath],
|
creationflags=0x08000000)
subP.wait()
# console subprocess sometimes throws error and no output is generated -> skip file & print name
try:
raster = Raster(imgFol + dblFile[:-3] + "data/" + "Soil_Moisture.img")
except:
print dblFile[:-3]
continue
# copy raster t
|
o new folder, only honoring above extent, converting to GeoTiff, -999 is nodata
arcpy.CopyRaster_management(raster, tifFol + dblFile[:-3] + "tif", "DEFAULTS","-999", "-999")
# try to delete Files from imgFol (*.data is recognized as folder -> shutil)
for x in os.listdir(imgFol):
try:
if os.path.isdir(imgFol + x):
shutil.rmtree(imgFol + x)
else:
os.remove(imgFol + x)
except:
continue
arcpy.CheckInExtension("Spatial")
|
openprocurement/openprocurement.tender.belowthreshold
|
openprocurement/tender/belowthreshold/tests/award.py
|
Python
|
apache-2.0
| 14,334
| 0.003084
|
# -*- coding: utf-8 -*-
import unittest
from copy import deepcopy
from openprocurement.api.tests.base import snitch
from openprocurement.tender.belowthreshold.adapters import TenderBelowThersholdConfigurator
from openprocurement.tender.belowthreshold.tests.base import (
TenderContentWebTest,
test_bids,
test_lots,
test_organization
)
from openprocurement.tender.belowthreshold.tests.award_blanks import (
# TenderAwardResourceTest
create_tender_award_invalid,
create_tender_award,
patch_tender_award,
patch_tender_award_unsuccessful,
get_tender_award,
patch_tender_award_Administrator_change,
# TenderLotAwardCheckResourceTest
check_tender_award,
# TenderLotAwardResourceTest
create_tender_lot_award,
patch_tender_lot_award,
patch_tender_lot_award_unsuccessful,
# Tender2LotAwardResourceTest
create_tender_lots_award,
patch_tender_lots_award,
# TenderAwardComplaintResourceTest
create_tender_award_complaint_invalid,
create_tender_award_complaint,
patch_tender_award_complaint,
review_tender_award_complaint,
get_tender_award_complaint,
get_tender_award_complaints,
# TenderLotAwardComplaintResourceTest
|
create_tender_lot_award_complaint,
patch_tender_lot_award_complaint,
get_tender_lot_award_complaint,
get_tender_lot_award_complaints,
# Tender2LotAwardComplaintResourceTest
create_tende
|
r_lots_award_complaint,
patch_tender_lots_award_complaint,
# TenderAwardComplaintDocumentResourceTest
not_found,
create_tender_award_complaint_document,
put_tender_award_complaint_document,
patch_tender_award_complaint_document,
# Tender2LotAwardComplaintDocumentResourceTest
create_tender_lots_award_complaint_document,
put_tender_lots_award_complaint_document,
patch_tender_lots_award_complaint_document,
# TenderAwardDocumentResourceTest
not_found_award_document,
create_tender_award_document,
put_tender_award_document,
patch_tender_award_document,
create_award_document_bot,
patch_not_author,
# Tender2LotAwardDocumentResourceTest
create_tender_lots_award_document,
put_tender_lots_award_document,
patch_tender_lots_award_document,
)
class TenderAwardResourceTestMixin(object):
test_create_tender_award_invalid = snitch(create_tender_award_invalid)
test_get_tender_award = snitch(get_tender_award)
test_patch_tender_award_Administrator_change = snitch(patch_tender_award_Administrator_change)
class TenderAwardComplaintResourceTestMixin(object):
test_create_tender_award_complaint_invalid = snitch(create_tender_award_complaint_invalid)
test_get_tender_award_complaint = snitch(get_tender_award_complaint)
test_get_tender_award_complaints = snitch(get_tender_award_complaints)
class TenderAwardDocumentResourceTestMixin(object):
test_not_found_award_document = snitch(not_found_award_document)
test_create_tender_award_document = snitch(create_tender_award_document)
test_put_tender_award_document = snitch(put_tender_award_document)
test_patch_tender_award_document = snitch(patch_tender_award_document)
test_create_award_document_bot = snitch(create_award_document_bot)
test_patch_not_author = snitch(patch_not_author)
class TenderAwardComplaintDocumentResourceTestMixin(object):
test_not_found = snitch(not_found)
test_create_tender_award_complaint_document = snitch(create_tender_award_complaint_document)
test_put_tender_award_complaint_document = snitch(put_tender_award_complaint_document)
class TenderLotAwardCheckResourceTestMixin(object):
test_check_tender_award = snitch(check_tender_award)
class Tender2LotAwardDocumentResourceTestMixin(object):
test_create_tender_lots_award_document = snitch(create_tender_lots_award_document)
test_put_tender_lots_award_document = snitch(put_tender_lots_award_document)
test_patch_tender_lots_award_document = snitch(patch_tender_lots_award_document)
class TenderAwardResourceTest(TenderContentWebTest, TenderAwardResourceTestMixin):
initial_status = 'active.qualification'
initial_bids = test_bids
test_create_tender_award = snitch(create_tender_award)
test_patch_tender_award = snitch(patch_tender_award)
test_patch_tender_award_unsuccessful = snitch(patch_tender_award_unsuccessful)
class TenderLotAwardCheckResourceTest(TenderContentWebTest, TenderLotAwardCheckResourceTestMixin):
initial_status = 'active.auction'
initial_lots = test_lots
initial_bids = deepcopy(test_bids)
initial_bids.append(deepcopy(test_bids[0]))
initial_bids[1]['tenderers'][0]['name'] = u'Не зовсім Державне управління справами'
initial_bids[1]['tenderers'][0]['identifier']['id'] = u'88837256'
initial_bids[2]['tenderers'][0]['name'] = u'Точно не Державне управління справами'
initial_bids[2]['tenderers'][0]['identifier']['id'] = u'44437256'
reverse = TenderBelowThersholdConfigurator.reverse_awarding_criteria
awarding_key = TenderBelowThersholdConfigurator.awarding_criteria_key
def setUp(self):
super(TenderLotAwardCheckResourceTest, self).setUp()
self.app.authorization = ('Basic', ('auction', ''))
response = self.app.get('/tenders/{}/auction'.format(self.tender_id))
auction_bids_data = response.json['data']['bids']
for lot_id in self.initial_lots:
response = self.app.post_json('/tenders/{}/auction/{}'.format(self.tender_id, lot_id['id']),
{'data': {'bids': auction_bids_data}})
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, 'application/json')
response = self.app.get('/tenders/{}'.format(self.tender_id))
self.assertEqual(response.json['data']['status'], "active.qualification")
class TenderLotAwardResourceTest(TenderContentWebTest):
initial_status = 'active.qualification'
initial_lots = test_lots
initial_bids = test_bids
test_create_tender_lot_award = snitch(create_tender_lot_award)
test_patch_tender_lot_award = snitch(patch_tender_lot_award)
test_patch_tender_lot_award_unsuccessful = snitch(patch_tender_lot_award_unsuccessful)
class Tender2LotAwardResourceTest(TenderContentWebTest):
initial_status = 'active.qualification'
initial_lots = 2 * test_lots
initial_bids = test_bids
test_create_tender_lots_award = snitch(create_tender_lots_award)
test_patch_tender_lots_award = snitch(patch_tender_lots_award)
class TenderAwardComplaintResourceTest(TenderContentWebTest, TenderAwardComplaintResourceTestMixin):
initial_status = 'active.qualification'
initial_bids = test_bids
def setUp(self):
super(TenderAwardComplaintResourceTest, self).setUp()
# Create award
auth = self.app.authorization
self.app.authorization = ('Basic', ('token', ''))
response = self.app.post_json('/tenders/{}/awards'.format(
self.tender_id), {'data': {'suppliers': [test_organization], 'status': 'pending', 'bid_id': self.initial_bids[0]['id']}})
award = response.json['data']
self.award_id = award['id']
self.app.authorization = auth
test_create_tender_award_complaint = snitch(create_tender_award_complaint)
test_patch_tender_award_complaint = snitch(patch_tender_award_complaint)
test_review_tender_award_complaint = snitch(review_tender_award_complaint)
class TenderLotAwardComplaintResourceTest(TenderContentWebTest):
initial_status = 'active.qualification'
initial_lots = test_lots
initial_bids = test_bids
def setUp(self):
super(TenderLotAwardComplaintResourceTest, self).setUp()
# Create award
auth = self.app.authorization
self.app.authorization = ('Basic', ('token', ''))
bid = self.initial_bids[0]
response = self.app.post_json('/tenders/{}/awards'.format(
self.tender_id), {'data': {'suppliers': [test_organization], 'status': 'pending', 'bid_id': bid['id'], 'lotID': bid['lotValues'][0]['relatedLot']}})
award = response.json['data']
self.award_id = aw
|
technoarch-softwares/linkedin-auth
|
setup.py
|
Python
|
bsd-2-clause
| 1,410
| 0.002128
|
import os
from setuptools import find_packages, setup
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
install_requires = [
'requests==2.8.1'
]
setup(
name='linkedin-auth',
version='0.1',
packages=find_packages(),
include_package_data=True,
license='BSD License', # example license
description='A simple Django app for linkedin authentcation.',
long_description=README,
url='https://github.com/technoarch-softwares/linkedin-auth',
author='Pankul Mittal',
author_email='mittal.pankul@gmail.com',
install_requires = install_requires,
classifiers=[
'Environment :: Web Environment',
'Framework ::
|
Django',
'Framework :: Django :: 1.8', # replace "X.Y" as appropriate
'Intended Audience :: Develope
|
rs',
'License :: OSI Approved :: BSD License', # example license
'Operating System :: OS Independent',
'Programming Language :: Python',
# Replace these appropriately if you are stuck on Python 2.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
zip_safe=False,
)
|
ebraunkeller/kerouac-bobblehead
|
ProcessAttendance.py
|
Python
|
mit
| 1,977
| 0.02782
|
# Process the attendance data by adding fields for day of week and school year and reoder the fields
# so that they match the enrollment file: date, lasid, status (ABS), day, school year
# also, clean the bad data. Many absence records are on days not in the calendar. Check the date
# of the absence against the calendar and delete bad records
# Input files: Attendance.csv - pulled from X2 (attendance records)
# NewCalendar.csv - pulled from X2, must be updated annually to get previous year and current year
# Output File: ProcessedAttend.csv
import csv, time
from datetime import datetime
# convert X2 date format to Python format mm/dd/yyyy
def date_func(date):
return date.s
|
plit("/")[0].zfill(2)+"/"+date.split("/")[1].zfill(2)+"/"+date.split("/")[2]
def schoolyear(date):
if int(date.split("/")[0]) <8 : return date.split("/")[2]
else: return str(int(date.split("/")[2])+1)
def calday(date):
cal_date = datetime.strptime(date_func(date),'%m/%d/%Y')
return cal_date.strftime("%a")
AttFile = "C:\Users\Elaine\Documents\BKL\Lowell\\2016-2017\Attendance.csv"
OutFile = "C:\Users\Elaine\Documents\BKL\Lowell\\2016-2017\TableauFormat\Processed
|
Attend.csv"
CalendFile ="C:\Users\Elaine\Documents\BKL\Lowell\\2016-2017\NewCalendar.csv"
csvfile=open(AttFile,'rb')
reader=csv.reader(csvfile)
Ccsvfile = open(CalendFile,'rb')
Creader = csv.reader(Ccsvfile)
with open(OutFile,'a+b') as csvout:
wr=csv.writer(csvout,delimiter=',')
wr.writerow(['Date','Lasid','Status','Day','SchoolYear','Term'])
# skip all the headers
next(reader)
for row in reader:
output = [row[0],row[1],'ABS',calday(row[0]), schoolyear(row[0])]
for crow in Creader:
if crow[0] == date_func(row[0]) :
output.append(crow[2])
if crow[1]=='TRUE': wr.writerow(output)
break
Ccsvfile.seek(0)
csvout.close()
csvfile.close()
|
sharad/calibre
|
src/calibre/ebooks/oeb/transforms/rasterize.py
|
Python
|
gpl-3.0
| 9,021
| 0.002771
|
'''
SVG rasterization transform.
'''
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2008, Marshall T. Vandegrift <llasram@gmail.com>'
import os, re
from urlparse import urldefrag
from lxml import etree
from PyQt5.Qt import (
Qt, QByteArray, QBuffer, QIODevice, QColor, QImage, QPainter, QSvgRenderer)
from calibre.ebooks.oeb.base import XHTML, XLINK
from calibre.ebooks.oeb.base import SVG_MIME, PNG_MIME
from calibre.ebooks.oeb.base import xml2str, xpath
from calibre.ebooks.oeb.base import urlnormalize
from calibre.ebooks.oeb.stylizer import Stylizer
from calibre.ptempfile import PersistentTemporaryFile
from calibre.utils.imghdr import what
IMAGE_TAGS = set([XHTML('img'), XHTML('object')])
KEEP_ATTRS = set(['class', 'style', 'width', 'height', 'align'])
class Unavailable(Exception):
pass
class SVGRasterizer(object):
def __init__(self):
from calibre.gui2 import must_use_qt
must_use_qt()
@classmethod
def config(cls, cfg):
return cfg
@classmethod
def generate(cls, opts):
return cls()
def __call__(self, oeb, context):
oeb.logger.info('Rasterizing SVG images...')
self.temp_files = []
self.stylizer_cache = {}
self.oeb = oeb
self.opts = context
self.profile = context.dest
self.images = {}
self.dataize_manifest()
self.rasterize_spine()
self.rasterize_cover()
for pt in self.temp_files:
try:
os.remove(pt)
except:
pass
def rasterize_svg(self, elem, width=0, height=0, format='PNG'):
view_box = elem.get('viewBox', elem.get('viewbox', None))
sizes = None
logger = self.oeb.logger
if view_box is not None:
try:
box = [float(x) for x in filter(None, re.split('[, ]', view_box))]
sizes = [box[2]-box[0], box[3] - box[1]]
except (TypeError, ValueError, IndexError):
logger.warn('SVG image has invalid viewBox="%s", ignoring the viewBox' % view_box)
else:
for image in elem.xpath('descendant::*[local-name()="image" and '
'@height and contains(@height, "%")]'):
logger.info('Found SVG image height in %, trying to convert...')
try:
h = float(image.get('height').replace('%', ''))/100.
image.set('height', str(h*sizes[1]))
except:
logger.exception('Failed to convert percentage height:',
image.get('height'))
data = QByteArray(xml2str(elem, with_tail=False))
svg = QSvgRenderer(data)
size = svg.defaultSize()
if size.width() == 100 and size.height() == 100 and sizes:
size.setWidth(sizes[0])
size.setHeight(sizes[1])
if width or height:
size.scale(width, height, Qt.KeepAspectRatio)
logger.info('Rasterizing %r to %dx%d'
% (elem, size.width(), size.height()))
image = QImage(size, QImage.Format_ARGB32_Premultiplied)
image.fill(QColor("white").rgb())
painter = QPainter(image)
svg.render(painter)
painter.end()
array = QByteArray()
buffer = QBuffer(array)
buffer.open(QIODevice.WriteOnly)
image.save(buffer, format)
return str(array)
def dataize_manifest(self):
for item in self.oeb.manifest.values():
if item.media_type == SVG_MIME and item.data is not None:
self.dataize_svg(item)
def dataize_svg(self, item, svg=None):
if svg is None:
svg = item.data
hrefs = self.oeb.manifest.hrefs
for elem in xpath(svg, '//svg:*[@xl:href]'):
href = urlnormalize(elem.attrib[XLINK('href')])
path = urldefrag(href)[0]
if not path:
continue
abshref = item.abshref(path)
if abshref not in hrefs:
continue
linkee = hrefs[abshref]
data = str(linkee)
ext = what(None, data) or 'jpg'
with PersistentTemporaryFile(suffix='.'+ext) as pt:
pt.write(data)
self.temp_files.append(pt.name)
elem.attrib[XLINK('href')] = pt.name
return svg
def stylizer(self, item):
ans = self.stylizer_cache.get(item, None)
if ans is None:
ans = Stylizer(item.data, item.href, self.oeb, self.opts,
self.profile)
self.stylizer_cache[item] = ans
return ans
def rasterize_spine(self):
for item in self.oeb.spine:
self.rasterize_item(item)
def rasterize_item(self, item):
html = item.data
hrefs = self.oeb.manifest.hrefs
for elem in xpath(html, '//h:img[@src]'):
src = urlnormalize(elem.attrib['src'])
image = hrefs.get(item.abshref(src), None)
if image and image.media_type == SVG_MIME:
style = self.stylizer(item).style(elem)
self.rasterize_external(elem, style, item, image)
for elem in xpath(html, '//h:object[@type="%s" and @data]' % SVG_MIME):
data = urlnormalize(elem.attrib['data'])
image = hrefs.get(item.abshref(data), None)
if image and image.media_type == SVG_MIME:
style = self.stylizer(item).style(elem)
self.rasterize_external(elem, style, item, image)
for elem in xpath(html, '//svg:svg'):
style = self.stylizer(item).style(elem)
self.rasterize_inline(elem, style, item)
def rasterize_inline(self, elem, style, item):
width = style['width']
height = style['height']
width = (width / 72) * self.profile.dpi
height = (height / 72) * self.profile.dpi
elem = self.dataize_svg(item, elem)
data = self.rasterize_svg(elem, width, height)
manifest = self.oeb.manifest
href = os.path.splitext(item.href)[0] + '.png'
id, href = manifest.generate(item.id, href)
manifest.add(id, href, PNG_MIME, data=data)
img = etree.Element(XHTML('img'), src=item.relhref(href))
elem.getparent().replace(elem, img)
for prop in ('width', 'height'):
if prop in elem.attrib:
img.attrib[prop] = elem.attrib[prop]
def rasterize_external(self, elem, style, item, svgitem):
width = style['width']
height = style['height']
width = (width / 72) * self.profile.dpi
height = (height / 72) * self.profile.dpi
data = QByteArray(str(svgitem))
svg = QSvgRenderer(data)
size = svg.defaultSize()
size.scale(width, height, Qt.KeepAspectRatio)
key = (svgitem.href, size.width(), size.height())
if key in se
|
lf.images:
href = self.images[key]
else:
logger = self.
|
oeb.logger
logger.info('Rasterizing %r to %dx%d'
% (svgitem.href, size.width(), size.height()))
image = QImage(size, QImage.Format_ARGB32_Premultiplied)
image.fill(QColor("white").rgb())
painter = QPainter(image)
svg.render(painter)
painter.end()
array = QByteArray()
buffer = QBuffer(array)
buffer.open(QIODevice.WriteOnly)
image.save(buffer, 'PNG')
data = str(array)
manifest = self.oeb.manifest
href = os.path.splitext(svgitem.href)[0] + '.png'
id, href = manifest.generate(svgitem.id, href)
manifest.add(id, href, PNG_MIME, data=data)
self.images[key] = href
elem.tag = XHTML('img')
for attr in elem.attrib:
if attr not in KEEP_ATTRS:
del elem.attrib[attr]
elem.attrib['src'] = item.relhref(href)
if elem.text:
elem.attrib['alt'] = elem.text
elem.text = None
for child in elem:
e
|
D4wN/brickv
|
src/build_data/windows/OpenGL/GL/EXT/separate_shader_objects.py
|
Python
|
gpl-2.0
| 2,874
| 0.022617
|
'''OpenGL extension EXT.separate_shader_objects
This module customises the behaviour of the
OpenGL.raw.GL.EXT.separate_shader_objects to provide a more
Python-friendly API
Overview (from the spec)
Prior to this extension, GLSL requires multiple shader domains
(vertex, fragment, geometry) to be linked into a single monolithic
program object to specify a GLSL shader for each domain.
While GLSL's monolithic approach has some advantages for
optimizing shaders as a unit that span multiple domains, all
existing GPU hardware supports the more flexible mix-and-match
approach.
HLSL9, Cg, the prior OpenGL assembly program extensions, and game
console programmers favor a more flexible "mix-and-match" approach to
specifying shaders independently for these different shader domains.
Many developers build their shader content around the mix-and-match
approach where they can use a single vertex shader with multiple
fragment shaders (or vice versa).
This keep-it-simple extension adapts the "mix-and-match" shader
domain model for GLSL so different GLSL program objects can be bound
to different shader domains.
This extension redefines the operation of glUseProgram(GLenum program)
to be equivalent to:
glUseShaderProgramEXT(GL_VERTEX_SHADER, program);
glUseShaderProgramEXT(GL_GEOMETRY_SHADER_EXT, program);
glUseShaderProgramEXT(GL_FRAGMENT_SHADER, program);
glActiveProgramEXT(program);
You can also call these commands separately to bind each respective
domain. The GL_VERTEX_SHADER, GL_GEOMETRY_SHADER_EXT, and
GL_FRAGMENT_SHADER tokens refer to the conventional vertex, geometry,
and fragment domains respectively. glActiveProgramEXT specifies
the program that glUniform* commands will update.
Separate linking creates the possibility that certain output varyings
of a shader may go unread by the subsequent shader inputting varyings.
In this case, the output varyings are simply ignored. It is also
possible input varyings from a shader may not be written as output
varyings of a preceding shader. In this case, the unwritten input
varying values are undefined. Implementations are encouraged to
zero these undefined input varying values.
This extension is a proof-of-concept that separate shader objects
can work for GLSL and a response to repeated requests for this
functionality. There are various loose ends, particularly when
dea
|
ling with user-defined varyings. The hope is a future extension
will improve this situation.
The official definition of this exte
|
nsion is available here:
http://www.opengl.org/registry/specs/EXT/separate_shader_objects.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.EXT.separate_shader_objects import *
### END AUTOGENERATED SECTION
|
uw-it-aca/scout-vagrant
|
provisioning/templates/sample.wsgi.py
|
Python
|
apache-2.0
| 1,364
| 0.002199
|
"""
WSGI config for server_proj project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
mig
|
ht make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import site
site.addsitedir('/path/to/
|
spacescout_builds/server_proj/lib/python2.6/site-packages')
site.addsitedir('/path/to/spacescout_builds/server_proj')
#os.environ.setdefault("DJANGO_SETTINGS_MODULE", "server_proj.settings")
os.environ["DJANGO_SETTINGS_MODULE"] = "server_proj.settings"
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
MikeMitterer/dart-mdl-mustache
|
test/no_spec/whitespace.py
|
Python
|
bsd-2-clause
| 760
| 0.040789
|
import pystache
def render(source, values):
print pystache.render(source, values)
render(
"{{ # # foo }} {{ oi }} {{ / # foo }}",
{'# foo': [{'oi': 'OI!'}]}) # OI!
rende
|
r(
"{{ #foo }} {{ oi }} {{ /foo }}",
{'foo': [{'oi': 'OI!'}]}) # OI!
render(
"{{{ #foo }}} {{{ /foo }}}",
{'#foo': 1, '/foo': 2}) # 1 2
render(
"{{{ { }}}",
{'{': 1}) # 1
render(
"{{ > }}}",
|
{'>': 'oi'}) # "}" bug??
render(
"{{\nfoo}}",
{'foo': 'bar'}) # // bar
render(
"{{\tfoo}}",
{'foo': 'bar'}) # bar
render(
"{{\t# foo}}oi{{\n/foo}}",
{'foo': True}) # oi
render(
"{{{\tfoo\t}}}",
{'foo': True}) # oi
# Don't work in mustache.js
# render(
# "{{ { }}",
# {'{': 1}) # ERROR unclosed tag
# render(
# "{{ { foo } }}",
# {'foo': 1}) # ERROR unclosed tag
|
imajes/Sick-Beard
|
sickbeard/nzbget.py
|
Python
|
gpl-3.0
| 6,926
| 0.00361
|
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import httplib
import datetime
import urllib
import urlparse
import sickbeard
from base64 import standard_b64encode
import xmlrpclib
from sickbeard.exceptions import ex
from sickbeard.providers.generic import GenericProvider
from sickbeard import config
from sickbeard import logger
from common import Quality
def sendNZB(nzb):
if not sickbeard.NZBGET_HOST:
logger.log(u"No NZBGet host found in configuration. Please configure it.", logger.ERROR)
return False
nzb_filename = nzb.name + ".nzb"
try:
url = config.clean_url(sickbeard.NZBGET_HOST)
scheme, netloc, path, query, fragment = urlparse.urlsplit(url) # @UnusedVariable
if sickbeard.NZBGET_USERNAME or sickbeard.NZBGET_PASSWORD:
netloc = urllib.quote_plus(sickbeard.NZBGET_USERNAME.encode("utf-8", 'ignore')) + u":" + urllib.quote_plus(sickbeard.NZBGET_PASSWORD.encode("utf-8", 'ignore')) + u"@" + netloc
url = urlparse.urlunsplit((scheme, netloc, u"/xmlrpc", "", ""))
logger.log(u"Sending NZB to NZBGet")
logger.log(u"NZBGet URL: " + url, logger.DEBUG)
nzbGetRPC = xmlrpclib.ServerProxy(url.encode("utf-8
|
", 'ignore'))
if nzbGetRPC.writelog("INFO", "SickBeard connected to drop off " + nzb_filename + " any moment now."):
logger.log(u"Successful connected to NZBGet", logger.DEBUG)
else:
logger.log(u"Successful connected to NZBGet, but unable to send a message", logger.ERROR)
except httplib.socket.error:
|
logger.log(u"Please check if NZBGet is running. NZBGet is not responding.", logger.ERROR)
return False
except xmlrpclib.ProtocolError, e:
if (e.errmsg == "Unauthorized"):
logger.log(u"NZBGet username or password is incorrect.", logger.ERROR)
else:
logger.log(u"NZBGet protocol error: " + e.errmsg, logger.ERROR)
return False
except Exception, e:
logger.log(u"NZBGet sendNZB failed. URL: " + url + " Error: " + ex(e), logger.ERROR)
return False
# if it aired recently make it high priority and generate dupekey/dupescore
add_to_top = False
nzbgetprio = dupescore = 0
dupekey = ""
for curEp in nzb.episodes:
if dupekey == "":
dupekey = "SickBeard-" + str(curEp.show.tvdbid)
dupekey += "-" + str(curEp.season) + "." + str(curEp.episode)
if datetime.date.today() - curEp.airdate <= datetime.timedelta(days=7):
add_to_top = True
nzbgetprio = 100
# tweak dupescore based off quality, higher score wins
if nzb.quality != Quality.UNKNOWN:
dupescore = nzb.quality * 100
if nzb.quality == Quality.SNATCHED_PROPER:
dupescore += 10
nzbget_result = None
nzbcontent64 = None
# if we get a raw data result we encode contents and pass that
if nzb.resultType == "nzbdata":
data = nzb.extraInfo[0]
nzbcontent64 = standard_b64encode(data)
logger.log(u"Attempting to send NZB to NZBGet (" + sickbeard.NZBGET_CATEGORY + ")", logger.DEBUG)
try:
# find out nzbget version to branch logic, 0.8.x and older will return 0
nzbget_version_str = nzbGetRPC.version()
nzbget_version = config.to_int(nzbget_version_str[:nzbget_version_str.find(".")])
# v8 and older, no priority or dupe info
if nzbget_version == 0:
if nzbcontent64:
nzbget_result = nzbGetRPC.append(nzb_filename, sickbeard.NZBGET_CATEGORY, add_to_top, nzbcontent64)
else:
# appendurl not supported on older versions, so d/l nzb data from url ourselves
if nzb.resultType == "nzb":
genProvider = GenericProvider("")
data = genProvider.getURL(nzb.url)
if data:
nzbcontent64 = standard_b64encode(data)
nzbget_result = nzbGetRPC.append(nzb_filename, sickbeard.NZBGET_CATEGORY, add_to_top, nzbcontent64)
# v13+ has a new combined append method that accepts both (url and content)
elif nzbget_version >= 13:
if nzbcontent64:
nzbget_result = nzbGetRPC.append(nzb_filename, nzbcontent64, sickbeard.NZBGET_CATEGORY, nzbgetprio, False, False, dupekey, dupescore, "score")
else:
nzbget_result = nzbGetRPC.append(nzb_filename, nzb.url, sickbeard.NZBGET_CATEGORY, nzbgetprio, False, False, dupekey, dupescore, "score")
# the return value has changed from boolean to integer (Positive number representing NZBID of the queue item. 0 and negative numbers represent error codes.)
if nzbget_result > 0:
nzbget_result = True
else:
nzbget_result = False
# v12 pass dupekey + dupescore
elif nzbget_version == 12:
if nzbcontent64:
nzbget_result = nzbGetRPC.append(nzb_filename, sickbeard.NZBGET_CATEGORY, nzbgetprio, False, nzbcontent64, False, dupekey, dupescore, "score")
else:
nzbget_result = nzbGetRPC.appendurl(nzb_filename, sickbeard.NZBGET_CATEGORY, nzbgetprio, False, nzb.url, False, dupekey, dupescore, "score")
# v9+ pass priority, no dupe info
else:
if nzbcontent64:
nzbget_result = nzbGetRPC.append(nzb_filename, sickbeard.NZBGET_CATEGORY, nzbgetprio, False, nzbcontent64)
else:
nzbget_result = nzbGetRPC.appendurl(nzb_filename, sickbeard.NZBGET_CATEGORY, nzbgetprio, False, nzb.url)
if nzbget_result:
logger.log(u"NZB sent to NZBGet successfully", logger.DEBUG)
return True
else:
logger.log(u"NZBGet could not add " + nzb_filename + " to the queue", logger.ERROR)
return False
except:
logger.log(u"Connect Error to NZBGet: could not add " + nzb_filename + " to the queue", logger.ERROR)
return False
return False
|
thasso/pyjip
|
jip/scripts/__init__.py
|
Python
|
bsd-3-clause
| 1,603
| 0
|
#!/usr/bin/env python
"""This module contains a set of default tools that are
deployed with jip
"""
import jip
@jip.tool("cleanup")
class cleanup(object):
"""\
The cleanup tool removes ALL the defined output
files of its dependencies. If you have a set of intermediate jobs,
you can put this as a finalization step that goes and removes
a set of files.
Usage:
cleanup -f <files>...
Inputs:
-f, --files <files>... The files that will be deleted
"""
def is_done(self):
from os.path import exists
if self.options['files'].is_dependency():
return False
for f in self.options["files"].raw():
if exists(f):
return False
return True
def validate(self):
return True
def get_command(self):
return "bash", "for file in ${files}; do rm -f $file; done"
@jip.tool("bash")
class bash(object):
"""\
Run a bash command
Usage:
bash_runner.jip [-i <input>] [-o <output>] [-O <outfile>] -c <cmd>...
bash_runner.jip [--
|
help]
Options:
--help Show this help message
-c, --cmd <cmd>... The command to run
Inputs:
-i, --input <input> The input file to read
[default: stdin]
|
Outputs:
-O, --outfile <outfile> The output file
-o, --output <output> The output file to write
[default: stdout]
"""
def get_command(self):
return "bash", """(${cmd})${output|arg("> ")}"""
|
UpSea/midProjects
|
BasicOperations/00_Python/00_Python_05_Deco01.py
|
Python
|
mit
| 772
| 0.032051
|
'''
mid
此例展示带参数的装饰器如何装饰带参数的函数
此时,装饰器的参数,被装饰的函数,被装饰函数的参数都有确定的传递位置
'''
def d(argDec): #1) 装饰器的参数
def _d(funcDecored): #2) 被装饰函数
def __d(*arg, **karg): #3
|
) 被装饰函数的参数
|
print (argDec)
print("do sth before decored func..")
r= funcDecored(*arg, **karg)
print("do sth after decored func..")
return r
return __d
return _d
@d("first")
def func01():
print("call func01")
@d("second")
def func02(a, b=2):
print("call f2")
print (a+b)
func01()
print ("-"*20)
func02(1)
print ("-"*20)
func02(a=1,b=4)
|
Robbie1977/NRRDtools
|
test.py
|
Python
|
mit
| 436
| 0.025229
|
# Create the data.
from numpy import pi, s
|
in, cos, mgrid
dphi, dtheta = pi/250
|
.0, pi/250.0
[phi,theta] = mgrid[0:pi+dphi*1.5:dphi,0:2*pi+dtheta*1.5:dtheta]
m0 = 4; m1 = 3; m2 = 2; m3 = 3; m4 = 6; m5 = 2; m6 = 6; m7 = 4;
r = sin(m0*phi)**m1 + cos(m2*phi)**m3 + sin(m4*theta)**m5 + cos(m6*theta)**m7
x = r*sin(phi)*cos(theta)
y = r*cos(phi)
z = r*sin(phi)*sin(theta)
# View it.
from mayavi import mlab
s = mlab.mesh(x, y, z)
mlab.show()
|
katakumpo/niceredis
|
tests/test_scripting.py
|
Python
|
mit
| 2,723
| 0
|
from __future__ import with_statement
import pytest
from redis import exceptions
from redis._compat import b
multiply_script = """
local value = redis.call('GET', KEYS[1])
value = tonumber(value)
return value * ARGV[1]"""
class TestScripting(object):
@pytest.fixture(autouse=True)
def reset_scripts(self, r):
r.script_flush()
def test_eval(self, r):
r.set('a', 2)
# 2 * 3 == 6
assert r.eval(multiply_script, 1, 'a', 3) == 6
def test_evalsha(self, r):
r.set('a', 2)
sha = r.script_load(multiply_script)
# 2 * 3 == 6
assert r.evalsha(sha, 1, 'a', 3) == 6
def test_evalsha_script_not_loaded(self, r):
r.set('a', 2)
sha = r.script_load(multiply_script)
# remove the script from Redis's cache
r.script_flush()
with pytest.raises(exceptions.NoScriptError):
r.evalsha(sha, 1, 'a', 3)
def test_script_loading(self, r):
# get the sha, then clear the cache
sha = r.script_load(multiply_script)
r.scr
|
ipt_flush()
assert r.script_exists(sha) ==
|
[False]
r.script_load(multiply_script)
assert r.script_exists(sha) == [True]
def test_script_object(self, r):
r.set('a', 2)
multiply = r.register_script(multiply_script)
assert not multiply.sha
# test evalsha fail -> script load + retry
assert multiply(keys=['a'], args=[3]) == 6
assert multiply.sha
assert r.script_exists(multiply.sha) == [True]
# test first evalsha
assert multiply(keys=['a'], args=[3]) == 6
def test_script_object_in_pipeline(self, r):
multiply = r.register_script(multiply_script)
assert not multiply.sha
pipe = r.pipeline()
pipe.set('a', 2)
pipe.get('a')
multiply(keys=['a'], args=[3], client=pipe)
# even though the pipeline wasn't executed yet, we made sure the
# script was loaded and got a valid sha
assert multiply.sha
assert r.script_exists(multiply.sha) == [True]
# [SET worked, GET 'a', result of multiple script]
assert pipe.execute() == [True, b('2'), 6]
# purge the script from redis's cache and re-run the pipeline
# the multiply script object knows it's sha, so it shouldn't get
# reloaded until pipe.execute()
r.script_flush()
pipe = r.pipeline()
pipe.set('a', 2)
pipe.get('a')
assert multiply.sha
multiply(keys=['a'], args=[3], client=pipe)
assert r.script_exists(multiply.sha) == [False]
# [SET worked, GET 'a', result of multiple script]
assert pipe.execute() == [True, b('2'), 6]
|
edx-solutions/edx-platform
|
openedx/core/lib/xblock_builtin/xblock_discussion/xblock_discussion/__init__.py
|
Python
|
agpl-3.0
| 12,281
| 0.002931
|
# -*- coding: utf-8 -*-
"""
Discussion XBlock
"""
import logging
import six
from six.moves import urllib
from six.moves.urllib.parse import urlparse # pylint: disable=import-error
from django.contrib.staticfiles.storage import staticfiles_storage
from django.urls import reverse
from django.utils.translation import get_language_bidi, get_language
from xblock.completable import XBlockCompletionMode
from xblock.core import XBlock
from xblock.fields import Scope, String, UNIQUE_ID
from web_fragments.fragment import Fragment
from xblockutils.resources import ResourceLoader
from xblockutils.studio_editable import StudioEditableXBlockMixin
from openedx.core.djangolib.markup import HTML, Text
from openedx.core.lib.xblock_builtin import get_css_dependencies, get_js_dependencies
from xmodule.raw_module import RawDescriptor
from xmodule.xml_module import XmlParserMixin
log = logging.getLogger(__name__)
loader = ResourceLoader(__name__) # pylint: disable=invalid-name
def _(text):
"""
A noop underscore function that marks strings for extraction.
"""
return text
@XBlock.needs('user') # pylint: disable=abstract-method
@XBlock.needs('i18n')
class DiscussionXBlock(XBlock, StudioEditableXBlockMixin, XmlParserMixin):
"""
Provides a discussion forum that is inline with other content in the courseware.
"""
completion_mode = XBlockCompletionMode.EXCLUDED
discussion_id = String(scope=Scope.settings, default=UNIQUE_ID)
display_name = String(
display_name=_("Display Name"),
help=_("The display name for this component."),
default="Discussion",
scope=Scope.settings
)
discussion_category = String(
display_name=_("Category"),
default=_("Week 1"),
help=_(
"A category name for the discussion. "
"This name appears in the left pane of the discussion forum for the course."
),
scope=Scope.settings
)
discussion_target = String(
display_name=_("Subcategory"),
default="Topic-Level Student-Visible Label",
help=_(
"A subcategory name for the discussion. "
"This name appears in the left pane of the discussion forum for the course."
),
scope=Scope.settings
)
sort_key = String(scope=Scope.settings)
editable_fields = ["display_name", "discussion_category", "discussion_target"]
has_author_view = True # Tells Studio to use author_view
# support for legacy OLX format - consumed by XmlParserMixin.load_metadata
metadata_translations = dict(RawDescriptor.metadata_translations)
metadata_translations['id'] = 'discussion_id'
metadata_translations['for'] = 'discussion_target'
@property
def course_key(self):
"""
:return: int
|
course id
NB: The goal is to move this XBlock out of edx-platform, and so we use
scope_ids.usage_id instead
|
of runtime.course_id so that the code will
continue to work with workbench-based testing.
"""
return getattr(self.scope_ids.usage_id, 'course_key', None)
@property
def django_user(self):
"""
Returns django user associated with user currently interacting
with the XBlock.
"""
user_service = self.runtime.service(self, 'user')
if not user_service:
return None
return user_service._django_user # pylint: disable=protected-access
@staticmethod
def get_translation_content():
try:
return 'js/i18n/{lang}/djangojs.js'.format(
lang=get_language(),
)
except IOError:
return 'js/i18n/en/djangojs.js'
@staticmethod
def vendor_js_dependencies():
"""
Returns list of vendor JS files that this XBlock depends on.
The helper function that it uses to obtain the list of vendor JS files
works in conjunction with the Django pipeline to ensure that in development mode
the files are loaded individually, but in production just the single bundle is loaded.
"""
vendor_dependencies = get_js_dependencies('discussion_vendor')
base_vendor_dependencies = [
'edx-ui-toolkit/js/utils/global-loader.js',
'edx-ui-toolkit/js/utils/string-utils.js',
'edx-ui-toolkit/js/utils/html-utils.js',
'js/vendor/URI.min.js',
'js/vendor/jquery.leanModal.js'
]
return base_vendor_dependencies + vendor_dependencies
@staticmethod
def js_dependencies():
"""
Returns list of JS files that this XBlock depends on.
The helper function that it uses to obtain the list of JS files
works in conjunction with the Django pipeline to ensure that in development mode
the files are loaded individually, but in production just the single bundle is loaded.
"""
return get_js_dependencies('discussion')
@staticmethod
def css_dependencies():
"""
Returns list of CSS files that this XBlock depends on.
The helper function that it uses to obtain the list of CSS files
works in conjunction with the Django pipeline to ensure that in development mode
the files are loaded individually, but in production just the single bundle is loaded.
"""
if get_language_bidi():
return get_css_dependencies('style-inline-discussion-rtl')
else:
return get_css_dependencies('style-inline-discussion')
def add_resource_urls(self, fragment):
"""
Adds URLs for JS and CSS resources that this XBlock depends on to `fragment`.
"""
# Add js translations catalog
fragment.add_javascript_url(staticfiles_storage.url(self.get_translation_content()))
# Head dependencies
for vendor_js_file in self.vendor_js_dependencies():
fragment.add_resource_url(staticfiles_storage.url(vendor_js_file), "application/javascript", "head")
for css_file in self.css_dependencies():
fragment.add_css_url(staticfiles_storage.url(css_file))
# Body dependencies
for js_file in self.js_dependencies():
fragment.add_javascript_url(staticfiles_storage.url(js_file))
def has_permission(self, permission):
"""
Encapsulates lms specific functionality, as `has_permission` is not
importable outside of lms context, namely in tests.
:param user:
:param str permission: Permission
:rtype: bool
"""
# normal import causes the xmodule_assets command to fail due to circular import - hence importing locally
from lms.djangoapps.discussion.django_comment_client.permissions import has_permission
return has_permission(self.django_user, permission, self.course_key)
def student_view(self, context=None):
"""
Renders student view for LMS.
"""
fragment = Fragment()
self.add_resource_urls(fragment)
login_msg = ''
if not self.django_user.is_authenticated:
qs = urllib.parse.urlencode({
'course_id': self.course_key,
'enrollment_action': 'enroll',
'email_opt_in': False,
})
login_msg = Text(_(u"You are not signed in. To view the discussion content, {sign_in_link} or "
u"{register_link}, and enroll in this course.")).format(
sign_in_link=HTML(u'<a href="{url}">{sign_in_label}</a>').format(
sign_in_label=_('sign in'),
url='{}?{}'.format(reverse('signin_user'), qs),
),
register_link=HTML(u'<a href="/{url}">{register_label}</a>').format(
register_label=_('register'),
url='{}?{}'.format(reverse('register_user'), qs),
),
)
context = {
'discussion_id': self.discussion_id,
'display_name': self.display_name if self.display_name else _("Discussion"),
'user': self.django_user,
|
ColinDuquesnoy/QCrash
|
tests/test_dialogs/test_review.py
|
Python
|
mit
| 455
| 0
|
from qcrash._dialogs.review import DlgReview
def test_review(qtbot):
dlg = DlgReview('some content', 'log content', None, None)
assert dlg.ui.edit_main.toPlainText() == 'some content'
|
assert dlg.ui.edit_log.toPlainText() == 'log content'
qtbot.keyPress(dlg.ui.edit_main, 'A')
assert dlg.ui.edit_main.toPlainText() == 'Asome content'
qtbot.keyPress(dlg.ui.edit_log, 'A')
assert dlg.ui.edit_
|
log.toPlainText() == 'Alog content'
|
lichong012245/django-lfs-0.7.8
|
lfs/marketing/models.py
|
Python
|
bsd-3-clause
| 1,567
| 0
|
# django imports
from django.db import models
from django.utils.translation import ugettext_lazy as _
# lfs imports
from lfs.catalog.models import Product
from lfs.order.models import Order
class Topseller(models.Model):
"""Selected products are in any case among topsellers.
"""
product = models.ForeignKey(Product, verbose_name=_(u"Product"))
position = models.PositiveSmallIntegerField(_(u"Position"), default=1)
class Meta:
ordering = ["position"]
def __unicode__(self):
return "%s (%s)" % (self.product.name, self.position)
class ProductSales(models.Model):
"""Stores totals sales per product.
"""
product = models.ForeignKey(Product, verbose_name=_(u"Product"))
sales = models.IntegerField(_(u"sales"), default=0)
class FeaturedProduct(models.Model):
"""Featured products are manually selected by the
|
shop owner
"""
product = models.ForeignKey(Product, verbose_name=_(u"Product"))
position = models.PositiveSmallIntegerField(_(u"Position"), default=1)
active = models.BooleanField(_(u"Active"), default=True)
class Meta:
ordering = ["position"]
def __unicode__(self):
return "%s (%s)" % (self.product.name, self.position)
class OrderRatingMail(models.Model):
"""Saves whether and when a rating mail has been send for an order.
|
"""
order = models.ForeignKey(Order, verbose_name=_(u"Order"))
send_date = models.DateTimeField(auto_now=True)
def __unicode__(self):
return "%s (%s)" % (self.order.id, self.rating_mail_sent)
|
nachiketkarmarkar/XtremPerfProbe
|
generatePlots.py
|
Python
|
mit
| 6,181
| 0.015046
|
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plot
import matplotlib.pylab
from matplotlib.backends.backend_pdf import PdfPages
import re
def drawPlots(data,plotObj,name,yLabel,position):
drawing = plotObj.add_subplot(position,1,position)
drawing.set_ylabel(yLabel, fontsize=16)
drawing.set_xlabel("Sample", fontsize=18)
drawing.plot(data[name], label = name)
drawing.legend(loc = 'upper center', bbox_to_anchor=(0.9, 1.128))
# drawing.legend(loc = 'upper center')
def drawXtremIOCharts():
xenvData = np.genfromtxt('xenvPerfStats.csv', dtype=float, delimiter=',', names=True)
xmsData = np.genfromtxt('xmsPerfStats.csv', dtype=float, delimiter=',', names=True)
plot.ioff()
iops = plot.figure(figsize=(20,15))
iops.suptitle("IOPs", fontsize=20)
iopsInit = len(iops.axes)
bw = plot.figure(figsize=(20,15))
bw.suptitle("Bandwidth MB/s", fontsize=20)
bwInit = len(bw.axes)
latency = plot.figure(figsize=(20,15))
latency.suptitle("Latency, MicroSec.", fontsize=20)
latencyInit = len(latency.axes)
xCpu = plot.figure(figsize=(20,15))
xCpu.suptitle("X-ENV Utilization", fontsize=20)
xCpuInit = len(xCpu.axes)
for name in xmsData.dtype.names:
if re.search('iops', name):
drawPlots(xmsData,iops,name,"IOPs",iopsInit+1)
if re.search('bandwidth', name):
drawPlots(xmsData,bw,name,"Bandwidth, MB/s", bwInit+1)
if re.search('latency', name):
drawPlots(xmsData,latency,name,"Latency, MicroSec", latencyInit+1)
for name in xenvData.dtype.names:
drawPlots(xenvData,xCpu,name,"% CPU Utilization", xCpuInit+1)
pdfDoc = PdfPages('XtremPerfcharts.pdf')
pdfDoc.savefig(iops)
pdfDoc.savefig(bw)
pdfDoc.savefig(latency)
pdfDoc.savefig(xCpu)
pdfDoc.close()
plot.close(iops)
plot.close(bw)
plot.close(latency)
plot.close(xCpu)
# plot.show()
def drawVolPerfCharts(vol):
volData = np.genfromtxt('%s.csv' % (vol), dtype=float, delimiter=',', names=True)
plot.ioff()
iops = plot.figure(figsize=(20,15))
iops.suptitle("IOPs", fontsize=20)
iopsInit = len(iops.axes)
bw = plot.figure(figsize=(20,15))
bw.suptitle("Bandwidth MB/s", fontsize=20)
bwInit = len(bw.axes)
latency = plot.figure(figsize=(20,15))
latency.suptitle("Latency, MicroSec.", fontsize=20)
latencyInit = len(latency.axes)
for name in volData.dtype.names:
if re.search('iops', name):
drawPlots(volData,iops,name,"IOPs",iopsInit+1)
if re.search('bandwidth', name):
drawPlots(volData,bw,name,"Bandwidth, MB/s", bwInit+1)
if re.search('latency', name):
drawPlots(volData,latency,name,"Latency, MicroSec", latencyInit+1)
pdfDoc = PdfPages('%s.pdf' %(vol))
pdfDoc.savefig(iops)
pdfDoc.savefig(bw)
pdfDoc.savefig(latency)
pdfDoc.close()
plot.close(iops)
plot.close(bw)
plot.close(latency)
def drawEsxCharts(hostname,storageHba):
pdfDoc = PdfPages('host_%s.pdf'%(hostname))
data = np.genfromtxt('%s.csv' %(hostname), dtype=float, delimiter=',', names=True)
# print data.dtype.names
cpu = plot.figure(figsize=(20,15))
cpu.suptitle("% CPU-Utilization", fontsize=20)
cpuInit = len(cpu.axes)
memory = plot.figure(figsize=(20,15))
memory.suptitle("% Memory Usage", fontsize=20)
memoryInit = len(memory.axes)
for name in data.dtype.names:
if re.match('CPU_Utilization', name):
plotName = '% CPU Util'
drawPlots(data,cpu,name,"% CPU Util",cpuInit+1)
if re.match('Memory_Usage', name):
plotName = '% Usage'
drawPlots(data,memory,name,"% Memory Usage", memoryInit+1)
for hba in storageHba:
hba_iops = plot.figure(figsize=(20,15))
hba_iops.suptitle("%s IOPs"%(hba), fontsize=20)
hbaIopsInit = len(hba_iops.axes)
hba_bw = plot.figure(figsize=(20,15))
hba_bw.suptitle("%s Bandwidth"%(hba), fontsize=20)
hbaBwInit = len(hba_bw.axes)
hba_latency = plot.figure(figsize=(20,15))
hba_latency.suptitle("%s Latency"%(hba), fontsize=20)
hbaLatencyInit = len(hba_latency.axes)
for name in data.dtype.names:
if re.search('Storage_adapter%s'%(hba), name) and re.search('requests_per_second', name):
plotName = '%s IOPs' %(hba)
drawPlots(data,hba_iops,name,"IOPs",hbaIopsInit+1)
if re.search('Storage_adapter%s'%(hba), name) and re.search(r
|
'_rate_average', name):
plotName = 'Bandwidth Utilization'
drawPlots(data,hba_bw,name,"Bandwidth Utilization", hbaBwInit+1)
if re.search('Storage_adapter%s'%(hba), name) and re.search(r'_latency_average', name):
plotName = 'Latency'
drawPlots(data,hba_latency,name,"Latency (msec)", hbaLatencyInit+1)
pdfDoc.savefig(hba_latency)
pdfDoc.savefig(hba_iops)
pdfDoc.
|
savefig(hba_bw)
pdfDoc.savefig(cpu)
pdfDoc.savefig(memory)
pdfDoc.close()
plot.close(hba_iops)
plot.close(hba_bw)
plot.close(hba_latency)
plot.close(cpu)
plot.close(memory)
# plot.show()
def main():
drawXtremIOCharts()
# data = np.genfromtxt('xtremPerfStats.csv', dtype=float, delimiter=',', names=True)
# print data.dtype.names
# iops = plot.figure()
# iopsInit = len(iops.axes)
# bw = plot.figure()
# bwInit = len(bw.axes)
# latency = plot.figure()
# latencyInit = len(latency.axes)
# xCpu = plot.figure()
# xCpuInit = len(xCpu.axes)
# for name in data.dtype.names:
# if re.search('iops', name):
# drawPlots(data,iops,name,"IOPs",iopsInit+1)
# if re.search('bandwidth', name):
# drawPlots(data,bw,name,"Bandwidth, MB/s", bwInit+1)
# if re.search('latency', name):
# drawPlots(data,latency,name,"Latency, MicroSec", latencyInit+1)
# if re.search('SC', name):
# drawPlots(data,xCpu,name,"% CPU Utilization", xCpuInit+1)
# plot.show()
if __name__ == '__main__':
main()
|
scanny/python-pptx
|
pptx/enum/action.py
|
Python
|
mit
| 1,548
| 0.000646
|
# encoding: utf-8
"""
Enumerations that describe click action settings
"""
from __future__ import absolute_import
from .base import alias, Enumeration, EnumMember
@alias("PP_ACTION")
class PP_ACTION_TYPE(Enumeration):
"""
Specifies the type of a mouse action (click or hover action).
Alias: ``PP_ACTION``
Example::
from pptx.enum.action import PP_ACTION
assert shape.click_action.action == PP_ACTION.HYPERLINK
"""
__ms_name__ = "PpActionType"
__url__ = "https://msdn.microsoft.com/EN-US/libra
|
ry/office/ff744895.aspx"
__members__ = (
EnumMember("END_SHOW", 6, "Slide show ends."),
EnumMember("FIRST_SLIDE", 3, "Returns to the first slide."),
EnumMember("HYPERLINK", 7, "Hyperlink."),
EnumMember("LAST_SLIDE", 4, "Moves to the last slide."),
EnumMember("LAST_SLIDE_VIEWED", 5, "Moves to the last slide viewed."),
EnumMember("NAMED_SLIDE", 101, "Mov
|
es to slide specified by slide number."),
EnumMember("NAMED_SLIDE_SHOW", 10, "Runs the slideshow."),
EnumMember("NEXT_SLIDE", 1, "Moves to the next slide."),
EnumMember("NONE", 0, "No action is performed."),
EnumMember("OPEN_FILE", 102, "Opens the specified file."),
EnumMember("OLE_VERB", 11, "OLE Verb."),
EnumMember("PLAY", 12, "Begins the slideshow."),
EnumMember("PREVIOUS_SLIDE", 2, "Moves to the previous slide."),
EnumMember("RUN_MACRO", 8, "Runs a macro."),
EnumMember("RUN_PROGRAM", 9, "Runs a program."),
)
|
Orav/kbengine
|
kbe/src/lib/python/Lib/test/test_opcodes.py
|
Python
|
lgpl-3.0
| 2,787
| 0.011123
|
# Python test set -- part 2, opcodes
from test.support import run_unittest
import unittest
class OpcodeTest(unittest.TestCase):
def test_try_inside_for_loop(self):
n = 0
for i in range(10):
|
n = n+i
try: 1/0
except NameError: pass
|
except ZeroDivisionError: pass
except TypeError: pass
try: pass
except: pass
try: pass
finally: pass
n = n+i
if n != 90:
self.fail('try inside for')
def test_raise_class_exceptions(self):
class AClass(Exception): pass
class BClass(AClass): pass
class CClass(Exception): pass
class DClass(AClass):
def __init__(self, ignore):
pass
try: raise AClass()
except: pass
try: raise AClass()
except AClass: pass
try: raise BClass()
except AClass: pass
try: raise BClass()
except CClass: self.fail()
except: pass
a = AClass()
b = BClass()
try:
raise b
except AClass as v:
self.assertEqual(v, b)
else:
self.fail("no exception")
# not enough arguments
##try: raise BClass, a
##except TypeError: pass
##else: self.fail("no exception")
try: raise DClass(a)
except DClass as v:
self.assertIsInstance(v, DClass)
else:
self.fail("no exception")
def test_compare_function_objects(self):
f = eval('lambda: None')
g = eval('lambda: None')
self.assertNotEqual(f, g)
f = eval('lambda a: a')
g = eval('lambda a: a')
self.assertNotEqual(f, g)
f = eval('lambda a=1: a')
g = eval('lambda a=1: a')
self.assertNotEqual(f, g)
f = eval('lambda: 0')
g = eval('lambda: 1')
self.assertNotEqual(f, g)
f = eval('lambda: None')
g = eval('lambda a: None')
self.assertNotEqual(f, g)
f = eval('lambda a: None')
g = eval('lambda b: None')
self.assertNotEqual(f, g)
f = eval('lambda a: None')
g = eval('lambda a=None: None')
self.assertNotEqual(f, g)
f = eval('lambda a=0: None')
g = eval('lambda a=1: None')
self.assertNotEqual(f, g)
def test_modulo_of_string_subclasses(self):
class MyString(str):
def __mod__(self, value):
return 42
self.assertEqual(MyString() % 3, 42)
def test_main():
run_unittest(OpcodeTest)
if __name__ == '__main__':
test_main()
|
Tesora/tesora-project-config
|
tools/check_irc_access.py
|
Python
|
apache-2.0
| 5,790
| 0.000173
|
#! /usr/bin/env python
# Copyright 2011, 2013-2014 OpenStack Foundation
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import irc.client
import logging
import random
import string
import ssl
import sys
import time
import yaml
logging.basicConfig(level=logging.INFO)
class CheckAccess(irc.client.SimpleIRCClient):
log = logging.getLogger("checkaccess")
def __init__(self, channels, nick, flags):
irc.client.SimpleIRCClient.__init__(self)
self.identify_msg_cap = False
self.channels = channels
self.nick = nick
self.flags = flags
self.current_channel = None
self.current_list = []
self.failed = None
def on_disconnect(self, connection, event):
if self.failed is not False:
sys.exit(1)
else:
sys.exit(0)
def on_welcome(self, c, e):
self.identify_msg_cap = False
self.log.debug("Requesting identify-msg capability")
c.cap('REQ', 'identify-msg')
c.cap('END')
def on_cap(self, c, e):
self.log.debug("Received cap response %s" % repr(e.arguments))
if e.arguments[0] == 'ACK' and 'identify-msg' in e.arguments[1]:
self.log.debug("identify-msg cap acked")
self.identify_msg_cap = True
self.advance()
def on_privnotice(self, c, e):
if not self.identify_msg_cap:
self.log.debug("Ignoring message because identify-msg "
"cap not enabled")
return
nick = e.source.split('!')[0]
auth = e.arguments[0][0]
msg = e.arguments[0][1:]
if auth != '+' or nick != 'ChanServ':
self.log.debug("Ignoring message from unauthenticated "
"user %s" % nick)
return
self.advance(msg)
def advance(self, msg=None):
if not self.current_channel:
if not self.channels:
self.connection.quit()
return
self.current_channel = self.channels.pop()
self.current_list = []
self.connection.privmsg('chanserv', 'access list %s' %
self.current_channel)
time.sleep(1)
return
if msg.endswith('is not registered.'):
self.failed = True
print("%s is not registered with ChanServ." %
self.current_channel)
self.current_channel = None
self.advance()
return
if msg.startswith('End of'):
found = False
for nick, flags, msg in self.current_list:
if nick == self.nick and flags == self.flags:
self.log.info('%s access ok on %s' %
(self.nick, self.current_channel))
found = True
break
if not found:
|
self.failed = True
print("%s does not have permissions on %s:" %
(self.nick, self.current_channel))
for nick, flags, msg in self.current_list:
print(msg)
print
# If this is the first channel checked, set the failure
# flag to false because we know that the system is
# operating well enough to check at least one channel.
if self.failed is None:
self.faile
|
d = False
self.current_channel = None
self.advance()
return
parts = msg.split()
self.current_list.append((parts[1], parts[2], msg))
def main():
parser = argparse.ArgumentParser(description='IRC channel access check')
parser.add_argument('-l', dest='config',
default='/etc/accessbot/channels.yaml',
help='path to the config file')
parser.add_argument('-s', dest='server',
default='chat.freenode.net',
help='IRC server')
parser.add_argument('-p', dest='port',
default=6697,
help='IRC port')
parser.add_argument('nick',
help='the nick for which access should be validated')
args = parser.parse_args()
config = yaml.load(open(args.config))
channels = []
for channel in config['channels']:
channels.append('#' + channel['name'])
access_level = None
for level, names in config['global'].items():
if args.nick in names:
access_level = level
if access_level is None:
raise Exception("Unable to determine global access level for %s" %
args.nick)
flags = config['access'][access_level]
a = CheckAccess(channels, args.nick, flags)
mynick = ''.join(random.choice(string.ascii_uppercase)
for x in range(16))
port = int(args.port)
if port == 6697:
factory = irc.connection.Factory(wrapper=ssl.wrap_socket)
a.connect(args.server, int(args.port), mynick,
connect_factory=factory)
else:
a.connect(args.server, int(args.port), mynick)
a.start()
if __name__ == "__main__":
main()
|
qadium-memex/linkalytics
|
linkalytics/factor/constructor/merge.py
|
Python
|
apache-2.0
| 603
| 0.014925
|
from . elasticfactor import ElasticFactor
from ... environment import cfg
from elasticsearch import Elasticsearch
def run(node):
id_a, id_b = node.get('id_a', '63166071_1'), node.get('id_b', '63166071_2')
es = Elasticsearch()
data_a = es.get(index="factor_state2016", doc_type='factor
|
_network', id=id_a)
data_b = es.get(index="factor_state2016", doc_type='factor_network', id=id_b)
const
|
ructor = ElasticFactor(cfg["cdr_elastic_search"]["hosts"] + cfg["cdr_elastic_search"]["index"])
merged = constructor.merge(data_a["_source"], data_b["_source"])
return merged
|
antivirtel/Flexget
|
flexget/_version.py
|
Python
|
mit
| 453
| 0.004415
|
"""
Current FlexGet version.
T
|
his is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '
|
1.2.500.dev'
|
obmarg/pypod
|
feed.py
|
Python
|
bsd-2-clause
| 6,209
| 0.015139
|
#!/usr/bin/python
import feedparser
import os
import pickle
import logging
from episode import Episode
log = logging.getLogger()
class Feed:
""" Class representing a single podcast feed """
def __init__(
self,
name,
url,
destPath,
episodes,
limit=0,
postCommand=None,
destFilenameFormat=None
):
""" Constructor
Params:
name - The name of the podcast
url - The url of the feed
destPath - The path to download mp3 files to
episodes - A list of already downloaded episodes
limit - The max files to add to download
list in one shot
postCommand - A command to be run on finishing
destFilenameFormat - The format for destination filenames
"""
self.name = name
self.url = url
self.destPath = destPath
self.episodes = episodes
self.downloadList = []
self.limit = limit
self.postCommand = postCommand
if destFilenameFormat:
self.destFilenameFormat = destFilenameFormat.rstrip()
else:
self.destFilenameFormat = "%podcastname%/%filename%"
def IsNew( self ):
""" Checks if this feed is new """
return len( self.episodes ) == 0
def RunUpdate( self ):
""" Runs an update of this feed """
self.FetchFeed()
self.DownloadFiles()
def Has
|
Episode( self, name ):
""" Checks if an episode has already been download
Params:
name - The name of the episode to look for
Returns True or False
"""
return any(
True for e in self.episodes
if e.name == name
)
def FetchFeed( self ):
""" Fetches from the rss feed """
result = feedparser.parse( self.url )
for entry in result.en
|
tries:
if not self.HasEpisode( entry.title ):
epUrl = self.GetDownloadUrl( entry )
if not epUrl:
continue
self.AddToDownloadList(
epUrl,
entry.title,
self.MakeEpisodeFilename(
entry,
epUrl
) )
log.debug(
"Feed fetched. %i total, %i new",
len( result.entries ),
len( self.downloadList )
)
def AddToDownloadList( self, link, title, destFilename ):
""" Adds a link and reference to the download list
Params:
link - The link to add
title - The title of this episode
destFilename - The destination filename
"""
self.downloadList.append(
Episode(
title,
title,
link,
destFilename
) )
def GetDownloadUrl( self, entry ):
""" Gets the mp3 download url from an rss entry
Params:
entry - the rss entry
Returns:
The url (or None)
"""
if entry.link and entry.link[:4] == u'.mp3':
return entry.link
elif entry.links:
for linkData in entry.links:
if (
linkData['type'] == u'audio/mpeg' or
linkData['href'][:-4] == u'.mp3'
):
return linkData.href
log.info(
"No download link found for %s",
entry.title
)
return
def DownloadFiles( self ):
""" Downloads each of the files in downloadList """
if not os.path.exists( self.destPath ):
os.makedirs( self.destPath )
limit = len( self.downloadList )
if self.limit != 0:
limit = self.limit
for episode in self.downloadList[:limit]:
try:
episode.Download()
self.episodes.append( episode )
CallPostCommand( episode )
except:
pass
""" TODO: print traceback """
else:
log.debug( "No New Episodes" )
def CallPostCommand( self, episode ):
""" Call the post download command
Params:
episode - The episode just downloaded
"""
pass
def MarkAllAsDownloaded( self ):
""" Marks everything in the downloaded list as downloaded """
log.info( "Marking all as Downloaded" )
for episode in self.downloadList:
self.episodes.append( episode )
def MakeEpisodeFilename( self, entry, url=None ):
""" Makes a filename for an episode.
Params:
entry - The rss feed entry for this episode
url - The url for this episode.
Will be calculated if not set
Returns the destination filename, including full path
"""
if url == None:
url = self.MakeDownloadUrl( entry )
urlBasename = os.path.basename( url )
urlBasenameExt = urlBasename.rfind( '.' )
if urlBasenameExt != -1:
urlFilename = urlBasename[:urlBasenameExt]
urlExt = urlBasename[urlBasenameExt:]
else:
urlFilename = urlBasename
urlExt = ""
destFilenameSubs = [
( '%filename%', urlFilename ),
( '%title%', entry.title ),
( '%podcastname%', self.name ),
]
rv = self.destFilenameFormat
for search, replace in destFilenameSubs:
rv = rv.replace( search, replace )
rv = os.path.join( self.destPath, rv + urlExt )
rv = os.path.normpath( rv )
if not rv.startswith( self.destPath ):
raise Exception(
"MakeEpisodeFilename generated file outwith destination path"
)
return rv
|
birkin/dashboard
|
config/urls.py
|
Python
|
mit
| 1,021
| 0.026445
|
from dashboard_app import views
from django.conf.urls import include, url
from django.contrib i
|
mport admin
from django.views.generic import RedirectView
admin.autodiscover(
|
)
urlpatterns = [
## primary app urls...
url( r'^info/$', views.info, name='info_url' ),
url( r'^widgets/$', views.widgets_redirect, name='widgets_redirect_url' ),
url( r'^widgets/(?P<identifier>[^/]+)/$', views.widgets, name='widgets_url' ),
url( r'^widget_detail/(?P<identifier>[^/]+)/$', views.widget_detail, name='widget_detail_url' ),
url( r'^request_widget/$', views.request_widget, name='request_widget_url' ),
url( r'^tag/(?P<tag>[^/]+)/$', views.tag, name='tag_url' ),
url( r'^admin/', include(admin.site.urls) ),
## support urls...
url( r'^bul_search/$', views.bul_search, name='bul_search_url' ),
url( r'^login/$', views.login, name='login_url' ),
url( r'^logout/$', views.shib_logout, name='logout_url' ),
url( r'^$', RedirectView.as_view(pattern_name='info_url') ),
]
|
JunctionAt/JunctionWWW
|
blueprints/player_profiles/views/admin_reset.py
|
Python
|
agpl-3.0
| 1,619
| 0.002471
|
from flask_wtf import Form
from wtforms import HiddenField, StringField
from wtforms.validators import InputRequired, EqualTo
from flask_login import current_user, abort, login_required
from flask import r
|
equest, flash, redirect, render_template
import random
import bcrypt
from models.user_model import User
from .. import blueprint
class ResetForm(Form):
who = HiddenField()
|
confirm_who = StringField('Confirm Username', validators=[InputRequired(),
EqualTo('who')])
@blueprint.route("/reset/<what>", methods=["POST"])
@login_required
def reset(what):
if not current_user.has_permission('reset.{}'.format(what)):
abort(403)
form = ResetForm(request.form)
user = User.objects(name=form.who.data).first()
if user is None:
abort(401)
if form.validate():
if what == 'password':
password = ''.join(random.choice('0123456789abcdefghijklmnopqrstuvxyzABCDEFGHIJKLMNOPQRSTUVWXYZ') for i in range(16))
user.hash = bcrypt.hashpw(password, bcrypt.gensalt())
user.save()
return render_template('profile_reset_password_successful.html', user=user, password=password)
elif what == 'tfa':
user.tfa = False
user.tfa_secret = ''
user.save()
return render_template('profile_reset_tfa_successful.html', user=user)
else:
abort(401)
flash('Error in reset form. Make sure you are typing the confirmation token correctly.', category='alert')
return redirect(user.get_profile_url()), 303
|
pinakinathc/python_code
|
movies/entertainment_center.py
|
Python
|
gpl-3.0
| 1,497
| 0.028724
|
import media
import fresh_tomatoes
toy_story=media.Movie(
"Toy Story",
"A story of a boy and his toys that come to life",
"http://upload.wikimedia.org/wikipedia/en/1/13/Toy_Story.jpg",
"https://www.youtube.com/watch?v=vwyZH85NQC4")
#print (toy_story.storyline)
avatar=media.Movie(
"Avatar",
"A marine on an alien planet",
"http://upload.wikimedia.org/wikipedia/id/b/bO/Avatar-Teaser-Poster.jpg",
"http://www.youtube.com/watch?v=-9ceBgWV8io")
#print(avatar.storyline)
#avatar.show_trailor()
school_of_rock=media.Movie(
"Schools of Rock",
"Using rock music to learn",
"http://upload.wikipedia.org/wikipedia/en/1/11/School_of_Rock_poster.jpg",
"https://www.youtube.com/watch?v=3PsUJFEBC74")
ratatouille=media.Movie(
"Ratatouille",
"A rat is a chef in Paris",
"http://upload.wikimedia.org/wikipedia/en/5/50/RatatouillePoster.jpg",
"https://www.youtube.com/watch?v=c3sBBRxDAqk")
midnight_in_paris=media.Mov
|
ie(
"Midnight in PAris",
"Going back in time to meet authors",
"http://upload.wikime
|
dia.org/wikipedia/em/9/9f/Midnight_in_paris_Poster.jpg",
"https://www.youtube.com/watch?v=atLg2wQQxuU")
hunger_games=media.Movie(
"Hunger Games",
"A really real reality show",
"http://upload.wikimedia.org/wikipedia/en/4/42/Hunger-GamesPoster.jpg",
"https://www.youtube.com/watch?v=PbA63a7HObo")
movies=[toy_story,avatar,school_of_rock,ratatouille,midnight_in_paris,hunger_games]
fresh_tomatoes.open_movies_page(movies)
print (media.Movie.VALID_RATINGS)
print(media.Movie.__doc__)
|
Ritsyy/fjord
|
fjord/base/tests/test_views.py
|
Python
|
bsd-3-clause
| 5,907
| 0
|
import json
from django.test.utils import override_settings
import pytest
from pyquery import PyQuery
from fjord.base import views
from fjord.base.tests import (
LocalizingClient,
TestCase,
AnalyzerProfileFactory,
reverse
)
from fjord.base.views import IntentionalException
from fjord.search.tests import ElasticTestCase
class TestAbout(TestCase):
client_class = LocalizingClient
def test_about_view(self):
resp = self.client.get(reverse('about-view'))
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'about.html')
class TestLoginFailure(TestCase):
def test_login_failure_view(self):
resp = self.client.get(reverse('login-failure'))
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'login_failure.html')
resp = self.client.get(reverse('login-failure'), {'mobile': 1})
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'mobile/login_failure.html')
# Note: This needs to be an ElasticTestCase because the view does ES
# stuff.
class TestMonitorView(ElasticTestCase):
def test_monitor_view(self):
"""Tests for the monitor view."""
# TODO: When we add a mocking framework, we can mock this
# properly.
test_memcached = views.test_memcached
try:
with self.settings(
SHOW_STAGE_NOTICE=True,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', # noqa
'LOCATION': ['localhost:11211', 'localhost2:11211']
}
}):
# Mock the test_memcached function so it always returns
# True.
views.test_memcached = lambda host, port: True
# TODO: Replace when we get a mock library.
def mock_rabbitmq():
class MockRabbitMQ(object):
def connect(self):
return True
return lambda *a, **kw: MockRabbitMQ()
views.establish_connection = mock_rabbitmq()
# Request /services/monitor and make sure it returns
# HTTP 200 and that there aren't errors on the page.
resp = self.client.get(reverse('services-monitor'))
errors = [line for line in resp.content.splitlines()
if 'ERROR' in line]
assert resp.status_code == 200, '%s != %s (%s)' % (
resp.status_code, 200, repr(errors))
finally:
views.test_memcached = test_memcached
class TestFileNotFound(TestCase):
client_class = LocalizingClient
def test_404(self):
request = self.client.get('/a/path/that/should/never/exist')
assert request.status_code == 404
self.assertTemplateUsed(request, '404.html')
class TestServerError(TestCase):
@override_settings(SHOW_STAGE_NOTICE=True)
def test_500(self):
with pytest.raises(IntentionalException):
self.client.get('/services/throw-error')
class TestRobots(TestCase):
def test_robots(self):
resp = self.client.get('/robots.txt')
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'robots.txt')
class TestContribute(TestCase):
def test_contribute(self):
resp = self.client.get('/contribute.json')
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'contribute.json')
def test_contribute_if_valid_json(self):
resp = self.client.get('/contribute.json')
# json.loads throws a ValueError when contribute.json is invalid JSON.
json.loads(resp.content)
class TestNewUserView(ElasticTestCase):
def setUp(self):
super(TestNewUserView, self).setUp()
jane = AnalyzerProfileFactory().user
self.jane = jane
def test_redirect_to_dashboard_if_anonymous(self):
# AnonymousUser shouldn't get to the new-user-view, so make
# sure they get redirected to the dashboard.
resp = self.client.get(reverse('new-user-view'), follow=True)
assert resp.status_code == 200
self.assertTemplateNotUsed('new_user.html')
self.assertTemplateUsed('analytics/dashboard.html')
def test_default_next_url(self):
self.client_login_user(self.jane)
resp = self.client.get(reverse('new-user-view'))
assert resp.status_code == 200
self.assertTemplateUsed('new_user.html')
# Pull out next link
pq = PyQuery(resp.content)
next_url = pq('#next-url-link')
assert next_url.attr['href'] == '/en-US/' # this is the dashboard
def test_valid_next_url(self):
self.client_login_user(self.jane)
url = reverse('new-user-view')
resp = self.client.get(url, {
'next': '/ou812' # stretches the meaning of 'valid'
})
assert resp.status_code == 200
self.assertTemplateUsed('new_user.html')
# Pull out next link which is naughty, so it should have been
# replaced with a dashboard link.
pq = PyQuery(resp.content)
next_url = pq('#next-url-link')
assert next_url.a
|
ttr['href'] == '/ou812'
def test_sanitized_next_url(self):
self.client_login_user(self.jane)
url = reverse('new-user-view')
resp = self.client.get(url, {
'next': 'javascript:prompt%28document.cookie%29'
})
assert resp.status_code == 200
self.assertTemplateUsed('new_user.html')
# Pull out next link which is naughty, so it should have been
# replaced with a dashboard link.
pq = PyQuery(resp.content)
|
next_url = pq('#next-url-link')
assert next_url.attr['href'] == '/en-US/' # this is the dashboard
|
rayhu-osu/vcube
|
crowdshipping/urls.py
|
Python
|
mit
| 178
| 0.039326
|
from dj
|
ango.conf.urls import url, include
from . import views
urlpatterns = [
url(r'^$', views.index, name = 'index'),
url(r'^contact/$', views.contact, na
|
me = 'contact'),
]
|
faribas/RMG-Py
|
unittest/qm/qmverifierTest.py
|
Python
|
mit
| 1,393
| 0.015793
|
"""
Created on May 17, 2012
@author: nmvdewie
"""
import unittest
import rmgpy.qm.qmtp as qm
import os
import rmgpy.qm.qmverifier as verif
import rmgpy.molecule as mol
class Test(unittest.TestCase):
def testVerifierDoesNotExist(self):
molecule = mol.Molecule()
name = 'UMRZSTCPUPJPOJ-UHFFFAOYSA'
directory = os.path.join(os.path.dirname(__file__),'data','QMfiles')
InChIaug = 'InChI=1S/C7H12/c1-2-7-4-3-6(1)5-7/h6-7H,1-5H2'
molfile = qm.molFile(molecule, name, directory, InChIaug)
verifier = verif.QMVerifier(molfile)
verifier.verify()
self.assertFalse(verifier.succesfulJobExists())
def testVerifierMOPACResultExists(self):
molecule = mol.Molecule()
name = 'GRWFGVWFFZKLTI-UHFFFAOYAF'
directory = os.path.join(os.path.dirname(__file__),'data','QMfiles','MOPAC')
I
|
nChIaug = 'InChI=1/C10H16/c1-7-4-5-8-6-9(7)10(8,2)3/h4,8-9H,5-6H2,1-3H3'
molfile = qm.molFile(molecule, name, directory, InChIaug)
verifier = verif.QMVerifier(molfile)
verifier.verify()
self.assertTrue(verifier.succesfulJobExists())
self.assertTrue(verifier.mopacResultExists)
|
self.assertFalse(verifier.gaussianResultExists)
if __name__ == "__main__":
unittest.main( testRunner = unittest.TextTestRunner(verbosity=2) )
|
JamesMura/sentry
|
src/sentry/runner/commands/dsym.py
|
Python
|
bsd-3-clause
| 6,651
| 0
|
"""
sentry.runner.commands.dsym
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2015 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import uuid
import json
import click
import six
import warnings
import threading
from sentry.runner.decorators import configuration
SHUTDOWN = object()
def load_bundle(q, uuid, data, sdk_info, trim_symbols, demangle):
from sentry.models import DSymBundle, DSymObject, DSymSDK
from sentry.constants import MAX_SYM
from symsynd.demangle import demangle_symbol
def _process_symbol(sym):
too_long = trim_symbols and len(sym) > MAX_SYM
if demangle or too_long:
new_sym = demangle_symbol(sym)
if new_sym is not None and (len(new_sym) < sym or too_long):
sym = new_sym
if trim_symbols:
sym = sym[:MAX_SYM]
return sym
sdk = DSymSDK.objects.get_or_create(
dsym_type=sdk_info['dsym_type'],
sdk_name=sdk_info['sdk_name'],
version_major=sdk_info['version_major'],
version_minor=sdk_info['version_minor'],
version_patchlevel=sdk_info['version_patchlevel'],
version_build=sdk_info['version_build'],
)[0]
obj = DSymObject.objects.get_or_create(
cpu_name=data['arch'],
object_path='/' + data['image'].strip('/'),
uuid=six.text_type(uuid),
vmaddr=data['vmaddr'],
vmsize=data['vmsize'],
)[0]
DSymBundle.objects.get_or_create(
sdk=sdk,
object=obj
)[0]
step = 4000
symbols = data['symbols']
for idx in range(0, len(symbols) + step, step):
end_idx = min(idx + step, len(symbols))
batch = []
for x in range(idx, end_idx):
addr = symbols[x][0]
batch.append((obj.id, addr, _process_symbol(symbols[x][1])))
if batch:
yield batch
def process_archive(members, zip, sdk_info, threads=8, trim_symbols=False,
demangle=True):
from sentry.models import DSymSymbol
import Queue
q = Queue.Queue(threads)
def process_items():
while 1:
items = q.get()
if items is SHUTDOWN:
break
DSymSymbol.objects.bulk_insert(items)
pool = []
for x in range(threads):
t = threading.Thread(target=process_items)
t.setDaemon(True)
t.start()
pool.append(t)
for member in members:
try:
id = uuid.UUID(member)
except ValueError:
continue
for chunk in load_bundle(q.put, id, json.load(zip.open(member)),
sdk_info, trim_symbols, demangle):
q.put(chunk)
for t in pool:
q.put(SHUTDOWN)
for t in pool:
t.join()
@click.group(name='dsym')
def dsym():
"""Manage system symbols in Sentry.
This allows you to import and manage globally shared system symbols in
the Sentry installation. In particular this is useful for iOS where
system symbols need to be ingested before stacktraces can be fully
symbolized due to device optimizations.
"""
@dsym.command(name='import-system-symbols',
short_help='Import system debug symbols.')
@click.argument('bundles', type=click.Path(), nargs=-1)
@click.option('--threads', default=8, help='The number of threads to use')
@click.option('--trim-symbols', is_flag=True,
help='If enabled symbols are trimmed before storing. '
'This reduces the database size but means that symbols are '
'already trimmed on the way to the database.')
@click.option('--no-demangle', is_flag=True,
help='If this is set to true symbols are never demangled. '
'By default symbols are demangled if they are trimmed or '
'demangled symbols are shorter than mangled ones. Enabling '
'this option speeds up importing slightly.')
@configuration
def import_system_symbols(bundles, threads, trim_symbols, no_demangle):
"""Imports system symbols from preprocessed zip files into Sentry.
It takes a list of zip files as arguments that contain preprocessed
system symbol
|
information. These zip files contain JSON dumps. The
actual zipped up dsym files cannot be used here, they need to be
preprocessed.
"""
import zipfile
from sentry.utils.db import is_mysql
if threads != 1 and is_mysql():
warnings.warn(Warning('disabled threading for mysql'))
threads = 1
for path in bundles:
with zipfile.ZipFile(path) as f:
sdk_info = json.load(f.open('sdk_info'))
label = ('%s.%s.%s (%s)' % (
|
sdk_info['version_major'],
sdk_info['version_minor'],
sdk_info['version_patchlevel'],
sdk_info['version_build'],
)).ljust(18)
with click.progressbar(f.namelist(), label=label) as bar:
process_archive(bar, f, sdk_info, threads,
trim_symbols=trim_symbols,
demangle=not no_demangle)
@dsym.command(name='sdks', short_help='List SDKs')
@click.option('--sdk', help='Only include the given SDK instead of all.')
@click.option('--version', help='Optionally a version filter. For instance '
'9 returns all versions 9.*, 9.1 returns 9.1.* etc.')
@configuration
def sdks(sdk, version):
"""Print a list of all installed SDKs and a breakdown of the symbols
contained within. This queries the system symbol database and reports
all SDKs and versions that symbols exist for. The output is broken down
by minor versions, builds and cpu architectures. For each of those a
count of the stored bundles is returned. (A bundle in this case is a
single binary)
"""
from sentry.models import DSymSDK
last_prefix = None
click.secho(' %-8s %-10s %-12s %-8s %s' % (
'SDK',
'Version',
'Build',
'CPU',
'Bundles',
), fg='cyan')
click.secho('-' * click.get_terminal_size()[0], fg='yellow')
for sdk in DSymSDK.objects.enumerate_sdks(sdk=sdk, version=version):
prefix = ' %-8s %-10s ' % (
sdk['sdk_name'],
sdk['version']
)
if prefix == last_prefix:
prefix = ' ' * len(prefix)
else:
last_prefix = prefix
click.echo('%s%-12s %-8s %d' % (
prefix,
sdk['build'],
sdk['cpu_name'],
sdk['bundle_count'],
))
|
OpenDeployment/openstack-cloud-management
|
tools/checkOs_InstallStatus.py
|
Python
|
apache-2.0
| 6,462
| 0.023367
|
#!/usr/bin/python
## ################################################################################
## the package and lib that must install:
##
## OpenIPMI
## yum install OpenIPMI-python
##
## Pexpect:Version 3.3 or higher
## caution: a lower version will cause some error like "timeout nonblocking() in read" when you log to a host by ssh
## wget https://pypi.python.org/packages/source/p/pexpect/pexpect-3.3.tar.gz
## tar xvf pexpect-3.3.tar.gz
## cd pexpect-3.3
## python setup install
##
##
## Be aware: **
## 2014-08-24 : using multiprocessing.dummy to archieve multi-thread instead of multi-processing with multiprocessing
## in multi-process, the function pssh will cause error like "local variable 's' referenced before assignment"
##
## Cautions: *****
## 2014-08-30 : make sure that you delete the file ' /root/.ssh/konw_hosts ' after you reinstall the OS in host ,or you will
## meet the error as you can not load into the OS by pxssh,it will always show as error.
##
## Please don't execute this script in the host with some important sevices ,otherwise ,it may effect these services by deleting their ssl keys
## ################################################################################
import os
import sys
import pexpect
import pxssh
from multiprocessing.dummy import Pool
import subprocess
import OpenIPMI
import time
def pssh((hostname,username,password,cli)):
print 'host:%s,cli:%s' % (hostname,cli)
output=''
try:
s = pxssh.pxssh()
s.login(hostname,username,password)
s.sendline(cli)
s.expect(pexpect.EOF, timeout=None)
output=s.before
print output
except Exception,e:
print '\nException Occur in ssh to host %s ,Error is:\n %s' % (hostname, str(e))
finally:
s.close()
return [hostname,output]
def pxe((hostname,commandList)):
print "pxe %s" % hostname
result = 0
for command in commandList :
print 'pxe command:%s' % command
res=subprocess.call(command.split(" "))
if res == 1:
result = 1
print 'pxe error in host %s' % hostname
break
return [hostname, result]
def rebootAndInstall(hosts,timeinterval=15):
"""
a function to reboot the hosts ,using single-thread.
"""
# TimeInterval=15
RebootHostInPerInterval=1
with open('restartError.log','w') as file:
file.truncate()
while True:
for i in range(1,RebootHostInPerInterval+1) :
if hosts :
commandList = []
commandList.append("ipmitool -l lanplus -H %s -U admin -P admin chassis bootdev pxe" % (hosts[0]))
commandList.append("ipmitool -I lanplus -H %s -U admin -P admin power reset" % (hosts[0]))
result = pxe((hosts[0],commandList))
if result[1] == 1:
with open('restartError.log','a') as file:
file.write(result[0]+'\n')
#print 'host :%s ,restart state: %s' % (result[0],result[1])
del hosts[0]
if hosts:
time.sleep(timeinterval)
else:
break
def checkOsIsFresh(hosts,username,password,timeinterval=86400,multiProcessCount = 10):
"""
a function to check the hosts' os are new install one,using the multi-thread.
the default timeinterval that judge the
|
fresh os is default as 1 day.
return :
[errorList,oldOsHost]
"""
oldOsHost = []
errorList = []
cli = "stat /lost+found/ | grep Modify | awk -F ' ' {'print $2,$3,$4'};"
cli += "exit $?" ## auto logout
## delete the existed ssl public key for the new install host
with open('/root/.ssh/known_host','w') as file:
lines = file.truncate()
pool = Pool(processes=multiProcessCount)
res=pool.map_async(pssh,((host,username,password,
|
cli) for host in hosts))
result=res.get()
# import time
import datetime
import string
for output in result:
if output[1] and output[1] != '' :
timeArr=output[1].split('\n')[1].split(' ')
realTimeStruct = time.strptime(timeArr[0]+' '+timeArr[1].split('.')[0],'%Y-%m-%d %H:%M:%S')
realTime = datetime.datetime(*realTimeStruct[:6])
osInstallTime_UTC = None
utcDelta=string.atoi(timeArr[2][1:])
if '+' in timeArr[2]:
osInstallTime_UTC = realTime + datetime.timedelta(hours=-1*(utcDelta/100))
elif '-' in timeArr[2]:
osInstallTime_UTC = realTime + datetime.timedelta(hours=1*(utcDelta/100))
hostOsTimeList.append((output[0],osInstallTime_UTC))
else:
errorList.append(output[0])
print 'Host %s connection failed' % output[0]
curTime = datetime.datetime.utcnow()
print 'current Utc Time :%s' % curTime
for host in hostOsTimeList :
# print (curTime - host[1]).seconds
if (curTime - host[1]).seconds > NewOSFilterInterval :
print 'host %s \'OS is not a fresh one' % host[0]
oldOsHost.append(host[0])
if oldOsHost :
print 'These Hosts\' Os are not reinstall: \n'
print oldOsHost
pool.close()
pool.join()
return [errorList,oldOsHost]
if __name__ == '__main__':
hostList = []
errorList = []
hostOsTimeList = []
net='10.1.0.'
pxenet='10.0.0.'
username='root'
password='root'
#unit:second, be sure that the time in your host and server are normal.be regardless of time zone,the code will auto hanlde the timezone issue.
NewOSFilterInterval = 60 * 60 ##
for i in range(100,105+1):
hostList.append(net+str(i))
for i in range(129,144+1):
hostList.append(net+str(i))
result=checkOsIsFresh(hostList,username,password,NewOSFilterInterval)
print 'error'
print result[0]
print 'old'
print result[1]
# add host to the `reboot` list to reboot them ,in a single-thread function with a resonable time interval which you need to set according.
# the time interval avoid a shot to the power provider when lots of compute hosts need to restart.
waitRebootHost = result[1] #oldOsHost # errorList
reboot =[]
for host in waitRebootHost:
reboot.append(pxenet+host[7:])
rebootAndInstall(reboot)
|
DigitalArtsNetworkMelbourne/huemovie
|
lib/terminalsize.py
|
Python
|
mit
| 2,958
| 0.003719
|
#!/usr/bin/env python
# Source: https://gist.github.com/jtriley/1108174
import os
import shlex
import struct
import platform
import subprocess
class TerminalSize:
def get_terminal_size(self):
""" getTerminalSize()
- get width and height of consol
|
e
- works on linux,os x,windows,cygwin(windows)
originally retrieved from:
http://stackoverflow.com/questions/566746/how-to-get-console-wind
|
ow-width-in-python
"""
current_os = platform.system()
tuple_xy = None
if current_os == 'Windows':
tuple_xy = self._get_terminal_size_windows()
if tuple_xy is None:
tuple_xy = self._get_terminal_size_tput()
# needed for window's python in cygwin's xterm!
if current_os in ['Linux', 'Darwin'] or current_os.startswith('CYGWIN'):
tuple_xy = self._get_terminal_size_linux()
if tuple_xy is None:
print "default"
tuple_xy = (80, 25) # default value
return tuple_xy
def _get_terminal_size_windows(self):
try:
from ctypes import windll, create_string_buffer
# stdin handle is -10
# stdout handle is -11
# stderr handle is -12
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
(bufx, bufy, curx, cury, wattr,
left, top, right, bottom,
maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
sizex = right - left + 1
sizey = bottom - top + 1
return sizex, sizey
except:
pass
def _get_terminal_size_tput(self):
# get terminal width
# src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window
try:
cols = int(subprocess.check_call(shlex.split('tput cols')))
rows = int(subprocess.check_call(shlex.split('tput lines')))
return (cols, rows)
except:
pass
def _get_terminal_size_linux(self):
def ioctl_GWINSZ(fd):
try:
import fcntl
import termios
cr = struct.unpack('hh',
fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
return cr
except:
pass
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
try:
cr = (os.environ['LINES'], os.environ['COLUMNS'])
except:
return None
return int(cr[1]), int(cr[0])
|
7Robot/BeagleBone-Black
|
PROJETS/2013/FiveAxesArm/asserv/Motor.py
|
Python
|
gpl-2.0
| 3,370
| 0.026237
|
# -*-coding:Utf-8 -*
import Adafruit_BBIO.GPIO as GPIO
import Adafruit_BBIO.ADC as ADC
import Adafruit_BBIO.PWM as PWM
import time
from math import *
class Motor :
"""Classe définissant un moteur, caractérisé par :
- le rapport de cycle de son PWM
- la pin de son PWM
- son sens de rotation
- la pin de son sens de rotation
- la valeur de son potentiomètre
- la pin de son potentiomètre
- son angle actuel
- sa consigne angulaire
- son état (commandé par bouton ou asservi)"""
def __init__(self, nom, pinEnable, pinPwm, pinSens, pinPota, angleMin=-pi, angleMax=pi, potaMin=0.0, potaMax=1.0, consigneAngle=0.0, etat=0) :
"""Initialisation de l'instance de Motor"""
self.nom = nom
self.pinEnable = pinEnable
self.pwm = 0
self.pinPwm = pinPwm
self.sens = 0
self.pinSens = pinSens
self.pota = 0.0
self.pinPota = pinPota
self.angle = 0.0
self.consigneAngle = consigneAngle
self.etat = etat
self.sommeErreur = 0.0
# min et max des valeur du pota et correspondance en angle
self.angleMin = angleMin
self.angleMax = angleMax
self.potaMin = potaMin
self.potaMax =
|
potaMax
def getEcart(self) :
|
"""Renvoie l'écart entre la consigne angulaire et l'angle actuel"""
return self.consigneAngle - self.angle
def getCommande(self) :
"""Renvoie une commande pour asservir le moteur en angle.
La commande est comprise entre -100.0 et +100.0"""
# A FAIRE : pour l'instant juste proportionnel
# coeficient proportionnel
ecartPourMax = pi/2
coefProportionnel = 1000/ecartPourMax
# coef integral
coefIntegral = 1
self.sommeErreur += self.getEcart()
if self.sommeErreur > 100 :
self.sommeErreur = 100
elif self.sommeErreur < -100 :
self.sommeErreur = -100
# calcul de la commande
commande = (self.getEcart())*coefProportionnel + self.sommeErreur*coefIntegral
# Traitement du dépassement des valeurs autorisée(-100..100)
if commande < -100 :
commande = -100
elif commande > 100 :
commande = 100
else :
commande = commande
return commande
def commander(self, commande) :
"""Commander ce moteur avec une commande.
Attention, si la pin de sens est activée, l'architecture du pont en H fait que le cycle du PWM est inversé. Il faut donc en tenir compte et inverser le rapport cyclique du PWM"""
if commande >= 0 :
GPIO.output(self.pinSens, GPIO.LOW)
PWM.set_duty_cycle(self.pinPwm, commande)
self.pwm = commande
self.sens = 0
else :
GPIO.output(self.pinSens, GPIO.HIGH)
PWM.set_duty_cycle(self.pinPwm, commande + 100)
self.pwm = -commande
self.sens = 1
def majPota(self) :
"""Récupère la valeur du pota"""
#print ADC.read(self.pinPota)
self.pota = ADC.read(self.pinPota)
# attendre 2 ms au minimum pour que l'ADC se fasse correctement
time.sleep(0.002)
def majAngle(self) :
"""Transforme la valeur du pota en un angle en fonction des caractéristiques du pota.
self.pota doit etre à jour !"""
self.angle = self.angleMin + (self.pota-self.potaMin)*(self.angleMax-self.angleMin)/(self.potaMax-self.potaMin)
|
seecr/dc-erfgeo-enrich
|
digitalecollectie/erfgeo/pittoannotation.py
|
Python
|
gpl-2.0
| 7,137
| 0.004063
|
# -*- coding: utf-8 -*-
## begin license ##
#
# "Digitale Collectie ErfGeo Enrichment" is a service that attempts to automatically create
# geographical enrichments for records in "Digitale Collectie" (http://digitalecollectie.nl)
# by querying the ErfGeo search API (https://erfgeo.nl/search).
# "Digitale Collectie ErfGeo Enrichment" is developed for Stichting DEN (http://www.den.nl)
# and the Netherlands Institute for Sound and Vision (http://instituut.beeldengeluid.nl/)
# by Seecr (http://seecr.nl).
# The project is based on the open source project Meresco (http://meresco.org).
#
# Copyright (C) 2015-2017 Netherlands Institute for Sound and Vision http://instituut.beeldengeluid.nl/
# Copyright (C) 2015-2017 Seecr (Seek You Too B.V.) http://seecr.nl
# Copyright (C) 2015-2016 Stichting DEN http://www.den.nl
#
# This file is part of "Digitale Collectie ErfGeo Enrichment"
#
# "Digitale Collectie ErfGeo Enrichment" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# "Digitale Collectie ErfGeo Enrichment" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "Digitale Collectie ErfGeo Enrichment"; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
## end license ##
from xml.sax.saxutils import escape as xmlEscape
from urllib import urlencode
from lxml.etree import XML
from weightless.core import asString
from meresco.core import Observable
from digitalecollectie.erfgeo.utils import getitem, uriWithBase
from digitalecollectie.erfgeo.namespaces import namespaces, curieToUri
from digitalecollectie.erfgeo.uris import uris
from digitalecollectie.erfgeo.annotationprofiles import ERFGEO_ENRICHMENT_PROFILE
class PitToAnnotation(Observable):
def __init__(self, searchApiBaseUrl='https://api.erfgeo.nl/search', **kwargs):
Observable.__init__(self, **kwargs)
self._searchApiBaseUrl = searchApiBaseUrl
def toAnnotation(self, pit, targetUri=None, query=None, geoCoordinates=None):
uri = None
if targetUri:
uri = ERFGEO_ENRICHMENT_PROFILE.uriFor(targetUri)
return XML(asString(self._renderRdfXml(pit, uri=uri, targetUri=targetUri
|
, query=query, geoCoordinates=geoCoordinates)))
def _renderRdfXml(self, pit, uri, targetUri, query, geoCoordinates=None):
source = None
if query:
source = "%s?%s" % (self._searchApiBaseUrl, urlencode({'q': query}))
yield '''<rdf:RDF %(xmlns_rdf)s>\n''' % namespaces
annotationRdfAbout = ''
if uri:
annotationRdfAbout = ' rdf:about="%s"' % uri
yield ' <o
|
a:Annotation %(xmlns_oa)s %(xmlns_rdfs)s %(xmlns_dcterms)s %(xmlns_owl)s %(xmlns_hg)s %(xmlns_geos)s %(xmlns_geo)s' % namespaces
yield '%s\n>' % annotationRdfAbout
yield ' <oa:annotatedBy rdf:resource="%s"/>\n' % uris.idDigitaleCollectie
yield ' <oa:motivatedBy rdf:resource="%s"/>\n' % ERFGEO_ENRICHMENT_PROFILE.motive
if targetUri:
yield ' <oa:hasTarget rdf:resource="%s"/>\n' % targetUri
if source:
yield ' <dcterms:source rdf:resource="%s"/>\n' % xmlEscape(source)
if pit is None:
yield ' <dcterms:description>No PlaceInTime could be found for target record</dcterms:description>\n'
elif not geoCoordinates is None:
yield ' <dcterms:description>Geographical coordinates were already provided in original record</dcterms:description>\n'
else:
yield ' <dcterms:description>No ErfGeo search API query could be constructed from target record</dcterms:description>\n'
if not pit is None:
yield ' <oa:hasBody>\n'
yield ' <rdf:Description>\n'
yield ' <dcterms:spatial>\n'
yield self._renderPit(pit)
yield ' </dcterms:spatial>\n'
yield ' </rdf:Description>\n'
yield ' </oa:hasBody>\n'
elif not geoCoordinates is None:
geoLat, geoLong = geoCoordinates
yield ' <oa:hasBody>\n'
yield ' <rdf:Description>\n'
yield ' <geo:lat>%s</geo:lat>\n' % geoLat
yield ' <geo:long>%s</geo:long>\n' % geoLong
yield ' </rdf:Description>\n'
yield ' </oa:hasBody>\n'
yield ' </oa:Annotation>\n'
yield '</rdf:RDF>\n'
def _renderPit(self, pit):
yield '<hg:PlaceInTime rdf:about="%s">\n' % xmlEscape(pit['@id'])
type = pit.get('type')
if type:
yield ' <rdf:type rdf:resource="%s"/>\n' % xmlEscape(curieToUri(type))
name = pit.get('name')
if name:
yield ' <rdfs:label>%s</rdfs:label>\n' % xmlEscape(name)
yield self._renderPartOf(pit)
owlSameAs = pit.get('uri')
if owlSameAs:
yield ' <owl:sameAs rdf:resource="%s"/>\n' % xmlEscape(owlSameAs)
sameHgConceptRelations = getitem(pit.get('relations'), 'hg:sameHgConcept', [])
for sameHgConcept in sameHgConceptRelations:
yield self._renderSameHgConcept(uriWithBase(sameHgConcept['@id'], pit['@base']))
hasBeginning = pit.get('hasBeginning')
if hasBeginning:
yield '<hg:hasBeginning>%s</hg:hasBeginning>\n' % hasBeginning
hasEnd = pit.get('hasEnd')
if hasEnd:
yield '<hg:hasEnd>%s</hg:hasEnd>\n' % hasEnd
geometry = pit['geometry']
if geometry:
yield self._renderGeometry(geometry)
yield '</hg:PlaceInTime>\n'
def _renderPartOf(self, pit):
woonplaatsnaam = getitem(pit.get('data'), 'woonplaatsnaam')
if woonplaatsnaam:
yield '''\
<dcterms:isPartOf>
<hg:Place>
<rdfs:label>%s</rdfs:label>
</hg:Place>
</dcterms:isPartOf>\n''' % xmlEscape(woonplaatsnaam)
gemeentenaam = getitem(pit.get('data'), 'gme_naam')
if gemeentenaam:
yield '''\
<dcterms:isPartOf>
<hg:Municipality>
<rdfs:label>%s</rdfs:label>
</hg:Municipality>
</dcterms:isPartOf>\n''' % xmlEscape(gemeentenaam)
def _renderSameHgConcept(self, concept):
yield ' <hg:sameHgConcept rdf:resource="%s"/>\n' % concept
def _renderGeometry(self, geometry):
yield ' <geos:hasGeometry>\n'
yield ' <rdf:Description>\n'
yield ' <geos:asWKT>%s</geos:asWKT>\n' % geometry.asWkt()
yield ' </rdf:Description>\n'
yield ' </geos:hasGeometry>\n'
|
HPENetworking/topology_docker
|
lib/topology_docker/shell.py
|
Python
|
apache-2.0
| 2,063
| 0
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Docker shell helper class module.
"""
from __futu
|
re__ import unicode_literals, absolute_import
from __future__ import print_function, division
from topology.platforms.shell import PExpectShell, PExpectBashShell
class DockerExecMixin(object):
"""
Docker `
|
`exec`` connection mixin for the Topology shell API.
This class implements a ``_get_connect_command()`` method that allows to
interact with a shell through a ``docker exec`` interactive command, and
extends the constructor to request for container related parameters.
:param str container: Container unique identifier.
:param str command: Command to be executed with the ``docker exec`` that
will launch an interactive session.
"""
def __init__(self, container, command, *args, **kwargs):
self._container = container
self._command = command
super(DockerExecMixin, self).__init__(*args, **kwargs)
def _get_connect_command(self):
return 'docker exec -i -t {} {}'.format(
self._container, self._command
)
class DockerShell(DockerExecMixin, PExpectShell):
"""
Generic ``docker exec`` shell for unspecified interactive session.
"""
class DockerBashShell(DockerExecMixin, PExpectBashShell):
"""
Specialized ``docker exec`` shell that will run and setup a bash
interactive session.
"""
__all__ = ['DockerShell', 'DockerBashShell']
|
lcoandrade/DsgTools
|
core/DSGToolsProcessingAlgs/Algs/OtherAlgs/fileInventoryAlgorithm.py
|
Python
|
gpl-2.0
| 8,002
| 0.0015
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
DsgTools
A QGIS plugin
Brazilian Army Cartographic Production Tools
-------------------
begin : 2019-01-04
git sha : $Format:%H$
copyright : (C) 2018 by Philipe Borba - Cartographic Engineer @ Brazilian Army
(C) 2015 by Luiz Andrade - Cartographic Engineer @ Brazilian Army
email : borba.philipe@eb.mil.br
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from DsgTools.core.GeometricTools.layerHandler import LayerHandler
from DsgTools.core.Factories.ThreadFactory.threadFactory import ThreadFactory
from ...algRunner import AlgRunner
import processing, os, requests
from PyQt5.QtCore import QCoreApplication
from qgis.core import (QgsProcessing,
QgsFeatureSink,
QgsProcessingAlgorithm,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterFeatureSink,
QgsFeature,
QgsDataSourceUri,
QgsProcessingOutputVectorLayer,
QgsProcessingParameterVectorLayer,
QgsWkbTypes,
QgsProcessingParameterBoolean,
QgsProcessingParameterEnum,
QgsProcessingParameterNumber,
QgsProcessingParameterMultipleLayers,
QgsProcessingUtils,
QgsSpatialIndex,
QgsGeometry,
QgsProcessingParameterField,
QgsProcessingMultiStepFeedback,
QgsProcessingParameterFolderDestination,
QgsProcessingParameterExpression,
QgsProcessingException,
QgsProcessingParameterString,
QgsProcessingParameterDefinition,
QgsProcessingParameterType,
QgsProcessingParameterMatrix,
QgsProcessingParameterFile,
QgsCoordinateReferenceSystem,
QgsFields)
class FileInventoryAlgorithm(QgsProcessingAlgorithm):
INPUT_FOLDER = 'INPUT_FOLDER'
ONLY_GEO = 'ONLY_GEO'
SEARCH_TYPE = 'SEARCH_TYPE'
FILE_FORMATS = 'FILE_FORMATS'
TYPE_LIST = 'TYP
|
E_LIST'
COPY_FILES = 'COPY_FILES'
COPY_FOLDER= 'COPY_FOLDER'
OUTPUT = 'OUTPUT'
|
def initAlgorithm(self, config):
"""
Parameter setting.
"""
self.addParameter(
QgsProcessingParameterFile(
self.INPUT_FOLDER,
self.tr('Input folder'),
behavior=QgsProcessingParameterFile.Folder
)
)
self.addParameter(
QgsProcessingParameterBoolean(
self.ONLY_GEO,
self.tr('Search only georreferenced files'),
defaultValue=True
)
)
self.searchTypes = [
'Search only listed formats',
'Exclude listed formats'
]
self.addParameter(
QgsProcessingParameterEnum(
self.SEARCH_TYPE,
self.tr('Search type'),
options=self.searchTypes,
defaultValue=0
)
)
self.addParameter(
QgsProcessingParameterMatrix(
self.FILE_FORMATS,
self.tr('Formats'),
headers=[self.tr('File Formats')],
numberRows=1,
defaultValue=['shp','tif']
)
)
self.addParameter(
QgsProcessingParameterBoolean(
self.COPY_FILES,
self.tr('Copy files to output'),
defaultValue=False
)
)
self.addParameter(
QgsProcessingParameterFolderDestination(
self.COPY_FOLDER,
self.tr('Copy files to folder'),
optional=True,
defaultValue=None
)
)
self.addParameter(
QgsProcessingParameterFeatureSink(
self.OUTPUT,
self.tr('Inventory layer')
)
)
def processAlgorithm(self, parameters, context, feedback):
"""
Here is where the processing itself takes place.
"""
inventory = ThreadFactory().makeProcess('inventory')
inputFolder = self.parameterAsString(parameters, self.INPUT_FOLDER, context)
if inputFolder is None:
raise QgsProcessingException(self.tr('Invalid input folder.'))
file_formats = self.parameterAsMatrix(parameters, self.FILE_FORMATS, context)
copyFolder = self.parameterAsString(parameters, self.COPY_FOLDER, context)
onlyGeo = self.parameterAsBool(parameters, self.ONLY_GEO, context)
copyFiles = self.parameterAsBool(parameters, self.COPY_FILES, context)
sinkFields = QgsFields()
for field in inventory.layer_attributes:
sinkFields.append(field)
(output_sink, output_dest_id) = self.parameterAsSink(
parameters,
self.OUTPUT,
context,
sinkFields,
QgsWkbTypes.Polygon,
QgsCoordinateReferenceSystem(4326)
)
featList = inventory.make_inventory_from_processing(
inputFolder,
file_formats,
make_copy=copyFiles,
onlyGeo=onlyGeo,
destination_folder=copyFolder
)
output_sink.addFeatures(featList, QgsFeatureSink.FastInsert)
return {'OUTPUT':output_dest_id}
def name(self):
"""
Returns the algorithm name, used for identifying the algorithm. This
string should be fixed for the algorithm, and must not be localised.
The name should be unique within each provider. Names should contain
lowercase alphanumeric characters only and no spaces or other
formatting characters.
"""
return 'runfileinventory'
def displayName(self):
"""
Returns the translated algorithm name, which should be used for any
user-visible display of the algorithm name.
"""
return self.tr('Run File Inventory')
def group(self):
"""
Returns the name of the group this algorithm belongs to. This string
should be localised.
"""
return self.tr('Other Algorithms')
def groupId(self):
"""
Returns the unique ID of the group this algorithm belongs to. This
string should be fixed for the algorithm, and must not be localised.
The group id should be unique within each provider. Group id should
contain lowercase alphanumeric characters only and no spaces or other
formatting characters.
"""
return 'DSGTools: Other Algorithms'
def tr(self, string):
return QCoreApplication.translate('FileInventoryAlgorithm', string)
def createInstance(self):
return FileInventoryAlgorithm()
|
rg3/captlog
|
setup.py
|
Python
|
cc0-1.0
| 1,110
| 0.000901
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of captlog.
#
# captlog - The Captain's Log (secret diary and notes application)
#
# Written in 2013 by Ricardo Garcia <r@rg3.name>
#
# To the extent possible under law, the author(s) have dedicated all copyright
# and related and neighboring rights to this software to the public domain
# worldwide. This software is distributed without any warranty.
|
#
# You should have received a copy of the CC0 Public Domain Dedication along with
# this software. If not, see
# <http://creativecommons.org/publicdomain/zero/1.0/>.
from distutils.core import setup
VERSION = '0.1.1'
setup(
name='captlog',
version=VERSION,
description="The Captain's Log (secret diary and notes application)",
author='Ricardo Garcia',
author_email='r@rg3.name',
url='http://www.github.com/rg3/captlog',
packages=['CaptainsL
|
og'],
package_dir={'CaptainsLog': 'src/lib'},
package_data={'CaptainsLog': ['pixmap/*']},
scripts=['src/bin/captlog'],
provides=['CaptainsLog (%s)' % (VERSION, )],
requires=['Crypto (>=2.6)'],
)
|
nsgomez/vboxmanager
|
models.py
|
Python
|
mit
| 539
| 0.007421
|
from peewee import *
from playhouse.sqlite_ext import SqliteExtDatabase
db = SqliteExtDatabase('store/virus_manager.db',
threadlocals=True)
class BaseModel(Model):
class Meta:
database = db
class ManagedMachine(BaseModel):
image_name = TextField(unique=True)
reference_image = TextField()
creation_time = IntegerField()
class Infection(BaseModel):
name = TextField()
|
machine = ForeignKeyField(ManagedMachine,
related_name='infections')
db.create_t
|
ables([ManagedMachine, Infection], True)
|
Pringley/basinweb
|
basin/views.py
|
Python
|
mit
| 1,780
| 0.004494
|
from django.shortcuts import render
from rest_framework import viewsets
from basin.models import Task
from basin.serializers import TaskSerializer
def index(request):
context = {}
return render(request, 'index.html', context)
def display(request):
state = 'active'
if request.method == 'POST':
state = request.POST['state']
submit = request.POST['submit']
tid = request.POST['id']
if submit == 'check':
task = Task.objects.get(id=tid)
task.completed = not task.completed
task.save()
elif request.method == 'GET':
if 'state' in request.GET:
state = request.GET['state']
context = {
'task_list': Task.objects.state(state),
'state': state,
}
return render(request, 'display.html', context)
class ActiveViewSet(viewsets.ModelViewSet):
|
queryset = Task.objects
|
.active()
serializer_class = TaskSerializer
class SleepingViewSet(viewsets.ModelViewSet):
queryset = Task.objects.sleeping()
serializer_class = TaskSerializer
class BlockedViewSet(viewsets.ModelViewSet):
queryset = Task.objects.blocked()
serializer_class = TaskSerializer
class DelegatedViewSet(viewsets.ModelViewSet):
queryset = Task.objects.delegated()
serializer_class = TaskSerializer
class CompletedViewSet(viewsets.ModelViewSet):
queryset = Task.objects.filter(completed=True, trashed=False)
serializer_class = TaskSerializer
class TaskViewSet(viewsets.ModelViewSet):
model = Task
serializer_class = TaskSerializer
def get_queryset(self):
if 'state' in self.request.QUERY_PARAMS:
state = self.request.QUERY_PARAMS['state']
return Task.objects.state(state)
return Task.objects.all()
|
your-favorite-hacker/shellcode
|
x86_32/Example_Code/ascii_converter.py
|
Python
|
gpl-3.0
| 331
| 0.018127
|
#!/usr/bin/env python
#
# ascii
|
converter for shellcoding-lab at hack4
# ~dash in 2014
#
import sys
import binascii
text = sys.argv[1]
def usage():
print "./%s <string2convert>" % (sys.argv[0])
if len(sys.argv)<2:
usage()
exit()
val = binascii.hexlify(text[::-1])
print "Stringlen: %d" % len(text)
print "String: %s" % val
| |
antoinecarme/pyaf
|
tests/artificial/transf_Logit/trend_MovingMedian/cycle_12/ar_/test_artificial_1024_Logit_MovingMedian_12__0.py
|
Python
|
bsd-3-clause
| 264
| 0.087121
|
import pyaf.Bench.TS_da
|
tasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D'
|
, seed = 0, trendtype = "MovingMedian", cycle_length = 12, transform = "Logit", sigma = 0.0, exog_count = 0, ar_order = 0);
|
iamsteadman/bambu-attachments
|
bambu_attachments/apps.py
|
Python
|
apache-2.0
| 104
| 0.009615
|
from django.apps import AppConfig
cl
|
ass AttachmentsConfig(AppConfig)
|
:
verbose_name = 'Attachments'
|
LeonRave/Tarea_Git
|
a.py
|
Python
|
mit
| 66
| 0.075758
|
def suma(a, b):
retur
|
n a+b
def resta(a,
|
b):
return a+b
|
bijaydev/Implementation-of-Explicit-congestion-notification-ECN-in-TCP-over-wireless-network-in-ns-3
|
src/flow-monitor/bindings/modulegen__gcc_ILP32.py
|
Python
|
gpl-2.0
| 454,665
| 0.015106
|
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
root_module = Module('ns.flow_monitor', cpp_namespace='::ns3')
return root_module
def register_types(module):
root_module = module.get_root()
## address.h (module 'network'): ns3::Address [class]
module.add_class('Address', import_from_module='ns.network')
## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
## buffer.h (module 'network'): ns3::Buffer [class]
module.add_class('Buffer', import_from_module='ns.network')
## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
## packet.h (module 'network'): ns3::ByteTagIterator [class]
module.add_class('ByteTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
module.add_class('ByteTagList', import_from_module='ns.network')
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## event-id.h (module 'core'): ns3::EventId [class]
module.add_class('EventId', import_from_module='ns.core')
## flow-monitor-helper.h (module 'flow-monitor'): ns3::FlowMonitorHelper [class]
module.add_class('FlowMonitorHelper')
## hash.h (module 'core'): ns3::Hasher [class]
module.add_class('Hasher', import_from_module='ns.core')
## histogram.h (module 'flow-monitor'): ns3::Histogram [class]
module.add_class('Histogram')
## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress [class]
module.add_class('Inet6SocketAddress', import_from_module='ns.network')
## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress [class]
root_module['ns3::Inet6SocketAddress'].implicitly_converts_to(root_module['ns3::Address'])
## inet-socket-address.h (module 'network'): ns3::InetSocketAddress [class]
module.add_class('InetSocketAddress', import_from_module='ns.network')
## inet-socket-address.h (module 'network'): ns3::InetSocketAddress [class]
root_module['ns3::InetSocketAddress'].implicitly_converts_to(root_module['ns3::Address'])
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
module.add_class('Ipv4Address', import_from_module='ns.network')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress [class]
module.add_class('Ipv4InterfaceAddress', import_from_module='ns.internet')
## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e [enumeration]
module.add_enum('InterfaceAddressScope_e', ['HOST', 'LINK', 'GLOBAL'], outer_class=root_module['ns3::Ipv4InterfaceAddress'], import_from_module='ns.internet')
## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
module.add_class('Ipv4Mask', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
module.add_class('Ipv6Address', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress [class]
module.add_class('Ipv6InterfaceAddress', import_from_module='ns.internet')
## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress::State_e [enumeration]
module.add_enum('State_e', ['TENTATIVE', 'DEPRECATED', 'PREFERRED', 'PERMANENT', 'HOMEADDRESS', 'TENTATIVE_OPTIMISTIC', 'INVALID'], outer_class=root_module['ns3::Ipv6InterfaceAddress'], import_from_module='ns.internet')
## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress::Scope_e [enumeration]
module.add_enum('Scope_e', ['HOST', 'LINKLOCAL', 'GLOBAL'], outer_class=root_module['ns3::Ipv6InterfaceAddress'], import_from_module='ns.internet')
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
module.add_class('Ipv6Prefix', import_from_module='ns.network')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
module.add_class('Mac48Address', import_from_module='ns.network')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
## node-container.h (module 'network'): ns3::NodeContainer [class]
module.add_class('NodeContainer', import_from_module='ns.network')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## object-factory.h (module 'core'): ns3::ObjectFactory [class]
module.add_class('ObjectFactory', import_from_module='ns.core')
## packet-metadata.h (module 'network'): ns3::PacketMetadata [class]
module.add_class('PacketMetadata', import_from_module='ns.network')
## p
|
acket-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetad
|
ata'])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration]
module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class]
module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet.h (module 'network'): ns3::PacketTagIterator [class]
module.add_class('PacketTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::PacketTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
## packet-tag-list.h (module 'network'): ns3::PacketTagList [class]
module.add_class('PacketTagList', import_from_module='ns.network')
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct]
module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData_e [enumeration]
module.add_enum('TagData_e', ['MAX_SIZE'], outer_class=root_module['ns3
|
OpenDA-Association/OpenDA
|
model_bmi/java/test/org/openda/model_bmi/testData/wflow_bin/wflow/stats.py
|
Python
|
lgpl-3.0
| 24,590
| 0.019113
|
#!/usr/local/bin/python
#
# Created on July 10, 2000
# by Keith Cherkauer
#
# This python script computes several standard statistics on arrays
# of values
#
# Functions include:
# get_mean
# get_median
# get_var
# get_stdev
# get_skew
# get_sum
# get_min
# get_max
# get_count_over_threshold
# get_quantile
# get_running_average
# get_running_slope
# get_bias
# get_root_mean_square
# get_mean_absolute_error
# get_max_absolute_error
# get_nash_sutcliffe
# get_peak_diff
# get_number_of_sign_changes
# get_peak_threshold_diff
# get_covariance
# get_correlation
# get_cross_correlation
# filter_threshold
# get_days
# get_last_day
# get_first_day
# get_box_plot_parameters
#
import sys
# Import math library
from math import sqrt
from math import fabs
from numpy import isnan
NoDataVal = -999
SmallValue = 1.e-10
def get_mean(values, N="", NoData=NoDataVal, Skip = ""):
"""This function computes the mean or average of an array of values
after filtering out the NoData values. It returns both the mean
value and the number of valid data points used. The mean value
is set to the NoData value if there are no valid data points.
If Skip is set, then values equal to skip are included in the count
of active data, but not included in the calculation of the mean.
An example of when this would be used is for computing average
snow cover, where 0 indicates a valid measurement but does not
contribute to a meaningful measurement of snow cover."""
if not N: N = len(values)
mean = 0
Nact = 0
Nskip = 0
for i in range(N):
if values[i] != NoData and not isnan(values[i]):
if Skip and values[i] == Skip:
Nskip = Nskip + 1
else:
mean = mean + values[i]
Nact= Nact + 1
if Nact-Nskip > 0:
mean = mean / ( Nact - Nskip )
else:
mean = NoData
return ( mean, Nact )
def get_median(values, N="", NoData=NoDataVal):
"""This function computes the median of an array of values
after filtering out the NoData values. It returns both the median
value and the number of valid data points used. The median value
is set to the NoData value if there are no valid data points."""
if not N: N = len(values)
new_value = []
Nact = 0
for i in range(N):
if values[i] != NoData and not isnan(values[i]):
new_value = new_value + [ values[i] ]
Nact= Nact + 1
if Nact > 0:
new_value.sort()
if Nact % 2 == 0:
median = ( new_value[int(Nact/2)] + new_value[int(Nact/2)] ) / 2.
else:
median = new_value[int(Nact/2)]
else:
median = NoData
return ( median, N
|
act )
def
|
get_var(values, N="", mean="", NoData=NoDataVal):
"""This function computes the variance of an array of values after
filtering out the NoData values. The mean value of the array
must be provided to the routine. It returns both the variance
value and the number of valid data points used. The variance
is set to the NoData value if there are no valid data points."""
if not N: N = len(values)
if not mean: mean = get_mean(values,N,NoData)[0]
var = 0
Nact = 0
for i in range(N):
if values[i] != NoData and not isnan(values[i]):
var = var + (values[i] - mean) * (values[i] - mean)
Nact = Nact + 1
if Nact > 1:
var = var / (Nact-1)
else:
var = NoData
return ( var, Nact )
def get_stdev(values, N="", mean="", NoData=NoDataVal):
"""This function computes the standard deviation of an array of
values after filtering out the NoData values. The mean of the
array must be provided to the routine. It returns both
the standard deviation value and the number of valid data
points used. The standard deviation is set to the NoData value
if there are no valid data points."""
if not N: N = len(values)
if not mean: mean = get_mean(values,N=N,NoData=NoData)[0]
stdev = 0
Nact = 0
for i in range(N):
if values[i] != NoData and not isnan(values[i]):
stdev = stdev + (values[i] - mean) * (values[i] - mean)
Nact = Nact + 1
if Nact > 1:
stdev = stdev / (Nact-1)
stdev = sqrt(stdev)
else:
stdev = NoData
return ( stdev, Nact )
def get_skew(values, N="", mean="", stdev="", NoData=NoDataVal):
"""This function computes the skewness of an array of values after
filtering out the NoData values. The mean and standard deviation
of the array must be provided to the routine. It returns both
the skewness value and the number of valid data points used. The
skewness is set to the NoData value if there are no valid data
points."""
if not N: N = len(values)
if not mean: mean = get_mean(values,N,NoData)[0]
if not stdev: stdev = get_stdev(values,N,mean,NoData)[0]
skew = 0
Nact = 0
for i in range(N):
if values[i] != NoData and not isnan(values[i]):
skew = skew + (values[i] - mean) ** 3
Nact = Nact + 1
if (stdev**3*(Nact-1)*(Nact-2)) != 0:
skew = (skew*Nact)/(stdev**3*(Nact-1)*(Nact-2))
else:
skew = NoData
return ( skew, Nact )
def get_sum(values, N="", NoData=NoDataVal):
"""This function computes the sum of an array of values after
filtering out the NoData values. It returns both the sum value
and the number of valid data points used. The sum is set to
the NoData value if there are no valid data points."""
if not N: N = len(values)
sum = 0
Nact = 0
for i in range(N):
if values[i] != NoData and not isnan(values[i]):
sum = sum + values[i]
Nact = Nact + 1
if Nact == 0:
sum = NoData
return ( sum, Nact )
def get_min(values, N="", NoData=NoDataVal):
"""This function finds the minimum value of an array after
filtering out the NoData values. It returns both the
minimum value and the number of valid data points used.
The minimum is set to the NoData value if there are no
valid data points."""
if not N: N = len(values)
pos = 0
while pos < N and values[pos] == NoData:
pos = pos + 1
if pos < N:
Nact = 1
min = values[pos]
minpos = pos
for i in range(pos,N):
if values[i] != NoData and not isnan(values[i]):
if values[i] < min:
min = values[i]
minpos = i
Nact = Nact + 1
if Nact == 0:
min = NoData
else:
min = NoData
minpos = NoData
Nact = 0
return ( min, Nact, minpos )
def get_max(values, N="", NoData=NoDataVal):
"""This function finds the maximum value of an array after
filtering out the NoData values. It returns both the
maximum value and the number of valid data points used.
The maximum is set to the NoData value if there are no
valid data points."""
if not N: N = len(values)
pos = 0
while pos < N and values[pos] == NoData:
pos = pos + 1
if pos < N:
max = values[pos]
maxpos = 0
Nact = 0
for i in range(pos,N):
if values[i] != NoData and not isnan(values[i]):
if values[i] > max:
max = values[i]
maxpos = i
Nact = Nact + 1
if Nact == 0:
max = NoData
else:
max = NoData
maxpos = NoData
Nact = 0
return ( max, Nact, maxpos )
def get_count_over_threshold(values, threshold, N="", NoData=NoDataVal):
"""This function determines the number of values that are equal to
or exceed the given threshold. Values equal to NoData are not
included in the count and the number of valid values is returned
along with the over threshold count."""
if not N: N = len(values)
count = 0
Nact = 0
for i in range(N):
if values[i] != NoData and not isnan(values[i]):
if values[i] >= threshold:
count = count + 1
Nact = Nact + 1
if Nact == 0:
count = NoData
return ( count, Nact )
def get_qua
|
tracfm/tracfm
|
tracfm/polls/migrations/0010_auto__add_field_poll_detailed_chart.py
|
Python
|
agpl-3.0
| 13,004
| 0.008151
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Poll.detailed_chart'
db.add_column('polls_poll', 'detailed_chart', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
def backwards(self, orm):
# Deleting field 'Poll.detailed_chart'
db.delete_column('polls_poll', 'detailed_chart')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'polls.poll': {
'Meta': {'ordering': "('-id',)", 'object_name': 'Poll'},
'always_update': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'category_set': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'category_set'", 'null': 'True', 'to': "orm['polls.PollCategorySet']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'demographic': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {}),
'detailed_chart': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ended': ('django.db.models.fields.DateTimeF
|
ield', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
|
'message': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'secondary_category_set': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'secondary_category_set'", 'null': 'True', 'to': "orm['polls.PollCategorySet']"}),
'secondary_template': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'secondary_template'", 'null': 'True', 'to': "orm['polls.PollCategorySet']"}),
'started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'template'", 'null': 'True', 'to': "orm['polls.PollCategorySet']"}),
'unknown_message': ('django.db.models.fields.CharField', [], {'max_length': '160'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'polls.pollcategory': {
'Meta': {'unique_together': "(('name', 'category_set'),)", 'object_name': 'PollCategory'},
'category_set': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'categories'", 'to': "orm['polls.PollCategorySet']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'longitude': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'polls.pollcategoryset': {
'Meta': {'object_name': 'PollCategorySet'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'poll': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['polls.Poll']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'polls.pollkeyword': {
'Meta': {'object_name': 'PollKeyword'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'poll': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'keywords'", 'to': "orm['polls.Poll']"})
},
'polls.pollresponse': {
'Meta': {'ordering': "('-id',)", 'object_name': 'PollResponse'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_responses'", 'null': 'True', 'to': "orm['polls.PollCategory']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rapidsms_httprouter.Message']"}),
'poll': ('
|
rohitranjan1991/home-assistant
|
homeassistant/components/econet/__init__.py
|
Python
|
mit
| 5,194
| 0.000963
|
"""Support for EcoNet products."""
from datetime import timedelta
import logging
from aiohttp.client_exceptions import ClientError
from pyeconet import EcoNetApiInterface
from pyeconet.equipment import EquipmentType
from pyeconet.errors import (
GenericHTTPError,
InvalidCredentialsError,
InvalidResponseFormat,
PyeconetError,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_EMAIL, CONF_PASSWORD, TEMP_FAHRENHEIT, Platform
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.dispatcher import dispatcher_send
from homeassistant.helpers.entity import DeviceInfo, Entity
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.typing import ConfigType
from .const import API_CLIENT, DOMAIN, EQUIPMENT
_LOGGER = logging.getLogger(__name__)
PLATFORMS = [
Platform.CLIMATE,
Platform.BINARY_SENSOR,
Platform.SENSOR,
Platform.WATER_HEATER,
]
PUSH_UPDATE = "econet.push_update"
INTERVAL = timedelta(minutes=60)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the EcoNet component."""
hass.data[DOMAIN] = {}
hass.data[DOMAIN][API_CLIENT] = {}
hass.data[DOMAIN][EQUIPMENT] = {}
return True
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
"""Set up EcoNet as config entry."""
email = config_entry.data[CONF_EMAIL]
password = config_entry.data[CONF_PASSWORD]
try:
api = await EcoNetApiInterface.login(email, password=password)
except InvalidCredentialsError:
_LOGGER.error("Invalid credentials provided")
return False
except PyeconetError as err:
_LOGGER.error("Config entry failed: %s", err)
raise ConfigEntryNotReady from err
try:
equipment = await api.get_equipment_by_type(
[EquipmentType.WAT
|
ER_HEATER, EquipmentType.THERMOSTAT]
|
)
except (ClientError, GenericHTTPError, InvalidResponseFormat) as err:
raise ConfigEntryNotReady from err
hass.data[DOMAIN][API_CLIENT][config_entry.entry_id] = api
hass.data[DOMAIN][EQUIPMENT][config_entry.entry_id] = equipment
hass.config_entries.async_setup_platforms(config_entry, PLATFORMS)
api.subscribe()
def update_published():
"""Handle a push update."""
dispatcher_send(hass, PUSH_UPDATE)
for _eqip in equipment[EquipmentType.WATER_HEATER]:
_eqip.set_update_callback(update_published)
for _eqip in equipment[EquipmentType.THERMOSTAT]:
_eqip.set_update_callback(update_published)
async def resubscribe(now):
"""Resubscribe to the MQTT updates."""
await hass.async_add_executor_job(api.unsubscribe)
api.subscribe()
async def fetch_update(now):
"""Fetch the latest changes from the API."""
await api.refresh_equipment()
config_entry.async_on_unload(async_track_time_interval(hass, resubscribe, INTERVAL))
config_entry.async_on_unload(
async_track_time_interval(hass, fetch_update, INTERVAL + timedelta(minutes=1))
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a EcoNet config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN][API_CLIENT].pop(entry.entry_id)
hass.data[DOMAIN][EQUIPMENT].pop(entry.entry_id)
return unload_ok
class EcoNetEntity(Entity):
"""Define a base EcoNet entity."""
def __init__(self, econet):
"""Initialize."""
self._econet = econet
async def async_added_to_hass(self):
"""Subscribe to device events."""
await super().async_added_to_hass()
self.async_on_remove(
self.hass.helpers.dispatcher.async_dispatcher_connect(
PUSH_UPDATE, self.on_update_received
)
)
@callback
def on_update_received(self):
"""Update was pushed from the ecoent API."""
self.async_write_ha_state()
@property
def available(self):
"""Return if the the device is online or not."""
return self._econet.connected
@property
def device_info(self) -> DeviceInfo:
"""Return device registry information for this entity."""
return DeviceInfo(
identifiers={(DOMAIN, self._econet.device_id)},
manufacturer="Rheem",
name=self._econet.device_name,
)
@property
def name(self):
"""Return the name of the entity."""
return self._econet.device_name
@property
def unique_id(self):
"""Return the unique ID of the entity."""
return f"{self._econet.device_id}_{self._econet.device_name}"
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_FAHRENHEIT
@property
def should_poll(self) -> bool:
"""Return True if entity has to be polled for state.
False if entity pushes its state to HA.
"""
return False
|
opendatatrentino/opendata-harvester
|
harvester_odt/pat_geocatalogo/constants.py
|
Python
|
bsd-2-clause
| 669
| 0
|
API_XML_NSMAP = {
"csw": "http://www.opengis.net/cat/csw/
|
2.0.2",
"dc": "http://purl.org/dc/elements/1.1/",
"dct": "http://purl.org/dc/terms/",
"geonet": "http://www.fao.org/geonetwork",
"xsi": "http://www.w3.org/2001/XMLSchema-instance",
}
LINKED_XML_NSMAP = {
"csw": "http://www.opengis.net/cat/csw/2.0.2",
"gco": "http://www.isotc211.org/2005/gco",
"gmd": "http://www.isotc211.org/2005/gmd",
"gml": "http://www.opengis.net/gml/3.2",
"gmx": "http://www.isotc211.org/2005/gmx",
"srv": "
|
http://www.isotc211.org/2005/srv",
"xlink": "http://www.w3.org/1999/xlink",
"xsi": "http://www.w3.org/2001/XMLSchema-instance",
}
|
alephdata/ingestors
|
ingestors/directory.py
|
Python
|
mit
| 1,551
| 0
|
from followthemoney import model
from ingestors.ingestor import Ingestor
class DirectoryIngestor(Ingestor):
"""Traverse the entries in a directory."""
MIME_TYPE = "inode/directory"
SKIP_ENTRIES = [".git", ".hg", "__MACOSX", ".gitignore"]
def ingest(self, file_path, entity):
"""Ingestor implementation."""
if entity.schema == model.get("Document"):
entity.schema = model.get("Folder")
if file_path is None or not file_path.is_dir():
return
self.crawl(self.manager, file_path, parent=entity)
|
@classmethod
def crawl(cls, manager, file_path, parent=None):
for path in
|
file_path.iterdir():
name = path.name
if name is None or name in cls.SKIP_ENTRIES:
continue
sub_path = file_path.joinpath(name)
child = manager.make_entity("Document", parent=parent)
child.add("fileName", name)
if sub_path.is_dir():
if parent is not None:
child.make_id(parent.id, name)
else:
child.make_id(name)
child.schema = model.get("Folder")
child.add("mimeType", cls.MIME_TYPE)
manager.emit_entity(child)
cls.crawl(manager, sub_path, parent=child)
else:
checksum = manager.store(sub_path)
child.make_id(name, checksum)
child.set("contentHash", checksum)
manager.queue_entity(child)
|
isaachenrion/jets
|
src/monitors/saver.py
|
Python
|
bsd-3-clause
| 985
| 0.001015
|
import torch
import pickle
import logging
from .baseclasses import ScalarMonitor
|
from .meta import Regurgitate
class Saver(ScalarMonitor):
def __init__(self, save_monitor, model_file, settings_file, **kwargs):
self.saved = False
self.save_monitor = save_monitor
self.model_file = model_file
self.settings_file = settings_file
super().__init__('save', **kwargs)
def call(self, model=None, settings=None, **kwargs):
if self.value is None:
self.value = self.save_monitor.value
if sel
|
f.save_monitor.changed:
self.save(model, settings)
self.value = self.save_monitor.value
return self.value
def save(self, model, settings):
with open(self.model_file, 'wb') as f:
torch.save(model.cpu().state_dict(), f)
if torch.cuda.is_available():
model.cuda()
with open(self.settings_file, "wb") as f:
pickle.dump(settings, f)
|
mmccoo/kicad_mmccoo
|
menus_and_buttons/menus_and_buttons.py
|
Python
|
apache-2.0
| 2,271
| 0.005284
|
import pcbnew
import wx
import wx.aui
# get the path of this script. Will need it to load the png later.
import inspect
import os
filename = inspect.getframeinfo(inspect.currentframe()).filename
path = os.path.dirname(os.path.abspath(filename))
print("running {} from {}".format(filename, path))
def findPcbnewWindow():
windows = wx.GetTopLevelWindows()
pcbnew = [w for w in windows if w.GetTitle()[0:6] == "Pcbnew"]
if len(pcbnew) != 1:
raise Exception("Cannot find pcbnew window from title matching!")
return pcbnew[0]
pcbwin = findPcbnewWindow()
# 6038 is the value that H_TOOLBAR from kicad/include/id.h happens to get.
# other interesting values include:
# 6041 is AUX_TOOLBAR. That's the second row of stuff in the pcbnew gui.
# it contains things like track width, via size, grid
# 6039 is V_TOOLBAR, the right commands window. zoom to selection, highlight net.
# 6040 is OPT_TOOLBAR, the left commands window. disable drc, hide grid, display polar
# kicad/include/id.h has been added to pcbnew's interface. If you get the error
# that ID_H_TOOLBAR doesn't exist, it's probably because you need to update your
# version of kicad.
top_tb = pcbwin.FindWindowById(pcbnew.ID_H_TOOLBAR)
# let's look at what top level frames/windows we have. These include the
#
#children = {}
#for subwin in pcbwin.Children:
# id = subwin.GetId()
# children[id] = subwin
# print("subwin {} {} {}".format(subwin.GetLabel(), subwin.GetClassName(), subwin.GetId()))
# for idx in range(top_tb.GetToolCount()):
# tbi
|
= top_tb.FindToolByIndex(idx)
# #print("
|
toolbar item {}".format(tbi.GetShortHelp()))
def MyButtonsCallback(event):
# when called as a callback, the output of this print
# will appear in your xterm or wherever you invoked pcbnew.
print("got a click on my new button {}".format(str(event)))
# Plan for three sizes of bitmaps:
# SMALL - for menus - 16 x 16
# MID - for toolbars - 26 x 26
# BIG - for program icons - 48 x 48
# bitmaps_png/CMakeLists.txt
bm = wx.Bitmap(path + '/hello.png', wx.BITMAP_TYPE_PNG)
itemid = wx.NewId()
top_tb.AddTool(itemid, "mybutton", bm, "this is my button", wx.ITEM_NORMAL)
top_tb.Bind(wx.EVT_TOOL, MyButtonsCallback, id=itemid)
top_tb.Realize()
|
nerandell/aiopg
|
aiopg/cursor.py
|
Python
|
bsd-2-clause
| 11,747
| 0.00017
|
import asyncio
import warnings
import psycopg2
from .log import logger
class Cursor:
def __init__(self, conn, impl, timeout, echo):
self._conn = conn
self._impl = impl
self._timeout = timeout
self._echo = echo
@property
def echo(self):
"""Return echo mode status."""
return self._echo
@property
def description(self):
"""This read-only attribute is a sequence of 7-item sequences.
Each of these sequences is a collections.namedtuple containing
information describing one result column:
0. name: the name of the column returned.
1. type_code: the PostgreSQL OID of the column.
2. display_size: the actual length of the column in bytes.
3. internal_size: the size in bytes of the column associated to
this column on the server.
4. precision: total number of significant digits in columns of
type NUMERIC. None for other types.
5. scale: count of decimal digits in the fractional part in
columns of type NUMERIC. None for other types.
6. null_ok: always None as not easy to retrieve from the libpq.
This attribute will be None for operations that do not
return rows or if the cursor has not had an operation invoked
via the execute() method yet.
"""
return self._impl.description
def close(self):
"""Close the cursor now."""
self._impl.close()
@property
def closed(self):
"""Read-only boolean attribute: specifies if the cursor is closed."""
return self._impl.closed
@property
def connection(self):
"""Read-only attribute returning a reference to the `Connection`."""
return self._conn
@property
def raw(self):
"""Underlying psycopg cursor object, readonly"""
return self._impl
@property
def name(self):
# Not supported
return self._impl.name
@property
def scrollable(self):
# Not supported
return self._impl.scrollable
@scrollable.setter
def scrollable(self, val):
# Not supported
self._impl.scrollable = val
@property
def withhold(self):
# Not supported
return self._impl.withhold
@withhold.setter
def withhold(self, val):
# Not supported
self._impl.withhold = val
@asyncio.coroutine
def execute(self, operation, parameters=None, *, timeout=None):
"""Prepare and execute a database operation (query or command).
Parameters may be provided as sequence or mapping and will be
bound to variables in the operation. Variables are specified
either with positional %s or named %({name})s placeholders.
"""
if timeout is None:
timeout = self._timeout
waiter = self._conn._create_waiter('cursor.execute')
if self._echo:
logger.info(operation)
logger.info("%r", parameters)
try:
self._impl.execute(operation, parameters)
except:
self._conn._waiter = None
raise
else:
yield from self._conn._poll(waiter, timeout)
@asyncio.coroutine
def executemany(self, operation, seq_of_parameters):
# Not supported
raise psycopg2.ProgrammingError(
"executemany cannot be used in asynchronous mode")
@asyncio.coroutine
def callproc(self, procname, parameters=None, *, timeout=None):
"""Call a stored database procedure with the given name.
The sequence of parameters must contain one entry for each
argument that the procedure expects. The result of the call is
returned as modified copy of the input sequence. Input
parameters are left untouched, output and input/output
parameters replaced with possibly new values.
"""
if timeout is None:
timeout = self._timeout
waiter = self._conn._create_waiter('cursor.callproc')
if self._echo:
logger.info("CALL %s", procname)
logger.info("%r", parameters)
try:
self._impl.callproc(procname, parameters)
except:
self._conn._waiter = None
raise
else:
yield from self._conn._poll(waiter, timeout)
@asyncio.coroutine
def mogrify(self, operation, parameters=None):
"""Return a query string after arguments binding.
The string returned is exactly the one that would be sent to
the database running the .execute() method or similar.
"""
ret = self._impl.mogrify(operation, parameters)
assert not self._conn._isexecuting(), ("Don't support server side "
"mogrify")
return ret
@asyncio.coroutine
def setinputsizes(self, sizes):
"""This method is exposed in compliance with the DBAPI.
It currently does nothing but it is safe to call it.
"""
self._impl.setinputsizes(sizes)
@asyncio.coroutine
def fetchone(self):
"""Fetch the next row of a query result set.
Returns a single tuple, or None when no more data is
available.
"""
ret = self._impl.fetchone()
assert not self._conn._isexecuting(), ("Don't support server side "
"cursors yet")
return ret
@asyncio.coroutine
def fetchmany(self, size=None):
"""Fetch the next set of rows of a query result.
Returns a list of tuples. An empty list is returned when no
more rows are available.
The number of rows to fetch per call is specified by the
parameter. If it is not given, the cursor's .arraysize
determines the number of rows to be fetched. The method should
try to fetch as many rows as indicated by the size
parameter. If this is not possible due to the specified number
of rows not being available, fewer rows may be returned.
"""
if size is None:
size = self._impl.arraysize
ret = self._impl.fetchmany(size)
assert not self._conn._isexecuting(), ("Don't support server side "
|
"cursors yet")
return ret
@asyncio.coroutine
def fetchall(self):
"""Fetch all (remaining) rows of a query result.
Ret
|
urns them as a list of tuples. An empty list is returned
if there is no more record to fetch.
"""
ret = self._impl.fetchall()
assert not self._conn._isexecuting(), ("Don't support server side "
"cursors yet")
return ret
@asyncio.coroutine
def scroll(self, value, mode="relative"):
"""Scroll to a new position according to mode.
If mode is relative (default), value is taken as offset
to the current position in the result set, if set to
absolute, value states an absolute target position.
"""
ret = self._impl.scroll(value, mode)
assert not self._conn._isexecuting(), ("Don't support server side "
"cursors yet")
return ret
@property
def arraysize(self):
"""How many rows will be returned by fetchmany() call.
This read/write attribute specifies the number of rows to
fetch at a time with fetchmany(). It defaults to
1 meaning to fetch a single row at a time.
"""
return self._impl.arraysize
@arraysize.setter
def arraysize(self, val):
"""How many rows will be returned by fetchmany() call.
This read/write attribute specifies the number of rows to
fetch at a time with fetchmany(). It defaults to
1 meaning to fetch a single row at a time.
"""
self._impl.arraysize = val
@property
def itersize(self):
# Not supported
return self._impl.itersize
@itersize.setter
def itersize(self, val):
# Not s
|
WimpyAnalytics/django-andablog
|
demo/common/migrations/0001_initial.py
|
Python
|
bsd-2-clause
| 2,365
| 0.004651
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(unique=True, max_length=255, verbose_name='email address', db_index=True)),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('name', models.CharField(max_length=100)),
('profile_name', models.CharField(unique=True, max_length=20, verbose_name=b'pr
|
ofile name')),
('slug', models.SlugField(unique=True)),
('groups', mod
|
els.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of his/her group.', verbose_name='groups')),
('user_permissions', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions')),
],
options={
'ordering': ['email'],
'abstract': False,
},
bases=(models.Model,),
),
]
|
peiyuwang/pants
|
src/python/pants/backend/codegen/tasks/simple_codegen_task.py
|
Python
|
apache-2.0
| 531
| 0.001883
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.base.deprecated import deprecated_module
from pants.task.simple_codegen_task import SimpleCodegenTask
de
|
precated_module('1.5.0dev0', 'Use pa
|
nts.task.simple_codegen_task instead')
SimpleCodegenTask = SimpleCodegenTask
|
fredericmohr/mitro
|
mitro-mail/build/venv/lib/python2.7/site-packages/sqlalchemy/dialects/sqlite/base.py
|
Python
|
gpl-3.0
| 34,783
| 0.001265
|
# sqlite/base.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: sqlite
:name: SQLite
Date and Time Types
-------------------
SQLite does not have built-in DATE, TIME, or DATETIME types, and pysqlite
does not provide out of the box functionality for translating values between
Python `datetime` objects and a SQLite-supported format. SQLAlchemy's own
:class:`~sqlalchemy.types.DateTime` and related types provide date formatting
and parsing functionality when SQlite is used. The implementation classes are
:class:`~.sqlite.DATETIME`, :class:`~.sqlite.DATE` and :class:`~.sqlite.TIME`.
These types represent dates and times as ISO formatted strings, which also
nicely support ordering. There's no reliance on typical "libc" internals
for these functions so historical dates are fully supported.
Auto Incrementing Behavior
--------------------------
Background on SQLite's autoincrement is at: http://sqlite.org/autoinc.html
Two things to note:
* The AUTOINCREMENT keyword is **not** required for SQLite tables to
generate primary key values automatically. AUTOINCREMENT only means that
the algorithm used to generate ROWID values should be slightly different.
* SQLite does **not** generate primary key (i.e. ROWID) values, even for
one column, if the table has a composite (i.e. multi-column) primary key.
This is regardless of the AUTOINCREMENT keyword being present or not.
To specifically render the AUTOINCREMENT keyword on the primary key
column when rendering DDL, add the flag ``sqlite_autoincrement=True``
to the Table construct::
Table('sometable', metadata,
Column('id', Integer, primary_key=True),
sqlite_autoincrement=True)
Transaction Isolation Level
---------------------------
:func:`.create_engine` accepts an ``isolation_level`` parameter which
results in the command ``PRAGMA read_uncommitted <level>`` being invoked for
every new connection. Valid values for this parameter are ``SERIALIZABLE``
and ``READ UNCOMMITTED`` corresponding to a value of 0 and 1, respectively.
See the section :ref:`pysqlite_serializable` for an important workaround
when using serializable isolation with Pysqlite.
Database Locking Behavior / Concurrency
---------------------------------------
Note that SQLite is not designed for a high level of concurrency. The
database itself, being a file, is locked completely during write operations
and within transactions, meaning exactly one connection has exclusive access
to the database during this period - all other connections will be blocked
during this time.
The Python DBAPI specification also calls for a connection model that is always
in a transaction; there is no BEGIN method, only commit and rollback. This
implies that a SQLite DBAPI driver would technically allow only serialized
access to a particular database file at all times. The pysqlite driver
attempts to ameliorate this by deferring the actual BEGIN statement until
the first DML (INSERT, UPDATE, or DELETE) is received within a
transaction. While this breaks serializable isolation, it at least delays
the exclusive locking inherent in SQLite's design.
SQLAlchemy's default mode of usage with the ORM is known
as "autocommit=False", which means the moment the :class:`.Session` begins to
be used, a transaction is begun. As the :class:`.Session` is used, the
autoflush feature, also on by default, will flush out pending changes to the
database before each query. The effect of this is that a :class:`.Session`
used in its default mode will often emit DML early on, long before the
transaction is actually committed. This again will have the effect of
serializing access to the SQLite database. If highly concurrent reads are
desired against the SQLite database, it is advised that the autoflush feature
be disabled, and potentially even that autocommit be re-enabled, which has
the effect of each SQL statement and flush committing changes immediately.
For more information on SQLite's lack of concurrency by design, please
see `Situations Where Another RDBMS May Work Better - High
Concurrency <http://www.s
|
qlite.org/whentouse.html>`_ near the bottom of
the page.
.. _sqlite_foreign_keys:
Foreign Key Support
-------------------
SQLite supports FOREIGN KEY syntax when emitting CREATE statements for tables,
however by default these constraints have no effect on the operation
of the table.
Constraint checking on SQLite has three prerequisites:
* At least version 3.6.19 of SQLite must be in use
* The SQLite libary must be compiled *without* the SQLITE_OMIT_FOREIGN_KEY
or SQLITE_OMIT_TRIGG
|
ER symbols enabled.
* The ``PRAGMA foreign_keys = ON`` statement must be emitted on all connections
before use.
SQLAlchemy allows for the ``PRAGMA`` statement to be emitted automatically
for new connections through the usage of events::
from sqlalchemy.engine import Engine
from sqlalchemy import event
@event.listens_for(Engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
cursor.close()
.. seealso::
`SQLite Foreign Key Support <http://www.sqlite.org/foreignkeys.html>`_ -
on the SQLite web site.
:ref:`event_toplevel` - SQLAlchemy event API.
"""
import datetime
import re
from sqlalchemy import sql, exc
from sqlalchemy.engine import default, base, reflection
from sqlalchemy import types as sqltypes
from sqlalchemy import util
from sqlalchemy.sql import compiler
from sqlalchemy import processors
from sqlalchemy.types import BLOB, BOOLEAN, CHAR, DATE, DATETIME, DECIMAL,\
FLOAT, REAL, INTEGER, NUMERIC, SMALLINT, TEXT, TIME, TIMESTAMP, VARCHAR
class _DateTimeMixin(object):
_reg = None
_storage_format = None
def __init__(self, storage_format=None, regexp=None, **kw):
super(_DateTimeMixin, self).__init__(**kw)
if regexp is not None:
self._reg = re.compile(regexp)
if storage_format is not None:
self._storage_format = storage_format
class DATETIME(_DateTimeMixin, sqltypes.DateTime):
"""Represent a Python datetime object in SQLite using a string.
The default string storage format is::
"%(year)04d-%(month)02d-%(day)02d %(hour)02d:%(min)02d:%(second)02d.%(microsecond)06d"
e.g.::
2011-03-15 12:05:57.10558
The storage format can be customized to some degree using the
``storage_format`` and ``regexp`` parameters, such as::
import re
from sqlalchemy.dialects.sqlite import DATETIME
dt = DATETIME(
storage_format="%(year)04d/%(month)02d/%(day)02d %(hour)02d:%(min)02d:%(second)02d",
regexp=re.compile("(\d+)/(\d+)/(\d+) (\d+)-(\d+)-(\d+)")
)
:param storage_format: format string which will be applied to the
dict with keys year, month, day, hour, minute, second, and microsecond.
:param regexp: regular expression which will be applied to
incoming result rows. If the regexp contains named groups, the
resulting match dict is applied to the Python datetime() constructor
as keyword arguments. Otherwise, if positional groups are used, the
the datetime() constructor is called with positional arguments via
``*map(int, match_obj.groups(0))``.
"""
_storage_format = (
"%(year)04d-%(month)02d-%(day)02d "
"%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
)
def __init__(self, *args, **kwargs):
truncate_microseconds = kwargs.pop('truncate_microseconds', False)
super(DATETIME, self).__init__(*args, **kwargs)
if truncate_microseconds:
assert 'storage_format' not in kwargs, "You can specify only "\
"one of truncate_microseconds or storage_format."
assert 'regexp' not in kwargs, "You can specify only one of "\
"truncate_microseconds or regexp."
self._storage_format = (
|
spulec/PyQS
|
example/api/helpers.py
|
Python
|
mit
| 545
| 0
|
import json
import logging
from flask i
|
mport jsonify
def construct_response(message, payload, status):
body = {}
if status == 500:
body['message'] = (
'Something went wrong constructing response. '
'Is your payload valid JSON?'
)
body['request_payload'] = str(payload)
else:
body['message'] = message
body['request_payload'] = payload
body['status_code'] = status
logging.debug(body)
resp = jsonify(body)
|
resp.status_code = status
return resp
|
saramic/learning
|
data/tensorflow/src/2_4_creating_tensors.py
|
Python
|
unlicense
| 405
| 0.032099
|
import tensorflow as tf
m1 = tf.const
|
ant([[1., 2.]])
m2 = tf.constant([[1],
[2]])
m3 = tf.constant([ [[1,2],
[3,4],
[5,6]],
[[7,8],
[9,10],
[11,12]] ])
print(m1)
pri
|
nt(m2)
print(m3)
# 500 x 500 tensor
print(tf.ones([500, 500]))
# 500 x 500 tensor with 0.5 value
print(tf.ones([500, 500]) * 0.5)
|
expyriment/expyriment
|
expyriment/misc/_colour.py
|
Python
|
gpl-3.0
| 15,022
| 0.000466
|
# -*- coding: utf-8 -*-
"""Colour class.
This module contains a class implementing an RGB colour.
"""
__author__ = 'Florian Krause <florian@expyriment.org>, \
Oliver Lindemann <oliver@expyriment.org>'
__version__ = ''
__revision__ = ''
__date__ = ''
import colorsys
from . import round
# The named colours are the 140 HTML colour names:
# see https://www.w3schools.com/colors/colors_names.asp
_colours = {
'aliceblue': (240, 248, 255),
'antiquewhite': (250, 235, 215),
'aqua': (0, 255, 255),
'aquamarine': (127, 255, 212),
'azure': (240, 255, 255),
'beige': (245, 245, 220),
'bisque': (255, 228, 196),
'black': (0, 0, 0),
'blanchedalmond': (255, 235, 205),
'blue': (0, 0, 255),
'blueviolet': (138, 43, 226),
'brown': (165, 42, 42),
'burlywood': (222, 184, 135),
'cadetblue': (95, 158, 160),
'chartreuse': (127, 255, 0),
'chocolate': (210, 105, 30),
'coral': (255, 127, 80),
'cornflowerblue': (100, 149, 237),
'cornsilk': (255, 248, 220),
'crimson': (220, 20, 60),
'cyan': (0, 255, 255),
'darkblue': (0, 0, 139),
'darkcyan': (0, 139, 139),
'darkgoldenrod': (184, 134, 11),
'darkgray': (169, 169, 169),
'darkgreen': (0, 100, 0),
'darkkhaki': (189, 183, 107),
'darkmagenta': (139, 0, 139),
'darkolivegreen': (85, 107, 47),
'darkorange': (255, 140, 0),
'darkorchid': (153, 50, 204),
'darkred': (139, 0, 0),
'darksalmon': (233, 150, 122),
'darkseagreen': (143, 188, 143),
'darkslateblue': (72, 61, 139),
'darkslategray': (47, 79, 79),
'darkturquoise': (0, 206, 209),
'darkviolet': (148, 0, 211),
'deeppink': (255, 20, 147),
'deepskyblue': (0, 191, 255),
'dimgray': (105, 105, 105),
'dodgerblue': (30, 144, 255),
'firebrick': (178, 34, 34),
'floralwhite': (255, 250, 240),
'forestgreen': (34, 139, 34),
'fuchsia': (255, 0, 255),
'gainsboro': (220, 220, 220),
'ghostwhite': (248, 248, 255),
'gold': (255, 215, 0),
'goldenrod': (218, 165, 32),
'gray': (128, 128, 128),
'green': (0, 128, 0),
'greenyellow': (173, 255, 47),
'honeydew': (240, 255, 240),
'hotpink': (255, 105, 180),
'indianred': (205, 92, 92),
'indigo': (75, 0, 130),
'ivory': (255, 255, 240),
'khaki': (240, 230, 140),
'lavender': (230, 230, 250),
'lavenderblush': (255, 240, 245),
'lawngreen': (124, 252, 0),
'lemonchiffon': (255, 250, 205),
'lightblue': (173, 216, 230),
'lightcoral': (240, 128, 128),
'lightcyan': (224, 255, 255),
'lightgoldenrodyellow': (250, 250, 210),
'lightgray': (211, 211, 211),
'lightgreen': (144, 238, 144),
'lightpink': (255, 182, 193),
'lightsalmon': (255, 160, 122),
'lightseagreen': (32, 178, 170),
'lightskyblue': (135, 206, 250),
'lightslategray': (119, 136, 153),
'lightsteelblue': (176, 196, 222),
'lightyellow': (255, 255, 224),
'lime': (0, 255, 0),
'limegreen': (50, 205, 50),
'linen': (250, 240, 230),
'magenta': (255, 0, 255),
'maroon': (128, 0, 0),
'mediumaquamarine': (102, 205, 170),
'mediumblue': (0, 0, 205),
'mediumorchid': (186, 85, 211),
'mediumpurple': (147, 112, 219),
'mediumseagreen': (60, 179, 113),
'mediumslateblue': (123, 104, 238),
'mediumspringgreen': (0, 250, 154),
'mediumt
|
urquoise': (72, 209, 204),
'mediumvioletred': (199, 21, 133),
'midnightblue': (25, 25, 112),
'mintcream': (245, 255, 250),
'mistyrose': (255, 228, 225),
'moccasin': (255, 228, 181),
'navajowhite': (255, 222, 173),
'navy': (0, 0, 128),
'o
|
ldlace': (253, 245, 230),
'olive': (128, 128, 0),
'olivedrab': (107, 142, 35),
'orange': (255, 165, 0),
'orangered': (255, 69, 0),
'orchid': (218, 112, 214),
'palegoldenrod': (238, 232, 170),
'palegreen': (152, 251, 152),
'paleturquoise': (175, 238, 238),
'palevioletred': (219, 112, 147),
'papayawhip': (255, 239, 213),
'peachpuff': (255, 218, 185),
'peru': (205, 133, 63),
'pink': (255, 192, 203),
'plum': (221, 160, 221),
'powderblue': (176, 224, 230),
'purple': (128, 0, 128),
'red': (255, 0, 0),
'rosybrown': (188, 143, 143),
'royalblue': (65, 105, 225),
'saddlebrown': (139, 69, 19),
'salmon': (250, 128, 114),
'sandybrown': (250, 164, 96),
'seagreen': (46, 139, 87),
'seashell': (255, 245, 238),
'sienna': (160, 82, 45),
'silver': (192, 192, 192),
'skyblue': (135, 206, 235),
'slateblue': (106, 90, 205),
'slategray': (112, 128, 144),
'snow': (255, 250, 250),
'springgreen': (0, 255, 127),
'steelblue': (70, 130, 180),
'tan': (210, 180, 140),
'teal': (0, 128, 128),
'thistle': (216, 191, 216),
'tomato': (255, 99, 71),
'turquoise': (64, 224, 208),
'violet': (238, 130, 238),
'wheat': (245, 222, 179),
'white': (255, 255, 255),
'whitesmoke': (245, 245, 245),
'yellow': (255, 255, 0),
'yellowgreen': (154, 205, 50),
}
class Colour(object):
"""Implements a class representing an RGB colour."""
@staticmethod
def get_colour_names():
"""Get a dictionary of all known colour names."""
from collections import OrderedDict
return OrderedDict(sorted(_colours.items(), key=lambda t: t[0]))
@staticmethod
def is_rgb(value):
"""Check for valid RGB tuple value.
Parameters
----------
value : iterable of length 3 (e.g. [255, 0, 0])
the value to be checked
Returns
-------
valid : bool
whether the value is valid or not
"""
if not len(value) == 3:
return False
elif False in [isinstance(x, int) for x in value]:
return False
elif False in [0 <= x <= 255 for x in value]:
return False
else:
return True
@staticmethod
def is_name(value):
"""Check for valid colour name value.
Parameters
----------
value : str (e.g
|
mattop101/ShellSwitch
|
shellswitch.py
|
Python
|
mit
| 5,372
| 0.003351
|
import pygame
import sys
from shellswitch_lib import ShellSwitchGameGrid
DISPLAY_WIDTH = 512
DISPLAY_HEIGHT = 384
class ShellSwitcher:
def __init__(self):
pygame.mixer.pre_init(44100, -16, 1, 512)
pygame.init()
self.screen = pygame.display.set_mode((DISPLAY_WIDTH, DISPLAY_HEIGHT))
pygame.display.set_caption("Shell Switch")
self.background = pygame.image.load("assets/bg0.png").convert()
self.tile_sprites = (pygame.image.load("assets/tile_bomb.png"),
pygame.image.load("assets/tile_1.png"),
pygame.image.load("assets/tile_2.png"),
pygame.image.load("assets/tile_3.png"),
pygame.image.load("assets/tile_0.png"))
self.sounds = {'click': pygame.mixer.Sound("assets/click.ogg"),
'expl': pygame.mixer.Sound("assets/expl.ogg"),
'point': pygame.mixer.Sound("assets/point.ogg")}
pygame.mixer.music.load("assets/main.ogg")
pygame.mixer.music.play(loops=-1)
self.grid_data = ShellSwitchGameGrid()
self.level = 0
self.score = 0
self.score_display = 0
self.max_score = 0
self.game_over = False
self.load_grid()
def load_grid(self):
"""
Generates the game grid and associates each tile with the blank tile sprite
"""
self.grid_data.gen_grid(self.level)
self.max_score = self.grid_data.max_score()
for row in range(self.grid_data.rows):
for col in range(self.grid_data.cols):
data_tile = self.grid_data.get_cell(row, col)
data_ti
|
le.sprite = self.tile_sprites[4]
self.counter_bombs_y = [self.grid_data.bombs_in_row(i) for i in range(self.grid_data.rows)]
self.counter_bombs_x = [self.grid_data.bombs_in_col(j) for j in range(self.grid_data.cols)]
self.counter_score_y = [self.grid_data.points_in_row(k) for k in range(self.grid_data.rows)]
self.counter_score_x = [self.grid_data.points_in_col(l) for l in range(self.grid_data.cols)]
def check_tiles(sel
|
f, mouse_pos):
"""
Change the clicked tile to the corresponding sprite
"""
for tile in self.grid_data:
if tile.area.collidepoint(mouse_pos) and not tile.is_clicked:
if self.score == 0:
self.score = tile.mult
else:
self.score *= tile.mult
self.sounds['click'].play()
tile.sprite = self.tile_sprites[tile.mult]
tile.is_clicked = True
# If bomb is clicked
if tile.mult == 0:
self.screen.blit(tile.sprite, tile.pos.get_tuple())
pygame.display.update()
pygame.time.wait(500)
self.sounds['expl'].play()
self.score_display = 0
self.game_over = True
def run(self):
"""
Run the main game loop
"""
clock = pygame.time.Clock()
pygame.font.init()
font_score = pygame.font.SysFont("Impact", 44)
font_counter_score = pygame.font.SysFont("Impact", 12)
font_counter_bombs = pygame.font.SysFont("Impact", 24)
while True:
clock.tick(25)
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
if event.type == pygame.MOUSEBUTTONUP:
self.check_tiles(pygame.mouse.get_pos())
self.screen.fill((255, 255, 255))
self.screen.blit(self.background, (0, 0))
for tile in self.grid_data:
self.screen.blit(tile.sprite, tile.pos.get_tuple())
if self.game_over:
pygame.time.wait(1500)
self.load_grid()
self.game_over = False
# Handle scoring
if self.score_display != self.score:
self.score_display += 1
self.sounds['point'].play()
self.screen.blit(font_score.render(str(self.score_display), -1, (255, 222, 0)), (54, 128))
# Draw counters
for i in range(5):
self.screen.blit(font_counter_score.render(str(self.counter_score_y[i]), -1, (67, 67, 67)), (464, 8 + i * (self.grid_data.gap_x + self.grid_data.tile_size)))
self.screen.blit(font_counter_bombs.render(str(self.counter_bombs_y[i]), -1, (67, 67, 67)), (482, 26 + i * (self.grid_data.gap_x + self.grid_data.tile_size)))
self.screen.blit(font_counter_score.render(str(self.counter_score_x[i]), -1, (67, 67, 67)), (146 + i * (self.grid_data.gap_x + self.grid_data.tile_size), 328))
self.screen.blit(font_counter_bombs.render(str(self.counter_bombs_x[i]), -1, (67, 67, 67)), (164 + i * (self.grid_data.gap_x + self.grid_data.tile_size), 346))
if self.score_display == self.grid_data.max_score():
self.level += 1
self.score = 0
self.score_display = 0
print(self.level)
self.load_grid()
pygame.display.update()
if __name__ == "__main__":
ShellSwitcher().run()
|
abhishekjiitr/my-nltk
|
examples/ex6.py
|
Python
|
mit
| 762
| 0.003937
|
import nltk
from nltk.corpus import state_union
from nltk.tokenize import PunktSentenceTokenizer
train_text = state_union.raw("2005-GWBush.txt")
sample_text = state_union.raw("2006-GWBush.txt")
custom_sent_tokenizer = PunktSentenceTokenizer(train_text)
tokenized = custom_sent_tokenizer.tokenize(sample_text)
def process_content():
try:
for i in tokenized[5:]:
words = nltk.word_tokenize(i)
tagged = nltk.pos_tag(words)
chunkGram = r"""Chunk: {<.*>+}
}<VB.?|IN|DT|TO>+{"""
chunkPa
|
rser = nltk.RegexpParser(chunkGram)
chunked = c
|
hunkParser.parse(tagged)
chunked.draw()
except Exception as e:
print(str(e))
process_content()
|
rev22/svgl
|
scripts/test_suite.py
|
Python
|
lgpl-2.1
| 767
| 0.009126
|
import os
import popen2
HOME = '/home/conversy'
#TEST_SUITE_DIR = HOME+'/Archives/svgtests'
TEST_SUITE_DIR = HOME+'/Archives/svgtoolkit-20001010/samples'
lfiles = os.listdir(TEST_SUITE_DIR)
tmpfile = '/tmp/conversysvgtest'
excludes = ['SVGAnimat', 'SVGSVGElement::xmlns', 'SVGTitleElement::cont
|
ent', 'SVGDescElement::content', 'xmlns:xlink']
filter = ' && cat ' + tmpfile
for i in excludes:
filter = filter + ' | grep -v "%s" '%i
fo
|
r filename in lfiles:
prefix, ext = os.path.splitext(filename)
if ext=='.svg':
longname = TEST_SUITE_DIR+ '/' + filename
print longname
cmd = "./svgtest " + longname + ' 2>' + tmpfile + filter
#print cmd
stdout, stdin, stderr = popen2.popen3(cmd)
print stdout.read()
|
reisub-de/dmpr-simulator
|
dmprsim/analyze/random_network.py
|
Python
|
mit
| 809
| 0
|
import random
from pathlib import Path
from dmprsim.topologies.randomized import RandomTopology
from dmprsim.topologies.utils import ffmpeg
SIMU_TIME = 300
def main(args, results_dir: Path, scenario_dir: Path):
sim = RandomTopology(
simulation_
|
time=getattr(args, 'simulation_time', 300),
num_routers=getattr(args, 'num_routers', 100),
random_seed_prep=getattr(args, 'random_seed
|
_prep', 1),
random_seed_runtime=getattr(args, 'random_seed_runtime', 1),
scenario_dir=scenario_dir,
results_dir=results_dir,
args=args,
tracepoints=('tx.msg',),
area=(640, 720),
velocity=lambda: random.random()**6,
)
sim.prepare()
for _ in sim.start():
pass
if sim.gen_movie:
ffmpeg(results_dir, scenario_dir)
|
davidhernon/libmapper
|
swig/test.py
|
Python
|
lgpl-2.1
| 3,790
| 0.018997
|
import sys, mapper
def h(sig, id, f, timetag):
try:
print sig.name, f
except:
print 'exception'
print sig, f
def setup(d):
sig = d.add_input("/freq", 1, 'i', "Hz", None, None, h)
print 'inputs',d.num_inputs
print 'minimum',sig.minimum
sig.minimum = 34.0
print 'minimum',sig.minimum
sig.minimum = 12
print 'minimum',sig.minimum
sig.minimum = None
print 'minimum',sig.minimum
print 'port',d.port
print 'device name',d.name
print 'device port',d.port
print 'device ip',d.ip4
print 'device interface',d.interface
print 'device ordinal',d.ordinal
print 'signal name',sig.name
print 'signal full name',sig.full_name
while not d.ready():
d.poll(10)
print 'port',d.port
print 'device name',d.name
print 'device ip',d.ip4
print 'device interface',d.interface
print 'device ordinal',d.ordinal
print 'signal name',sig.name
print 'signal full name',sig.full_name
print 'signal is_output',sig.is_output
print 'signal length',sig.length
print 'signal type', sig.type
print 'signal is_output', sig.is_output
print 'signal unit', sig.unit
dev.set_properties({"testInt":5, "testFloat":12.7, "testString":"test",
"removed1":"shouldn't see this"})
dev.properties['testInt'] = 7
dev.set_properties({"removed1":None, "removed2":"test"})
dev.remove_property("removed2")
print 'signal properties:', sig.properties
sig.properties['testInt'] = 3
print 'signal properties:', sig.properties
print 'setup done!'
dev = mapper.device("test")
setup(dev)
def db_cb(rectype, record, action):
print rectype,'callback -'
print ' record:',record
print ' action:',["MODIFY","NEW","REMOVE"][action]
mon = mapper.monitor()
mon.db.add_device_callback(lambda x,y:db_cb('device',x,y))
mon.db.add_signal_callback(lambda x,y:db_cb('signal',x,y))
mon.db.add_connection_callback(lambda x,y:db_cb('connection',x,y))
l = lambda x,y:db_cb('link',x,y)
mon.db.add_link_callback(l)
mon.db.remove_link_callback(l)
while not dev.ready():
dev.poll(10)
mon.poll()
mon.request_devices()
for i in range(1000):
dev.poll(10)
mon.poll()
if i==250:
for i in [('devices', mon.db.all_devices),
('inputs', mon.db.all_inputs),
('outputs', mon.db.all_outputs),
('connections', mon.db.all_connections),
('links', mon.db.all_links)]:
print i[0],':'
for j in i[1]():
print j
print 'devices matching "send":'
for i in mon.db.match_devices_by_name('send'):
print i
print 'outputs for device "/testsend.1" matching "3":'
for i in mon.db.match_outputs_by_device_name('/testsend.1', '3'):
|
print i
print 'links for device "/testsend.1":'
for i in mon.db.links_by_src_device_name('/testsend.1'):
print i
print 'link for /testsend.1, /testrecv.1:'
print mon.db.get_link_by_src_dest_names("/testsend.1", "/testrecv.1")
print 'not fo
|
und link:'
print mon.db.get_link_by_src_dest_names("/foo", "/bar")
if i==500:
mon.connect("/testsend.1/outsig_3", "/testrecv.1/insig_3",
{'mode': mapper.MO_EXPRESSION,
'expression': 'y=x',
'src_min': [1,2,3,4],
'bound_min': mapper.BA_WRAP,
'bound_max': mapper.BA_CLAMP})
if i==750:
mon.modify({'src_name':"/testsend.1/outsig_3",
'dest_name':"/testrecv.1/insig_3",
'dest_max':[10,11,12,13],
'muted':True,
'mode': mapper.MO_LINEAR})
|
leeseuljeong/leeseulstack_neutron
|
neutron/agent/linux/ip_lib.py
|
Python
|
apache-2.0
| 20,419
| 0.000392
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import netaddr
from oslo.config import cfg
from neutron.agent.linux import utils
from neutron.common import exceptions
OPTS = [
cfg.BoolOpt('ip_lib_force_root',
default=False,
help=_('Force ip_lib calls to use the root helper')),
]
LOOPBACK_DEVNAME = 'lo'
# NOTE(ethuleau): depend of the version of iproute2, the vlan
# interface details vary.
VLAN_INTERFACE_DETAIL = ['vlan protocol 802.1q',
'vlan protocol 802.1Q',
'vlan id']
class SubProcessBase(object):
def __init__(self, root_helper=None, namespace=None,
log_fail_as_error=True):
self.root_helper = root_helper
self.namespace = namespace
self.log_fail_as_error = log_fail_as_error
try:
self.force_root = cfg.CONF.ip_lib_force_root
except cfg.NoSuchOptError:
|
# Only callers that nee
|
d to force use of the root helper
# need to register the option.
self.force_root = False
def _run(self, options, command, args):
if self.namespace:
return self._as_root(options, command, args)
elif self.force_root:
# Force use of the root helper to ensure that commands
# will execute in dom0 when running under XenServer/XCP.
return self._execute(options, command, args, self.root_helper,
log_fail_as_error=self.log_fail_as_error)
else:
return self._execute(options, command, args,
log_fail_as_error=self.log_fail_as_error)
def enforce_root_helper(self):
if not self.root_helper and os.geteuid() != 0:
raise exceptions.SudoRequired()
def _as_root(self, options, command, args, use_root_namespace=False):
self.enforce_root_helper()
namespace = self.namespace if not use_root_namespace else None
return self._execute(options,
command,
args,
self.root_helper,
namespace,
log_fail_as_error=self.log_fail_as_error)
@classmethod
def _execute(cls, options, command, args, root_helper=None,
namespace=None, log_fail_as_error=True):
opt_list = ['-%s' % o for o in options]
if namespace:
ip_cmd = ['ip', 'netns', 'exec', namespace, 'ip']
else:
ip_cmd = ['ip']
return utils.execute(ip_cmd + opt_list + [command] + list(args),
root_helper=root_helper,
log_fail_as_error=log_fail_as_error)
def set_log_fail_as_error(self, fail_with_error):
self.log_fail_as_error = fail_with_error
class IPWrapper(SubProcessBase):
def __init__(self, root_helper=None, namespace=None):
super(IPWrapper, self).__init__(root_helper=root_helper,
namespace=namespace)
self.netns = IpNetnsCommand(self)
def device(self, name):
return IPDevice(name, self.root_helper, self.namespace)
def get_devices(self, exclude_loopback=False):
retval = []
output = self._execute(['o', 'd'], 'link', ('list',),
self.root_helper, self.namespace)
for line in output.split('\n'):
if '<' not in line:
continue
tokens = line.split(' ', 2)
if len(tokens) == 3:
if any(v in tokens[2] for v in VLAN_INTERFACE_DETAIL):
delimiter = '@'
else:
delimiter = ':'
name = tokens[1].rpartition(delimiter)[0].strip()
if exclude_loopback and name == LOOPBACK_DEVNAME:
continue
retval.append(IPDevice(name,
self.root_helper,
self.namespace))
return retval
def add_tuntap(self, name, mode='tap'):
self._as_root('', 'tuntap', ('add', name, 'mode', mode))
return IPDevice(name, self.root_helper, self.namespace)
def add_veth(self, name1, name2, namespace2=None):
args = ['add', name1, 'type', 'veth', 'peer', 'name', name2]
if namespace2 is None:
namespace2 = self.namespace
else:
self.ensure_namespace(namespace2)
args += ['netns', namespace2]
self._as_root('', 'link', tuple(args))
return (IPDevice(name1, self.root_helper, self.namespace),
IPDevice(name2, self.root_helper, namespace2))
def del_veth(self, name):
"""Delete a virtual interface between two namespaces."""
self._as_root('', 'link', ('del', name))
def ensure_namespace(self, name):
if not self.netns.exists(name):
ip = self.netns.add(name)
lo = ip.device(LOOPBACK_DEVNAME)
lo.link.set_up()
else:
ip = IPWrapper(self.root_helper, name)
return ip
def namespace_is_empty(self):
return not self.get_devices(exclude_loopback=True)
def garbage_collect_namespace(self):
"""Conditionally destroy the namespace if it is empty."""
if self.namespace and self.netns.exists(self.namespace):
if self.namespace_is_empty():
self.netns.delete(self.namespace)
return True
return False
def add_device_to_namespace(self, device):
if self.namespace:
device.link.set_netns(self.namespace)
def add_vxlan(self, name, vni, group=None, dev=None, ttl=None, tos=None,
local=None, port=None, proxy=False):
cmd = ['add', name, 'type', 'vxlan', 'id', vni]
if group:
cmd.extend(['group', group])
if dev:
cmd.extend(['dev', dev])
if ttl:
cmd.extend(['ttl', ttl])
if tos:
cmd.extend(['tos', tos])
if local:
cmd.extend(['local', local])
if proxy:
cmd.append('proxy')
# tuple: min,max
if port and len(port) == 2:
cmd.extend(['port', port[0], port[1]])
elif port:
raise exceptions.NetworkVxlanPortRangeError(vxlan_range=port)
self._as_root('', 'link', cmd)
return (IPDevice(name, self.root_helper, self.namespace))
@classmethod
def get_namespaces(cls, root_helper):
output = cls._execute('', 'netns', ('list',), root_helper=root_helper)
return [l.strip() for l in output.split('\n')]
class IpRule(IPWrapper):
def add_rule_from(self, ip, table, rule_pr):
args = ['add', 'from', ip, 'lookup', table, 'priority', rule_pr]
ip = self._as_root('', 'rule', tuple(args))
return ip
def delete_rule_priority(self, rule_pr):
args = ['del', 'priority', rule_pr]
ip = self._as_root('', 'rule', tuple(args))
return ip
class IPDevice(SubProcessBase):
def __init__(self, name, root_helper=None, namespace=None):
super(IPDevice, self).__init__(root_helper=root_helper,
namespace=namespace)
self.name = name
self.link = IpLinkCommand(self)
self.addr = IpAddrCommand(self)
self.route = IpRouteCommand(self)
self.neigh = IpNeighCommand(self)
def __eq__(self, other):
retur
|
plotly/python-api
|
packages/python/plotly/plotly/validators/layout/ternary/aaxis/_tickfont.py
|
Python
|
mit
| 1,549
| 0.000646
|
import _plotly_utils.basevalidators
class TickfontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name="tickfont", parent_name="layout.ternary.aaxis", **kwargs
):
super(TickfontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Tickfont"),
data_docs=kwargs.pop(
"data_docs",
"""
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it op
|
erates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-pre
|
mise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
""",
),
**kwargs
)
|
torchbox/zencoder-py
|
setup.py
|
Python
|
mit
| 363
| 0.00551
|
from distutils.core import setup
setup(name='zencoder',
version='0.4',
description='Integration library for Zencoder',
author='Alex Schworer',
author_email='alex.schworer@gmail.com',
url='http://github.com/sch
|
worer
|
/zencoder-py',
license="MIT License",
install_requires=['httplib2'],
packages=['zencoder']
)
|
twenty0ne/CocosBuilder-wxPython
|
CCBDocument.py
|
Python
|
mit
| 26,533
| 0.043078
|
"""
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>centeredOrigin</key>
<false/>
<key>currentResolution</key>
<integer>0</integer>
<key>currentSequenceId</key>
<integer>0</integer>
<key>exportFlattenPaths</key>
<false/>
<key>exportPath</key>
<string>TestMenus.ccbi</string>
<key>exportPlugIn</key>
<string>ccbi</string>
<key>fileType</key>
<string>CocosBuilder</string>
<key>fileVersion</key>
<integer>4</integer>
<key>guides</key>
<array/>
<key>jsControlled</key>
<false/>
<key>nodeGraph</key>
<dict>
<key>baseClass</key>
<string>CCLayer</string>
<key>children</key>
<array>
<dict>
<key>baseClass</key>
<string>CCLayerGradient</string>
<key>children</key>
<array/>
<key>customClass</key>
<string></string>
<key>displayName</key>
<string>CCLayerGradient</string>
<key>memberVarAssignmentName</key>
<string></string>
<key>memberVarAssignmentType</key>
<integer>0</integer>
<key>properties</key>
<array>
<dict>
<key>name</key>
<string>contentSize</string>
<key>type</key>
<string>Size</string>
<key>value</key>
<array>
<real>100</real>
<real>100</real>
<integer>1</integer>
</array>
</dict>
<dict>
<key>name</key>
<string>anchorPoint</string>
<key>type</key>
<string>Point</string>
<key>value</key>
<array>
<real>0.0</real>
<real>0.0</real>
</array>
</dict>
<dict>
<key>name</key>
<string>scale</string>
<key>type</key>
<string>ScaleLock</string>
<key>value</key>
<array>
<real>1</real>
<real>1</real>
<false/>
<integer>0</integer>
</array>
</dict>
<dict>
<key>name</key>
<string>ignoreAnchorPointForPosition</string>
<key>type</key>
<string>Check</string>
<key>value</key>
<true/>
</dict>
<dict>
<key>name</key>
<string>touchEnabled</string>
<key>platform</key>
<string>iOS</string>
<key>type</key>
<string>Check</string>
<key>value</key>
<true/>
</dict>
<dict>
<key>name</key>
<string>accelerometerEnabled</string>
<key>platform</key>
<string>iOS</string>
<key>type</key>
<string>Check</string>
<key>value</key>
<false/>
</dict>
<dict>
<key>name</key>
<string>mouseEnabled</string>
<key>platform</key>
<string>Mac</string>
<key>type</key>
<string>Check</string>
<key>value</key>
<true/>
</dict>
<dict>
<key>name</key>
<string>keyboardEnabled</string>
<key>platform</key>
<string>Mac</string>
<key>type</key>
<string>Check</string>
<key>value</key>
<false/>
</dict>
<dict>
<key>name</key>
<string>startColor</string>
<key>type</key>
<string>Color3</string>
<key>value</key>
<array>
<integer>52</integer>
<integer>84</integer>
<integer>236</integer>
</array>
</dict>
<dict>
<key>name</key>
<string>endColor</string>
<key>type</key>
<string>Color3</string>
<key>value</key>
<array>
<integer>149</integer>
<integer>0</integer>
<integer>202</integer>
</array>
</dict>
<dict>
<key>name</key>
<string>vector</string>
<key>type</key>
<string>Point</string>
<key>value</key>
<array>
<real>0.0</real>
<real>-1</real>
</array>
</dict>
</array>
</dict>
<dict>
<key>baseClass</key>
<string>CCMenu</string>
<key>children</key>
<array>
<dict>
<key
|
>baseClass</key>
<string>CCMenuItemImage</stri
|
ng>
<key>children</key>
<array/>
<key>customClass</key>
<string></string>
<key>displayName</key>
<string>CCMenuItemImage</string>
<key>memberVarAssignmentName</key>
<string></string>
<key>memberVarAssignmentType</key>
<integer>0</integer>
<key>properties</key>
<array>
<dict>
<key>name</key>
<string>position</string>
<key>type</key>
<string>Position</string>
<key>value</key>
<array>
<real>20.833333969116211</real>
<real>5</real>
<integer>4</integer>
</array>
</dict>
<dict>
<key>name</key>
<string>anchorPoint</string>
<key>type</key>
<string>Point</string>
<key>value</key>
<array>
<real>0.5</real>
<real>0.5</real>
</array>
</dict>
<dict>
<key>name</key>
<string>scale</string>
<key>type</key>
<string>ScaleLock</string>
<key>value</key>
<array>
<real>1</real>
<real>1</real>
<false/>
<integer>1</integer>
</array>
</dict>
<dict>
<key>name</key>
<string>ignoreAnchorPointForPosition</string>
<key>type</key>
<string>Check</string>
<key>value</key>
<false/>
</dict>
<dict>
<key>name</key>
<string>block</string>
<key>type</key>
<string>Block</string>
<key>value</key>
<array>
<string>onMenuItemAClicked</string>
<integer>1</integer>
</array>
</dict>
<dict>
<key>name</key>
<string>isEnabled</string>
<key>type</key>
<string>Check</string>
<key>value</key>
<true/>
</dict>
<dict>
<key>name</key>
<string>normalSpriteFrame</string>
<key>type</key>
<string>SpriteFrame</string>
<key>value</key>
<array>
<string></string>
<string>ccbres/btn-a-0.png</string>
</array>
</dict>
<dict>
<key>name</key>
<string>selectedSpriteFrame</string>
<key>type</key>
<string>SpriteFrame</string>
<key>value</key>
<array>
<string></string>
<string>ccbres/btn-a-1.png</string>
</array>
</dict>
<dict>
<key>name</key>
<string>disabledSpriteFrame</string>
<key>type</key>
<string>SpriteFrame</string>
<key>value</key>
<array>
<string></string>
<string>ccbres/btn-a-2.png</string>
</array>
</dict>
</array>
</dict>
<dict>
<key>baseClass</key>
<string>CCMenuItemImage</string>
<key>children</key>
<array/>
<key>customClass</key>
<string></string>
<key>displayName</key>
<string>CCMenuItemImage</string>
<key>memberVarAssignmentName</key>
<string></string>
<key>memberVarAssignmentType</key>
<integer>0</integer>
<key>properties</key>
<array>
<dict>
<key>name</key>
<string>position</string>
<key>type</key>
<string>Position</string>
<key>value</key>
<array>
<real>50</real>
<real>5</real>
<integer>4</integer>
</array>
</dict>
<dict>
<key>name</key>
<string>anchorPoint</string>
<key>type</key>
<string>Point</string>
<key>value</key>
<array>
<real>0.5</real>
<real>0.5</real>
</array>
</dict>
<dict>
<key>name</key>
<string>scale</string>
<key>type</key>
<string>ScaleLock</string>
<key>value</key>
<array>
<real>1</real>
<real>1</real>
<false/>
<integer>1</integer>
</array>
</dict>
<dict>
<key>name</key>
<string>ignoreAnchorPointForPosition</string>
<key>type</key>
<string>Check</string>
<key>value</key>
<false/>
</dict>
<dict>
<key>name</ke
|
CSD-Public/stonix
|
src/tests/rules/unit_tests/zzzTestRulePreventXListen.py
|
Python
|
gpl-2.0
| 4,504
| 0.00222
|
#!/usr/bin/env python3
###############################################################################
# #
# Copyright 2019. Triad National Security, LLC. All rights reserved. #
# This program was produced under U.S. Government contract 89233218CNA000001 #
# for Los Alamos National Laboratory (LANL), which is operated by Triad #
# National Security, LLC for the U.S. Department of Energy/National Nuclear #
# Security Administration. #
# #
# All rights in the program are reserved by Triad National Security, LLC, and #
# the U.S. Department of Energy/National Nuclear Security Administration. The #
# Government is granted for itself and others acting on its behalf a #
# nonexclusive, paid-up, irrevocable worldwide license in this material to #
# reproduce, prepare derivative wor
|
ks, distribute copies to the public, #
# perform publicly and display publicly, and to permit others to do so. #
# #
###############################################################################
'''
This is a Unit Test for Rule
|
ConfigureAppleSoftwareUpdate
@author: ekkehard j. koch
@change: 03/18/2013 Original Implementation
@change: 2016/02/10 roy Added sys.path.append for being able to unit test this
file as well as with the test harness.
'''
import unittest
import sys
sys.path.append("../../../..")
from src.tests.lib.RuleTestTemplate import RuleTest
from src.stonix_resources.CommandHelper import CommandHelper
from src.tests.lib.logdispatcher_mock import LogPriority
from src.stonix_resources.rules.PreventXListen import PreventXListen
class zzzTestRulePreventXListen(RuleTest):
def setUp(self):
RuleTest.setUp(self)
self.rule = PreventXListen(self.config,
self.environ,
self.logdispatch,
self.statechglogger)
self.rulename = self.rule.rulename
self.rulenumber = self.rule.rulenumber
self.ch = CommandHelper(self.logdispatch)
def tearDown(self):
pass
def runTest(self):
self.simpleRuleTest()
def setConditionsForRule(self):
'''Configure system for the unit test
:param self: essential if you override this definition
:returns: boolean - If successful True; If failure False
@author: ekkehard j. koch
'''
success = True
return success
def checkReportForRule(self, pCompliance, pRuleSuccess):
'''check on whether report was correct
:param self: essential if you override this definition
:param pCompliance: the self.iscompliant value of rule
:param pRuleSuccess: did report run successfully
:returns: boolean - If successful True; If failure False
@author: ekkehard j. koch
'''
self.logdispatch.log(LogPriority.DEBUG, "pCompliance = " + \
str(pCompliance) + ".")
self.logdispatch.log(LogPriority.DEBUG, "pRuleSuccess = " + \
str(pRuleSuccess) + ".")
success = True
return success
def checkFixForRule(self, pRuleSuccess):
'''check on whether fix was correct
:param self: essential if you override this definition
:param pRuleSuccess: did report run successfully
:returns: boolean - If successful True; If failure False
@author: ekkehard j. koch
'''
self.logdispatch.log(LogPriority.DEBUG, "pRuleSuccess = " + \
str(pRuleSuccess) + ".")
success = True
return success
def checkUndoForRule(self, pRuleSuccess):
'''check on whether undo was correct
:param self: essential if you override this definition
:param pRuleSuccess: did report run successfully
:returns: boolean - If successful True; If failure False
@author: ekkehard j. koch
'''
self.logdispatch.log(LogPriority.DEBUG, "pRuleSuccess = " + \
str(pRuleSuccess) + ".")
success = True
return success
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
funbaker/astropy
|
astropy/stats/tests/test_bayesian_blocks.py
|
Python
|
bsd-3-clause
| 4,205
| 0
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose
from .. import bayesian_blocks, RegularEvents
def test_single_change_point(rseed=0):
rng = np.random.RandomState(rseed)
x = np.concatenate([rng.rand(100),
1 + rng.rand(200)])
bins = bayesian_blocks(x)
assert (len(bins) == 3)
assert_allclose(bins[1], 1, rtol=0.02)
def test_duplicate_events(rseed=0):
rng = np.random.RandomState(rseed)
t = rng.rand(100)
t[80:] = t[:20]
x = np.ones_like(t)
x[:20] += 1
bins1 = bayesian_blocks(t)
bins2 = bayesian_blocks(t[:80], x[:80])
assert_allclose(bins1, bins2)
def test_measures_fitness_homoscedastic(rseed=0):
rng = np.random.RandomState(rseed)
t = np.linspace(0, 1, 11)
x = np.exp(-0.5 * (t - 0.5) ** 2 / 0.01 ** 2)
sigma = 0.05
x = x + sigma * rng.randn(len(x))
bins = bayesian_blocks(t, x, sigma, fitness='measures')
assert_allclose(bins, [0, 0.45, 0.55, 1])
def test_measures_fitness_heteroscedastic():
rng = np.random.RandomState(1)
t = np.linspace(0, 1, 11)
x = np.exp(-0.5 * (t - 0.5) ** 2 / 0.01 ** 2)
sigma = 0.02 + 0.02 * rng.rand(len(x))
x = x + sigma * rng.randn(len(x))
bins = bayesian_blocks(t, x, sigma, fitness='measures')
assert_allclose(bins, [0, 0.45, 0.55, 1])
def test_regular_events():
rng = np.random.RandomState(0)
dt = 0.01
steps = np.concatenate([np.unique(rng.randint(0, 500, 100)),
np.unique(rng.randint(500, 1000, 200))])
t = dt * steps
# string fitness
bins1 = bayesian_blocks(t, fitness='regular_events', dt=dt)
assert (len(bins1) == 3)
assert_allclose(bins1[1], 5, rtol=0.05)
# class name fitness
bins2 = bayesian_
|
blocks(t, fitness=RegularEvents, dt=dt)
assert_allclose(bins1, bins2)
# class instance fitness
bins3 = bayesian_blocks(t, fitness=RegularEvents(dt=dt))
assert_allclose(bins1, bins3)
def test_errors():
rng = np.random.RandomState(0)
t = rng.rand(100)
# x must be integer or None for events
with pytest.raises(ValueError):
bayesian_blocks(t, fitness='events', x=t)
# x must be binary for regular events
with pytest.raises(Val
|
ueError):
bayesian_blocks(t, fitness='regular_events', x=10 * t, dt=1)
# x must be specified for measures
with pytest.raises(ValueError):
bayesian_blocks(t, fitness='measures')
# sigma cannot be specified without x
with pytest.raises(ValueError):
bayesian_blocks(t, fitness='events', sigma=0.5)
# length of x must match length of t
with pytest.raises(ValueError):
bayesian_blocks(t, fitness='measures', x=t[:-1])
# repeated values in t fail when x is specified
t2 = t.copy()
t2[1] = t2[0]
with pytest.raises(ValueError):
bayesian_blocks(t2, fitness='measures', x=t)
# sigma must be broadcastable with x
with pytest.raises(ValueError):
bayesian_blocks(t, fitness='measures', x=t, sigma=t[:-1])
def test_fitness_function_results():
"""Test results for several fitness functions"""
rng = np.random.RandomState(42)
# Event Data
t = rng.randn(100)
edges = bayesian_blocks(t, fitness='events')
assert_allclose(edges, [-2.6197451, -0.71094865, 0.36866702, 1.85227818])
# Event data with repeats
t[80:] = t[:20]
edges = bayesian_blocks(t, fitness='events', p0=0.01)
assert_allclose(edges, [-2.6197451, -0.47432431, -0.46202823, 1.85227818])
# Regular event data
dt = 0.01
t = dt * np.arange(1000)
x = np.zeros(len(t))
N = len(t) // 10
x[rng.randint(0, len(t), N)] = 1
x[rng.randint(0, len(t) // 2, N)] = 1
edges = bayesian_blocks(t, x, fitness='regular_events', dt=dt)
assert_allclose(edges, [0, 5.105, 9.99])
# Measured point data with errors
t = 100 * rng.rand(20)
x = np.exp(-0.5 * (t - 50) ** 2)
sigma = 0.1
x_obs = x + sigma * rng.randn(len(x))
edges = bayesian_blocks(t, x_obs, sigma, fitness='measures')
assert_allclose(edges, [4.360377, 48.456895, 52.597917, 99.455051])
|
google/vizier
|
vizier/pyvizier/shared/parameter_config.py
|
Python
|
apache-2.0
| 21,349
| 0.007494
|
"""ParameterConfig wraps ParameterConfig and ParameterSpec protos."""
import collections
import copy
import enum
import math
from typing import Generator, List, Optional, Sequence, Tuple, Union
from absl import logging
import attr
from vizier.pyvizier.shared import trial
class ParameterType(enum.IntEnum):
"""Valid Values for ParameterConfig.type."""
DOUBLE = 1
INTEGER = 2
CATEGORICAL = 3
DISCRETE = 4
def is_numeric(self) -> bool:
return self in [self.DOUBLE, self.INTEGER, self.DISCRETE]
class ScaleType(enum.IntEnum):
"""Valid Values for ParameterConfig.scale_type."""
LINEAR = 1
LOG = 2
REVERSE_LOG = 3
UNIFORM_DISCRETE = 4
class ExternalType(enum.IntEnum):
"""Valid Values for ParameterConfig.external_type."""
INTERNAL = 0
BOOLEAN = 1
INTEGER = 2
FLOAT = 3
# A sequence of possible internal parameter values.
MonotypeParameterSequence = Union[Sequence[Union[int, float]], Sequence[str]]
MonotypeParameterList = Union[List[Union[int, float]], List[str]]
def _validate_bounds(bounds: Union[Tuple[int, int], Tuple[float, float]]):
"""Validates the bounds."""
if len(bounds) != 2:
raise ValueError('Bounds must have length 2. Given: {}'.format(bounds))
lower = bounds[0]
upper = bounds[1]
if not all([math.isfinite(v) for v in (lower, upper)]):
raise ValueError(
'Both "lower" and "upper" must be finite. Given: (%f, %f)' %
(lower, upper))
if lower > upper:
raise ValueError(
'Lower cannot be greater than upper: given lower={} upper={}'.format(
lower, upper))
def _get_feasible_points_and_bounds(
feasible_values: Sequence[float]
) -> Tuple[List[float], Union[Tuple[int, int], Tuple[float, float]]]:
"""Validates and converts feasible values to floats."""
if not all([math.isfinite(p) for p in feasible_values]):
raise ValueError('Feasible values must all be finite. Given: {}' %
feasible_values)
feasible_points = list(sorted(feasible_values))
bounds = (feasible_points[0], feasible_points[-1])
return feasible_points, bounds
def _get_categories(categories: Sequence[str]) -> List[str]:
"""Returns the categories."""
return sorted(list(categories))
def _get_default_value(
param_type: ParameterType,
default_value: Union[float, int, str]) -> Union[float, int, str]:
"""Validates and converts the default_value to the right type."""
if (param_type in (ParameterType.DOUBLE, ParameterType.DISCRETE) and
(isinstance(default_value, float) or isinstance(default_value, int))):
return float(default_value)
elif (param_type == ParameterType.INTEGER and
(isinstance(default_value, float) or isinstance(default_value, int))):
if isinstance(default_value, int):
return default_value
else:
# Check if the float rounds nicely.
default_int_value = round(default_value)
if not math.isclose(default_value, default_int_value):
raise ValueError('default_value for an INTEGER parameter should be an '
'integer, got float: [{}]'.format(default_value))
return default_int_value
elif (param_type == ParameterType.CATEGORICAL and
isinstance(default_value, str)):
return default_value
raise Va
|
lueError(
'default_value has an incorrect type. ParameterType has type {}, '
'but default_value has type {}'.format(param_type.name,
type(default_value)))
@attr.s(auto_attribs=True, frozen=True, init=True, slots=True)
class ParameterConfig:
"""A Vizier ParameterConfi
|
g.
Use ParameterConfig.factory to create a valid instance.
"""
_name: str = attr.ib(
init=True, validator=attr.validators.instance_of(str), kw_only=True)
_type: ParameterType = attr.ib(
init=True,
validator=attr.validators.instance_of(ParameterType),
repr=lambda v: v.name if v is not None else 'None',
kw_only=True)
# Only one of _feasible_values, _bounds will be set at any given time.
_bounds: Optional[Union[Tuple[int, int], Tuple[float, float]]] = attr.ib(
init=True,
validator=attr.validators.optional(
attr.validators.deep_iterable(
member_validator=attr.validators.instance_of((int, float)),
iterable_validator=attr.validators.instance_of(tuple))),
kw_only=True)
_feasible_values: Optional[MonotypeParameterList] = attr.ib(
init=True,
validator=attr.validators.optional(
attr.validators.deep_iterable(
member_validator=attr.validators.instance_of((int, float, str)),
iterable_validator=attr.validators.instance_of((list, tuple)))),
kw_only=True)
_scale_type: Optional[ScaleType] = attr.ib(
init=True,
validator=attr.validators.optional(
attr.validators.instance_of(ScaleType)),
repr=lambda v: v.name if v is not None else 'None',
kw_only=True)
_default_value: Optional[Union[float, int, str]] = attr.ib(
init=True,
validator=attr.validators.optional(
attr.validators.instance_of((float, int, str))),
kw_only=True)
_external_type: Optional[ExternalType] = attr.ib(
init=True,
validator=attr.validators.optional(
attr.validators.instance_of(ExternalType)),
repr=lambda v: v.name if v is not None else 'None',
kw_only=True)
# Parent values for this ParameterConfig. If set, then this is a child
# ParameterConfig.
_matching_parent_values: Optional[MonotypeParameterList] = attr.ib(
init=True,
validator=attr.validators.optional(
attr.validators.deep_iterable(
member_validator=attr.validators.instance_of((int, float, str)),
iterable_validator=attr.validators.instance_of((list, tuple)))),
kw_only=True)
# Children ParameterConfig. If set, then this is a parent ParameterConfig.
_child_parameter_configs: Optional[List['ParameterConfig']] = attr.ib(
init=True, kw_only=True)
# Pytype treats instances of EnumTypeWrapper as types, but they can't be
# evaluated at runtime, so a Union[] of proto enums has to be a forward
# reference below.
@classmethod
def factory(
cls,
name: str,
*,
bounds: Optional[Union[Tuple[int, int], Tuple[float, float]]] = None,
feasible_values: Optional[MonotypeParameterSequence] = None,
children: Optional[Sequence[Tuple[MonotypeParameterSequence,
'ParameterConfig']]] = None,
scale_type: Optional[ScaleType] = None,
default_value: Optional[Union[float, int, str]] = None,
external_type: Optional[ExternalType] = ExternalType.INTERNAL
) -> 'ParameterConfig':
"""Factory method.
Args:
name: The parameter's name. Cannot be empty.
bounds: REQUIRED for INTEGER or DOUBLE type. Specifies (min, max). The
type of (min, max) determines the created ParameterConfig's type.
feasible_values: REQUIRED for DISCRETE or CATEGORICAL type. The elements'
type determines the created ParameterConfig's type.
children: sequence of tuples formatted as: (matching_parent_values,
ParameterConfig). See
cs/learning_vizier.service.ParameterConfig.child_parameter_configs for
details. ONLY THE TYPES ARE VALIDATED. If the child ParameterConfig
protos already have parent values set, they will be overridden by the
provided matching_parent_values.
scale_type: Scaling to be applied. NOT VALIDATED.
default_value: A default value for the Parameter.
external_type: An annotation indicating the type this parameter should be
cast to.
Returns:
A ParameterConfig object which wraps a partially validated proto.
Raises:
ValueError: Exactly one of feasible_values and bounds must be convertible
to Boolean true. Bounds and numeric feasible_values must be finite.
Bounds and feasible_values, if provided, must consist of
elements of the same type.
TypeError: If children's matching_parent_values are not compatible with
the ParameterConfig being created.
"""
if not name:
|
pypa/virtualenv
|
tests/unit/seed/wheels/test_wheels_util.py
|
Python
|
mit
| 901
| 0
|
from __future__ import absolute_import, unicode_literals
import pytest
from virtualenv.seed.wheels.embed import MAX, get_embed_wheel
from virtualenv.seed.wheels.util import Wheel
def test_wheel_support_no_python_requires(mocker):
wheel = get_embed_wheel("setuptools", for_py_version=None)
zip_mock = mocker.MagicM
|
ock()
mocker.patch("virtualenv.seed.wheels.util.ZipFile", new=zip_mock)
zip_mock.return_value.__enter__.return_value.read = lambda name: b""
supports = wheel.support_py("3.8")
assert supports is True
def test_bad_as_version_tuple():
with pytest.raises(ValueError, match="bad"):
Wheel.as_version_tuple("bad")
def test_wheel_not_support():
wheel = get_embed_wheel("setuptoo
|
ls", MAX)
assert wheel.support_py("3.3") is False
def test_wheel_repr():
wheel = get_embed_wheel("setuptools", MAX)
assert str(wheel.path) in repr(wheel)
|
TeamSPoon/logicmoo_workspace
|
packs_sys/logicmoo_nlu/ext/candc/src/lib/tokeniser/fixes.py
|
Python
|
mit
| 820
| 0.023171
|
#!/usr/bin/env python
import sys
def fix_terminator(tokens):
if not tokens:
return
last = tokens[-1]
if last not in ('.', '?', '!') and last.endswith('.'):
tokens[-1] = last[:-1]
tokens.append('.')
def balance_quotes(tokens):
count = tokens.count("'")
if not count:
return
processed = 0
|
for i, token in enumerate(tokens):
if token == "'":
if processed % 2 == 0 and (i == 0 or processed != count - 1):
tokens[i] = "`"
processed += 1
def output(
|
tokens):
if not tokens:
return
# fix_terminator(tokens)
balance_quotes(tokens)
print ' '.join(tokens)
prev = None
for line in sys.stdin:
tokens = line.split()
if len(tokens) == 1 and tokens[0] in ('"', "'", ')', ']'):
prev.append(tokens[0])
else:
output(prev)
prev = tokens
output(prev)
|
Ebag333/Pyfa
|
eos/effects/elitereconbonusradarstrength2.py
|
Python
|
gpl-3.0
| 384
| 0.002604
|
# eliteReconBonusRadarStrength2
#
# Used by:
# Ship: Chameleon
# Ship: Falcon
# Ship: Rook
type = "passive"
def handler(fit, ship, context):
fit.modules.filteredItemBoost(lambda
|
mod: mod.item.group.name == "ECM",
"scanRadarStrengthBonus", ship.getModifiedItemAttr("eliteBonusReconShip2"),
|
skill="Recon Ships")
|
memaldi/ckanext-sparql
|
ckanext/sparql/controller.py
|
Python
|
agpl-3.0
| 2,273
| 0
|
from ckan.controllers.package import PackageController
from ckan.plugins import toolkit as tk
from ckan.common import request
import ckan.model as model
import ckan.logic as logic
import logging
import requests
import ConfigParser
import os
import json
log = logging.getLogger(__name__)
config = ConfigParser.ConfigParser()
config.read(os.environ['CKAN_CONFIG'])
PLUGIN_SECTION = 'plugin:sparql'
WELIVE_API = config.get(PLUGIN_SECTION, 'welive_api')
RDF_FORMAT = ['rdf', 'application/rdf+xml', 'text/plain',
'application/x-turtle', 'text/rdf+n3']
c = tk.c
render = tk.render
get_action = logic.get_action
check_access = logic.check_access
class SPARQLController(PackageController):
def sparql_endpoint(self, id):
query = "SELECT * WHERE { ?s ?p ?o } LIMIT 10"
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'for_view': True,
'auth_user_obj': c.userobj}
try:
c.pkg_dict = get_action('package_show')(
context, {'id': id, 'include_tracking': True}
)
except logic.NotFound:
check_access('package_show', context, {'id': id})
resource = get_action('resource_show')(
context, {'id': id}
)
c.pkg_dict = get_action('package_show')(
context, {'id': resource['package_id'],
'include_tracking': True}
)
if request.method == 'POST':
query = request.POST.getone('sparql-query')
api_url = WELIVE_API +
|
'sparql-query-maker/query'
|
package_id = None
for resource in c.pkg_dict.get('resources', []):
if resource.get('format', '').lower() in RDF_FORMAT:
package_id = resource['id']
break
log.debug(package_id)
if package_id is not None:
payload = {'query': query, 'graphName': package_id}
r = requests.get(api_url, params=payload)
response = r.json()
result = json.loads(response['response'])
c.result = result
c.query = query
return render('sparql/sparql_endpoint.html')
|
odoo-chile/l10n_cl_invoice
|
models/partner.py
|
Python
|
agpl-3.0
| 2,394
| 0.006268
|
# -*- coding: utf-8 -*-
from openerp import fields, models, api
import re
class res_partner(models.Model):
_inherit = 'res.partner'
#def _get_default_tp_type(self):
# return self.env.ref('l10n_cl_invoice.res_IVARI').id
# todo: pasar los valores por defecto a un nuevo módulo
# por ejemplo "l10n_cl_res_partner_defaults
#def _get_default_doc_type(self):
# return self.env.ref('l10n_cl_invoice.dt_RUT').id
responsability_id = fields.Many2one(
'sii.responsability', 'Sale/Purchase Doc Type')
# dejamos el default pendiente para instalar en otro modulo,
# porque da problemas en instalaciones nuevas
# 'sii.responsability', 'Responsability', default = _get_default_tp_type)
document_type_id = fields.Many2one(
'sii.document_type', 'ID Type')
# 'sii.document_type', 'Document type', default = _get_default_doc_type)
document_number = fields.Char('Document number', size=64)
start_date = fields.Date('Start-up Date')
tp_sii_code = fields.Char('Tax Payer SII Code', compute='_get_tp_sii_code',
readonly=True)
@api.multi
@api.onchange('responsability_id')
def _get_tp_sii_code(self):
for record in self:
record.tp_sii_code=str(record.responsability_id.tp_sii_code)
@api.onchange('document_number', 'document_type_
|
id')
def onchange_document(self):
mod_obj = self.env['ir.model.data']
if self.document_number and ((
'sii.document_type',
self.document_type_id.id) == mod_obj.get_object_reference(
'l10n_cl_invoice', 'dt_RUT') or ('sii.document_type',
self.document_type_id.id) == mod_obj.get_object_reference(
'l10n
|
_cl_invoice', 'dt_RUN')):
document_number = (
re.sub('[^1234567890Kk]', '', str(
self.document_number))).zfill(9).upper()
self.vat = 'CL%s' % document_number
self.document_number = '%s.%s.%s-%s' % (
document_number[0:2], document_number[2:5],
document_number[5:8], document_number[-1])
elif self.document_number and (
'sii.document_type',
self.document_type_id.id) == mod_obj.get_object_reference(
'l10n_cl_invoice', 'dt_Sigd'):
self.document_number = ''
|
hack4impact/legal-checkup
|
app/models/api.py
|
Python
|
mit
| 2,549
| 0.003531
|
from .. import db
class ApiParameterLink(db.Model):
__tablename__ = 'api_parameter_link'
api_id = db.Column(db.Integer, db.ForeignKey('apis.id'), primary_key=True)
parameter_id = db.Column(db.Integer, db.ForeignKey('parameters.id'), primary_key=True)
parameter_description = db.Column(db.String(128))
api = db.relationship('Api', backref='parameter_associations')
parameter = db.relationship('Parameter', backref='api_associations')
def __init__(self, api, param, description):
self.api = api
self.parameter = param
self.parameter_description = description
param.incr_count
def __repr__(self):
return '<ApiParameterLink \'%s %s %s\'>' % (self.api.name, self.parameter.name,
self.parameter_description)
class Api(db.Model):
__tablename__ = 'apis'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
url = db.Column(db.String(128), unique=True)
region = db.Column(db.String(64))
description = db.Column(db.String(128))
parameters = db.relationship('Parameter', secondary='api_parameter_link')
def add_param(self, param, description):
self.parameter_associations.append(ApiParameterLink(api=self,
param=param, description=description))
def get_params(self):
param_links = ApiParameterLink.query.filter_by(api_id=self.id).all()
params = []
for link in param_links:
params.append(link.parameter)
return params
def __init__(self, name, url, region, description):
self.name = name
self.url = url
self.region = r
|
egion
self.description = description
def __repr__(self):
return '<Api \'%s %s %s %s\'>' % (self.name, self.url, self.region, self.description)
class Parameter(db.Model):
__tablename__ = 'parameters'
|
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
param_format = db.Column(db.String(64))
count = db.Column(db.Integer)
apis = db.relationship('Api', secondary='api_parameter_link')
def get_apis(self):
return ApiParameterLink.query.filter_by(parameter_id=self.id).all()
def incr_count(self):
self.count += 1
def __init__(self, name, param_format, count):
self.name = name
self.param_format = param_format
self.count = count
def __repr__(self):
return '<Parameter \'%s %s %s\'>' % (self.name, self.param_format,
self.count)
|
ovnicraft/odoo_addons
|
smile_base/models/ir_values.py
|
Python
|
agpl-3.0
| 4,447
| 0.002474
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Smile (<http://www.smile.fr>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import api, fields, models, _
from openerp.addons.base.ir.ir_values import ACTION_SLOTS, EXCLUDED_FIELDS
from openerp.exceptions import except_orm, Warning
from openerp.tools.safe_eval import safe_eval as eval
class IrValues(models.Model):
_inherit = 'ir.values'
_order = 'sequence, id'
@api.one
@api.depends('window_action_ids')
def _get_window_actions(self):
self.window_actions = ', %s, ' % ', '.join(map(str, self.window_action_ids.ids))
sequence = fields.Integer('Sequence')
window_action_ids = fields.Many2many('ir.actions.act_window', 'ir_values_window_actions_rel',
'ir_value_id', 'window_action_id', 'Menus')
window_actions = fields.Char('Window Actions', size=128, compute='_get_window_actions',
default=', , ', store=True)
@api.model
def get_actions(self, action_slot, model, res_id=False):
assert action_slot in ACTION_SLOTS, 'Illegal action slot value: %s' % action_slot
# use a direct SQL query for performance reasons,
# this is called very often
# Add by Smile #
cr, uid, context = self.env.args
query = """SELECT v.id, v.name, v.value FROM ir_values v
WHERE v.key = %s AND v.key2 = %s
AND v.model = %s
AND (v.res_id = %s
OR v.res_id IS NULL
OR v.res_id = 0)
AND (v.window_actions IS NULL
OR v.window_actions=', , '
OR v.window_actions like %s)
ORDER BY v.sequence, v.id"""
cr.execute(query, ('action', action_slot, model, res_id or None, ', %s, ' % context.get('act_window_id', '')))
################
results = {}
for action in cr.dictfetchall():
if not action['value']:
continue # skip if undefined
action_model, action_id = action['value'].split(',')
if not eval(action_id):
continue
fields = [field for field in self.env[action_model]._fields
if field not in EXCLUDED_FIELDS]
# FIXME: needs cleanup
try:
action_def = self.env[action_model].browse(int(action_id)).read(fields)
if isinstance(action_def, list):
action_def = action_def[0]
if action_def:
if action_model in ('ir.actions.report.xml', 'ir.actions.act_window',
|
'ir.actions.wizard'):
groups = action_def.get('groups_id')
if groups:
cr.execute('SELECT 1 FROM res_groups_users_rel WHERE gid IN %s AND uid=%s',
(tuple(gr
|
oups), uid))
if not cr.fetchone():
if action['name'] == 'Menuitem':
raise Warning(_('You do not have the permission to perform this operation !!!'))
continue
# keep only the first action registered for each action name
results[action['name']] = (action['id'], action['name'], action_def)
except (except_orm, Warning):
continue
return sorted(results.values())
|
Spiderlover/Toontown
|
toontown/hood/DLHood.py
|
Python
|
mit
| 1,237
| 0.00485
|
from toontown.safezone.DLSafeZoneLoader import DLSafeZoneLoader
from toontown.town.DLTownLoader import DLTownLoader
from toontown.toonbase import ToontownGlobals
from toontown.hood.ToonHood import ToonHood
class DLHood(ToonHood):
not
|
ify = directNotify.newCategory('DLHood')
ID = ToontownGlobals.DonaldsDreamland
TOWNLOADER_CLASS = DLTownLoader
SAFEZONELOADER_CLASS = DLSafeZoneLoader
STO
|
RAGE_DNA = 'phase_8/dna/storage_DL.pdna'
SKY_FILE = 'phase_8/models/props/DL_sky'
TITLE_COLOR = (1.0, 0.9, 0.5, 1.0)
HOLIDAY_DNA = {
ToontownGlobals.WINTER_DECORATIONS: ['phase_8/dna/winter_storage_DL.pdna'],
ToontownGlobals.WACKY_WINTER_DECORATIONS: ['phase_8/dna/winter_storage_DL.pdna'],
ToontownGlobals.HALLOWEEN_PROPS: ['phase_8/dna/halloween_props_storage_DL.pdna'],
ToontownGlobals.SPOOKY_PROPS: ['phase_8/dna/halloween_props_storage_DL.pdna']}
def enter(self, requestStatus):
ToonHood.enter(self, requestStatus)
base.camLens.setNearFar(ToontownGlobals.DreamlandCameraNear, ToontownGlobals.DreamlandCameraFar)
def exit(self):
base.camLens.setNearFar(ToontownGlobals.DefaultCameraNear, ToontownGlobals.DefaultCameraFar)
ToonHood.exit(self)
|
wilsonianb/nacl_contracts
|
build/update_pnacl_tool_revisions.py
|
Python
|
bsd-3-clause
| 16,688
| 0.00797
|
#!/usr/bin/python
# Copyright (c) 2013 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import collections
import datetime
import email.mime.text
import getpass
import os
import re
import smtplib
import subprocess
import sys
import tempfile
import urllib2
BUILD_DIR = os.path.dirname(__file__)
NACL_DIR = os.path.dirname(BUILD_DIR)
TOOLCHAIN_REV_DIR = os.path.join(NACL_DIR, 'toolchain_revisions')
PKG_VER = os.path.join(BUILD_DIR, 'package_version', 'package_version.py')
PKGS = ['pnacl_newlib', 'pnacl_translator']
REV_FILES = [os.path.join(TOOLCHAIN_REV_DIR, '%s.json' % package)
for package in PKGS]
def ParseArgs(args):
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""Update pnacl_newlib.json PNaCl version.
LLVM and other projects are checked-in to the NaCl repository, but their
head isn't necessarily the one that we currently use in PNaCl. The
pnacl_newlib.json and pnacl_translator.json files point at subversion
revisions to use for tools such as LLVM. Our build process then
downloads pre-built tool tarballs from the toolchain build waterfall.
git repository before running this script:
______________________
| |
v |
...----A------B------C------D------ NaCl HEAD
^ ^ ^ ^
| | | |__ Latest pnacl_{newlib,translator}.json update.
| | |
| | |__ A newer LLVM change (LLVM repository HEAD).
| |
| |__ Oldest LLVM change since this PNaCl version.
|
|__ pnacl_{newlib,translator}.json points at an older LLVM change.
git repository after running this script:
_______________
| |
v |
...----A------B------C------D------E------ NaCl HEAD
Note that there could be any number of non-PNaCl changes between each of
these changelists, and that the user can also decide to update the
pointer to B instead of C.
There is further complication when toolchain builds are merged.
""")
parser.add_argument('--email', metavar='ADDRESS', type=str,
default=getpass.getuser()+'@chromium.org',
help="Email address to send errors to.")
parser.add_argument('--svn-id', metavar='SVN_ID', type=int, default=0,
help="Update to a specific SVN ID instead of the most "
"recent SVN ID with a PNaCl change. This value must "
"be more recent than the one in the current "
"pnacl_newlib.json. This option is useful when multiple "
"changelists' toolchain builds were merged, or when "
"too many PNaCl changes would be pulled in at the "
"same time.")
parser.add_argument('--dry-run', default=False, action='store_true',
help="Print the changelist that would be sent, but "
"don't actually send anything to review.")
# TODO(jfb) The following options come from download_toolchain.py and
# should be shared in some way.
parser.add_argument('--filter_out_predicates', default=[],
help="Toolchains to filter out.")
return parser.parse_args()
def ExecCommand(command):
try:
return subprocess.check_output(command, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
sys.stderr.write('\nRunning `%s` returned %i, got:\n%s\n' %
(' '.join(e.cmd), e.returncode, e.output))
raise
def GetCurrentRevision():
return [ExecCommand([sys.executable, PKG_VER,
'getrevision',
'--revision-package', package]).strip()
for package in PKGS]
def SetCurrentRevision(revision_num):
for package in PKGS:
ExecCommand([sys.executable, PKG_VER] +
# TODO(dschuff) pnacl_newlib shouldn't use cloud-bucket
# once we switch fully to toolchain_build.
(['--cloud-bucket', 'nativeclient-archive2/pnacl_buildsh'] if
package == 'pnacl_newlib' else []) +
['setrevision',
'--revision-package', package,
'--revision', str(revision_num)])
def GitCurrentBranch():
return ExecCommand(['git', 'symbolic-ref', 'HEAD', '--short']).strip()
def GitStatus():
"""List of statuses, one per path, of paths in the current git branch.
Ignores untracked paths."""
out = ExecCommand(['git', 'status', '--porcelain']).strip().split('\n')
return [f.strip() for f in out if not re.match('^\?\? (.*)$', f.strip())]
def SyncSources():
"""Assumes a git-svn checkout of NaCl. See:
www.chromium.org/nativeclient/how-tos/how-to-use-git-svn-with-native-client
"""
ExecCommand(['gclient', 'sync'])
def GitCommitInfo(info='', obj=None, num=None, extra=[]):
"""Commit information, where info is one of the shorthands in git_formats.
obj can be a path or a hash.
num is the number of results to return.
extra is a list of optional extra arguments."""
# Shorthands for git's pretty formats.
# See PRETTY FORMATS format:<string> in `git help log`.
git_formats = {
'': '',
'hash': '%H',
'date': '%ci',
'author': '%aN',
'subject': '%s',
'body': '%b',
}
cmd = ['git', 'log', '--format=format:%s' % git_formats[info]] + extra
if num: cmd += ['-n'+str(num)]
if obj: cmd += [obj]
return ExecCommand(cmd).strip()
def GitCommitsSince(date):
"""List of commit hashes since a particular date,
in reverse chronological order."""
return GitCommitInfo(info='hash',
extra=['--since="%s"' % date]).split('\n')
def GitFilesChanged
|
(commit_hash):
"""List of files changed in a commit."""
return GitCommitInfo(obj=commit_hash, num=1,
extra=['--name-only']).split('\n')
def GitChangesPath(commit_hash, path):
"""Returns True if the commit changes a file under the given path."""
return any([
re.search('^' + path, f.strip()) for f in
GitFilesChanged(commit_hash)])
def GitBranchExists(name):
return len(ExecCommand(['git', 'branch', '--list
|
', name]).strip()) != 0
def GitCheckout(branch, force=False):
"""Checkout an existing branch.
force throws away local changes."""
ExecCommand(['git', 'checkout'] +
(['--force'] if force else []) +
[branch])
def GitCheckoutNewBranch(branch):
"""Create and checkout a new git branch."""
ExecCommand(['git', 'checkout', '-b', branch])
def GitDeleteBranch(branch, force=False):
"""Force-delete a branch."""
ExecCommand(['git', 'branch', '-D' if force else '-d', branch])
def GitAdd(file):
ExecCommand(['git', 'add', file])
def GitCommit(message):
with tempfile.NamedTemporaryFile() as tmp:
tmp.write(message)
tmp.flush()
ExecCommand(['git', 'commit', '--file=%s' % tmp.name])
def UploadChanges():
"""Upload changes, don't prompt."""
# TODO(jfb) Using the commit queue and avoiding git try + manual commit
# would be much nicer. See '--use-commit-queue'
return ExecCommand(['git', 'cl', 'upload', '--send-mail', '-f'])
def GitTry():
return ExecCommand(['git', 'try'])
def FindCommitWithGitSvnId(git_svn_id):
while True:
# This command needs to retry because git-svn partially rebuild its
# revision map for every commit. Asking it a second time fixes the
# issue.
out = ExecCommand(['git', 'svn', 'find-rev', 'r' + git_svn_id]).strip()
if not re.match('^Partial-rebuilding ', out):
break
return out
def CommitMessageToCleanDict(commit_message):
"""Extract and clean commit message fields that follow the NaCl commit
message convention. Don't repeat them as-is, to avoid confusing our
infrastructure."""
res = {}
fields = [
['git svn id', ('\s*git-svn-id: '
'svn://[
|
plotly/python-api
|
packages/python/plotly/plotly/validators/scatterternary/selected/marker/_size.py
|
Python
|
mit
| 509
| 0.001965
|
import _plotly_utils.basevalidators
class SizeValid
|
ator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="size", parent_name="scatterternary.selected.marker", **kwargs
):
super(SizeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
min=kwargs.pop("min", 0),
|
role=kwargs.pop("role", "style"),
**kwargs
)
|
oleiade/Fridge
|
fabfile/__init__.py
|
Python
|
mit
| 365
| 0
|
#
|
-*- coding: utf-8 -*-
# Copyright (c) 2012 theo crevon
#
# See the file LICENSE for copying permission.
import install
import config
import service
from fabric.api import env, task
env.hosts = ['localhost']
@task
def bootstrap():
"""Deploy, configure, and start Fridge on hosts""
|
"
install.bootstrap()
config.bootstrap()
service.start_all()
|
theanalyst/cinder
|
cinder/openstack/common/db/sqlalchemy/test_migrations.py
|
Python
|
apache-2.0
| 11,078
| 0
|
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2012-2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import logging
import os
import subprocess
import lockfile
from oslotest import base as test_base
from six import moves
from six.moves.urllib import parse
import sqlalchemy
import sqlalchemy.exc
from cinder.openstack.common.db.sqlalchemy import utils
from cinder.openstack.common.gettextutils import _LE
LOG = logging.getLogger(__name__)
def _have_mysql(user, passwd, database):
present = os.environ.get('TEST_MYSQL_PRESENT')
if present is None:
return utils.is_backend_avail(backend='mysql',
|
user=user,
passwd=passwd,
database=database)
return present.lower() in ('', 'true')
def _have_postgresql(user, passwd, database):
present = os.environ.get('TEST_POSTGRESQL_PRESENT')
if present is None:
return utils.is_backend_avail(backend=
|
'postgres',
user=user,
passwd=passwd,
database=database)
return present.lower() in ('', 'true')
def _set_db_lock(lock_path=None, lock_prefix=None):
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
try:
path = lock_path or os.environ.get("CINDER_LOCK_PATH")
lock = lockfile.FileLock(os.path.join(path, lock_prefix))
with lock:
LOG.debug('Got lock "%s"' % f.__name__)
return f(*args, **kwargs)
finally:
LOG.debug('Lock released "%s"' % f.__name__)
return wrapper
return decorator
class BaseMigrationTestCase(test_base.BaseTestCase):
"""Base class fort testing of migration utils."""
def __init__(self, *args, **kwargs):
super(BaseMigrationTestCase, self).__init__(*args, **kwargs)
self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
'test_migrations.conf')
# Test machines can set the TEST_MIGRATIONS_CONF variable
# to override the location of the config file for migration testing
self.CONFIG_FILE_PATH = os.environ.get('TEST_MIGRATIONS_CONF',
self.DEFAULT_CONFIG_FILE)
self.test_databases = {}
self.migration_api = None
def setUp(self):
super(BaseMigrationTestCase, self).setUp()
# Load test databases from the config file. Only do this
# once. No need to re-run this on each test...
LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH)
if os.path.exists(self.CONFIG_FILE_PATH):
cp = moves.configparser.RawConfigParser()
try:
cp.read(self.CONFIG_FILE_PATH)
defaults = cp.defaults()
for key, value in defaults.items():
self.test_databases[key] = value
except moves.configparser.ParsingError as e:
self.fail("Failed to read test_migrations.conf config "
"file. Got error: %s" % e)
else:
self.fail("Failed to find test_migrations.conf config "
"file.")
self.engines = {}
for key, value in self.test_databases.items():
self.engines[key] = sqlalchemy.create_engine(value)
# We start each test case with a completely blank slate.
self._reset_databases()
def tearDown(self):
# We destroy the test data store between each test case,
# and recreate it, which ensures that we have no side-effects
# from the tests
self._reset_databases()
super(BaseMigrationTestCase, self).tearDown()
def execute_cmd(self, cmd=None):
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output = process.communicate()[0]
LOG.debug(output)
self.assertEqual(0, process.returncode,
"Failed to run: %s\n%s" % (cmd, output))
def _reset_pg(self, conn_pieces):
(user,
password,
database,
host) = utils.get_db_connection_info(conn_pieces)
os.environ['PGPASSWORD'] = password
os.environ['PGUSER'] = user
# note(boris-42): We must create and drop database, we can't
# drop database which we have connected to, so for such
# operations there is a special database template1.
sqlcmd = ("psql -w -U %(user)s -h %(host)s -c"
" '%(sql)s' -d template1")
sql = ("drop database if exists %s;") % database
droptable = sqlcmd % {'user': user, 'host': host, 'sql': sql}
self.execute_cmd(droptable)
sql = ("create database %s;") % database
createtable = sqlcmd % {'user': user, 'host': host, 'sql': sql}
self.execute_cmd(createtable)
os.unsetenv('PGPASSWORD')
os.unsetenv('PGUSER')
@_set_db_lock(lock_prefix='migration_tests-')
def _reset_databases(self):
for key, engine in self.engines.items():
conn_string = self.test_databases[key]
conn_pieces = parse.urlparse(conn_string)
engine.dispose()
if conn_string.startswith('sqlite'):
# We can just delete the SQLite database, which is
# the easiest and cleanest solution
db_path = conn_pieces.path.strip('/')
if os.path.exists(db_path):
os.unlink(db_path)
# No need to recreate the SQLite DB. SQLite will
# create it for us if it's not there...
elif conn_string.startswith('mysql'):
# We can execute the MySQL client to destroy and re-create
# the MYSQL database, which is easier and less error-prone
# than using SQLAlchemy to do this via MetaData...trust me.
(user, password, database, host) = \
utils.get_db_connection_info(conn_pieces)
sql = ("drop database if exists %(db)s; "
"create database %(db)s;") % {'db': database}
cmd = ("mysql -u \"%(user)s\" -p\"%(password)s\" -h %(host)s "
"-e \"%(sql)s\"") % {'user': user, 'password': password,
'host': host, 'sql': sql}
self.execute_cmd(cmd)
elif conn_string.startswith('postgresql'):
self._reset_pg(conn_pieces)
class WalkVersionsMixin(object):
def _walk_versions(self, engine=None, snake_walk=False, downgrade=True):
# Determine latest version script from the repo, then
# upgrade from 1 through to the latest, with no data
# in the databases. This just checks that the schema itself
# upgrades successfully.
# Place the database under version control
self.migration_api.version_control(engine, self.REPOSITORY,
self.INIT_VERSION)
self.assertEqual(self.INIT_VERSION,
self.migration_api.db_version(engine,
self.REPOSITORY))
LOG.debug('latest version is %s' % self.REPOSITORY.latest)
versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1)
for version in versions:
|
terbolous/SickRage
|
lib/feedparser/namespaces/georss.py
|
Python
|
gpl-3.0
| 11,117
| 0.003868
|
# Support for the GeoRSS format
# Copyright 2010-2015 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, unicode_literals
from ..util import FeedParserDict
class Namespace(object):
supported_namespaces = {
'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
'http://www.georss.org/georss': 'georss',
'http://www.opengis.net/gml': 'gml',
}
def __init__(self):
self.ingeometry = 0
super(Namespace, self).__init__()
def _start_georssgeom(self, attrsD):
self.push('geometry', 0)
context = self._getContext()
context['where'] = FeedParserDict()
_start_georss_point = _start_georssgeom
_start_georss_line = _start_georssgeom
_start_georss_polygon = _start_georssgeom
_start_georss_box = _start_georssgeom
def _save_where(self, geometry):
context = self._getContext()
context['where'].update(geometry)
def _end_georss_point(self):
geometry = _parse_georss_point(self.pop('geometry'))
if geometry:
self._save_where(geometry)
def _end_georss_line(self):
geometry = _parse_georss_line(self.pop('geometry'))
if geometry:
self._save_where(geometry)
def _end_georss_polygon(self):
this = self.pop('geometry')
geometry = _parse_georss_polygon(this)
if geometry:
self._save_where(geometry)
def _end_georss_box(self):
geometry = _parse_georss_box(self.pop('geometry'))
if geometry:
self._save_where(geometry)
def _start_where(self, attrsD):
self.push('where', 0)
context = self._getContext()
context['where'] = FeedParserDict()
_start_georss_where = _start_where
def _parse_srs_attrs(self, attrsD):
srsName = attrsD.get('srsname')
try:
srsDimension = int(attrsD.get('srsdimension', '2'))
except ValueError:
srsDimension = 2
context = self._getContext()
context['where']['srsName'] = srsName
context['where']['srsDimension'] = srsDimension
def _start_gml_point(self, attrsD):
self._parse_srs_attrs(attrsD)
self.ingeometry = 1
self.push('geometry', 0)
def _start_gml_linestring(self, attrsD):
self._parse_srs_attrs(attrsD)
self.ingeometry = 'linestring'
self.push('geometry', 0)
def _start_gml_polygon(self, attrsD):
self._parse_srs_attrs(attrsD)
self.push('geometry', 0)
def _start_gml_exterior(self, attrsD):
self.push('geometry', 0)
def _start_gml_linearring(self, attrsD):
self.ingeometry = 'polygon'
self.push('geometry', 0)
def _start_gml_pos(self, attrsD):
self.push('pos', 0)
def _end_gml_pos(self):
this = self.pop('pos')
context = self._getContext()
srsName = context['where'].get('srsName')
srsDimension = context['where'].get('srsDimension', 2)
swap = True
if srsName and "EPSG" in srsName:
epsg = int(srsName.split(":")[-1])
swap = bool(epsg in _geogCS)
geometry = _parse_georss_point(this, swap=swap, dims=srsDimension)
if geometry:
self._save_where(geometry)
def _start_gml_poslist(self, attrsD):
self.push('pos', 0)
def _end_gml_poslist(self):
this = self.pop('pos')
context = self._getContext()
srsName = context['where'].get('srsName')
srsDimension = context['where'].get('srsDimension', 2)
swap = True
if srsName and "EPSG" in srsName:
epsg = int(srsName.split(":")[-1])
swap = bool(epsg in _geogCS)
geometry = _parse_poslist(
this, self.ingeometry, swap=swap, dims=srsDimension)
if geometry:
self._save_where(geometry)
def _end_geom(self):
self.ingeometry = 0
self.pop('geometry')
_end_gml_point = _end_geom
_end_gml_linestring = _end_geom
_end_gml_linearring = _end_geom
_end_gml_exterior = _end_geom
_end_gml_polygon = _end_geom
def _end_where(self):
self.pop('where')
_end_georss_where = _end_where
# GeoRSS geometry parsers. Each return a dict with 'type' and 'coordinates'
# items, or None in the case of a parsing error.
def _parse_poslist(value, geom_type, swap=True, dims=2):
if geom_type == 'linestring':
|
return _parse_georss_line(valu
|
e, swap, dims)
elif geom_type == 'polygon':
ring = _parse_georss_line(value, swap, dims)
return {'type': 'Polygon', 'coordinates': (ring['coordinates'],)}
else:
return None
def _gen_georss_coords(value, swap=True, dims=2):
# A generator of (lon, lat) pairs from a string of encoded GeoRSS
# coordinates. Converts to floats and swaps order.
latlons = (float(ll) for ll in value.replace(',', ' ').split())
while True:
t = [next(latlons), next(latlons)][::swap and -1 or 1]
if dims == 3:
t.append(next(latlons))
yield tuple(t)
def _parse_georss_point(value, swap=True, dims=2):
# A point contains a single latitude-longitude pair, separated by
# whitespace. We'll also handle comma separators.
try:
coords = list(_gen_georss_coords(value, swap, dims))
return {'type': 'Point', 'coordinates': coords[0]}
except (IndexError, ValueError):
return None
def _parse_georss_line(value, swap=True, dims=2):
# A line contains a space separated list of latitude-longitude pairs in
# WGS84 coordinate reference system, with each pair separated by
# whitespace. There must be at least two pairs.
try:
coords = list(_gen_georss_coords(value, swap, dims))
return {'type': 'LineString', 'coordinates': coords}
except (IndexError, ValueError):
return None
def _parse_georss_polygon(value, swap=True, dims=2):
# A polygon contains a space separated list of latitude-longitude pairs,
# with each pair separated by whitespace. There must be at least four
# pairs, with the last being identical to the first (so a polygon has a
# minimum of three actual points).
try:
ring = list(_gen_georss_coords(value, swap, dims))
except (IndexError, ValueError):
return None
if len(ring) < 4:
return None
return {'type': 'Polygon', 'coordinates': (ring,)}
def _parse_georss_box(value, swap=True, dims=2):
# A bounding box is a rectangular region, often used to define the extents
# of a map or a rough area of interest. A box contains two space separate
# latitude-longitude pairs, with each pair separated by whitespace. The
# first pair is the lower corner, the second is the upper corner.
try:
coords =
|
Barmaley13/BA-Software
|
gate/sleepy_mesh/node/headers/header/unit.py
|
Python
|
gpl-3.0
| 5,361
| 0.003917
|
"""
Header Unit Class
"""
### INCLUDES ###
import logging
from gate.conversions import round_int
from variable import HeaderVariable
from common import MIN_ALARM, MAX_ALARM
### CONSTANTS ###
## Logger ##
LOGGER = logging.getLogger(__name__)
# LOGGER.setLevel(logging.DEBUG)
### CLASSES ###
class HeaderUnit(HeaderVariable):
""" ADC Unit Class"""
def __init__(self, formula, **kwargs):
"""
Initializes header unit, done as part of Header initialization by using provided unit dictionary.
:param formula: formula to calculate this variable. You can use any internal constant names or
internal variable names in this formula that have been declared earlier.
:param measuring_units: Official unit name that will be displayed to user via web interface.
:param min_value: Minimum constant value or a formula to calculate it. Used for validation.
:param max_value: Maximum constant value or a formula to calculate it. Used for validation.
:param str_format: Specify string formatting. Used for display and logs.
:return: Header Unit instance
"""
defaults = {
# Local Must Haves
'formula': formula,
# Internal
'_external': True,
# Defaults
'measuring_units': '',
'min_value': 0,
'max_value': 100,
# Min Alarm
'min_alarm_message': MIN_ALARM,
# Max Alarm
'max_alarm_message': MAX_ALARM,
'step': 0.01,
'str_format': '{0:.2f}'
}
defaults.update(kwargs)
super(HeaderUnit, self).__init__(**defaults)
# Fetch Value Methods
def get_min(self, provider):
"""
Get minimum value for the selected unit. Either fetch static value or calculate using internal formula.
:param provider: data provider we are working with
:return: minimum value for the selected unit.
"""
return self._get_min_max('min', provider)
def get_max(self, provider):
"""
Get maximum value for the selected unit. Either fetch static value or calculate using internal formula.
:param provider: data provider we are working with
:return: maximum value for the selected unit.
"""
return self._get_min_max('max', provider)
def _get_min_max(self, selector, provider):
"""
Internal shortcut for min/max value fetch/calculation
:param selector: ``min`` or ``max``
:param provider: data provider we are working with
:return: min/max value
"""
output = None
if selector in ('min', 'max'):
if self.enables(provider, 'const_set'):
_selector_value = self[selector + '_value']
if type(_selector_value) in (int, float):
# We have constant value!
output = _selector_value
else:
# We have another constant or variable!
for node_field in ('constants', 'data_out'):
if _selector_value in provider[node_field][self['data_field']]:
output = provider[node_field][self['data_field']][_selector_value]
break
if output is not None:
_rounding_scheme = {'min': 'floor', 'max': 'ceil'}
output = round_int(output, _rounding_scheme[selector], 0)
return output
def get_float(self, provider, data_in=None):
"""
Fetches current value for the selected units if log data is not provided.
Otherwise, applies formulas using provided data_in and fetches results dictionary.
:param provider: data provider that we are working with
:return: current value dictionary/calculated value dictionary using log data
"""
output = None
# if data_in is None:
# header_enable = self.enables(provider, 'live_enables')
# header_enable |= self.enables(provider, 'diag_enables')
# else:
# header_enable = self.enables(provider, 'log_enables')
header_enable = self.enables(provider, 'const_set')
if header_enable:
data_out = {}
if data_in is None:
|
data_out = provider['data_out'][self['data_field']]
elif self['data_field'] in data_in:
data_
|
out = data_in[self['data_field']]
if self['internal_name'] in data_out:
output = data_out[self['internal_name']]
return output
def get_string(self, provider, data_in=None):
"""
Fetches current value for the selected units if log data is not provided.
Otherwise, applies formulas using provided data_in and fetches results dictionary.
:param provider: data provider that we are working with
:return: current value dictionary/calculated value dictionary using log data
"""
output = self.get_float(provider, data_in)
if output is not None:
if type(self['str_format']) in (str, unicode):
output = self['str_format'].format(output)
else:
output = self['str_format'](output)
return output
|
elffersj/cnfgen
|
tests/test_subsetcardinality.py
|
Python
|
gpl-3.0
| 2,350
| 0.00766
|
import networkx as nx
import sys
from cnfformula import CNF
from cnfformula import SubsetCardinalityFormula
from . import TestCNFBase
from .test_commandline_helper import TestCommandline
from .test_graph_helper import complete_bipartite_graph_proper
class TestSubsetCardinality(TestCNFBase):
def test_empty(self):
G = CNF()
graph =
|
nx.Graph()
F = SubsetCardinalityFormula(graph)
self.assertCnfEqual(F,G)
def test_not_bipartite(self):
graph = nx.complete_graph(3)
with self.assertRaises(KeyError):
SubsetCardinalityFormula(graph)
def test_complete_even(self):
graph = complet
|
e_bipartite_graph_proper(2,2)
F = SubsetCardinalityFormula(graph)
dimacs = """\
p cnf 4 4
1 2 0
3 4 0
-1 -3 0
-2 -4 0
"""
self.assertCnfEqualsDimacs(F,dimacs)
def test_complete_even_odd(self):
graph = complete_bipartite_graph_proper(2,3)
F = SubsetCardinalityFormula(graph)
dimacs = """\
p cnf 6 9
1 2 0
1 3 0
2 3 0
4 5 0
4 6 0
5 6 0
-1 -4 0
-2 -5 0
-3 -6 0
"""
self.assertCnfEqualsDimacs(F,dimacs)
def test_complete_odd(self):
graph = complete_bipartite_graph_proper(3,3)
F = SubsetCardinalityFormula(graph)
dimacs = """\
p cnf 9 18
1 2 0
1 3 0
2 3 0
4 5 0
4 6 0
5 6 0
7 8 0
7 9 0
8 9 0
-1 -4 0
-1 -7 0
-4 -7 0
-2 -5 0
-2 -8 0
-5 -8 0
-3 -6 0
-3 -9 0
-6 -9 0
"""
self.assertCnfEqualsDimacs(F,dimacs)
class TestSubsetCardinalityCommandline(TestCommandline):
def test_complete(self):
for rows in range(2,5):
for columns in range(2,5):
parameters = ["cnfgen","-q","subsetcard", "--bcomplete", rows, columns]
graph = complete_bipartite_graph_proper(rows, columns)
F = SubsetCardinalityFormula(graph)
self.checkFormula(sys.stdin,F, parameters)
def test_not_bipartite(self):
parameters = ["cnfgen","-q","subsetcard", "--complete", "3"]
self.checkCrash(sys.stdin, parameters)
|
sckasturi/saltlake
|
commands/score.py
|
Python
|
gpl-2.0
| 3,127
| 0.002558
|
# Copyright (C) 2013-2014 Fox Wilson, Peter Foley, Srijay Kasturi, Samuel Damashek, James Forcier and Reed Koser
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import re
from random import randint
from helpers.orm import Scores
from helpers.command import Command
def pluralize(s, n):
if n == 1:
return s
else:
return s + 's'
@Command('score', ['config', 'db', 'botnick'])
def cmd(send, msg, args):
"""Gets scores.
Syntax: {command} <--high|--low|nick>
"""
if not args['config']['feature'].getboolean('hooks'):
send("Hooks are disabled, and this command depends on hooks. Please contact the bot admin(s).")
return
session = args['db']
match = re.match('--(.+)', msg)
if match:
if match.group(1) == 'high':
data = session.query(Scores).order_by(Scores.score.
|
desc()).limit(3).all()
send('High Scores:')
for x in data:
|
send("%s: %s" % (x.nick, x.score))
elif match.group(1) == 'low':
data = session.query(Scores).order_by(Scores.score).limit(3).all()
send('Low Scores:')
for x in data:
send("%s: %s" % (x.nick, x.score))
else:
send("%s is not a valid flag" % match.group(1))
return
matches = re.findall('(%s+)' % args['config']['core']['nickregex'], msg)
if matches:
for match in matches:
name = match.lower()
if name == 'c':
send("We all know you love C better than anything else, so why rub it in?")
return
score = session.query(Scores).filter(Scores.nick == name).scalar()
if score is not None:
if name == args['botnick'].lower():
output = 'has %s %s! :)' % (score.score, pluralize('point', score.score))
send(output, 'action')
else:
send("%s has %i %s!" % (name, score.score, pluralize('point', score.score)))
else:
send("Nobody cares about %s" % name)
elif msg:
send("Invalid nick")
else:
count = session.query(Scores).count()
if count == 0:
send("Nobody cares about anything =(")
else:
randid = randint(1, count)
query = session.query(Scores).get(randid)
send("%s has %i %s!" % (query.nick, query.score, pluralize('point', query.score)))
|
mscuthbert/abjad
|
abjad/tools/tonalanalysistools/test/test_tonalanalysistools_ChordSuspension___eq__.py
|
Python
|
gpl-3.0
| 634
| 0.009464
|
# -*- encoding: utf-8 -*-
from abjad import *
from abjad.tools import tonalanalysistools
def test_tonalanalysistools_ChordSuspension___eq___01():
|
chord_suspension = tonalanalysistools.ChordSuspens
|
ion(4, 3)
u = tonalanalysistools.ChordSuspension(4, 3)
voice = tonalanalysistools.ChordSuspension(2, 1)
assert chord_suspension == chord_suspension
assert chord_suspension == u
assert not chord_suspension == voice
assert u == chord_suspension
assert u == u
assert not u == voice
assert not voice == chord_suspension
assert not voice == u
assert voice == voice
|
nicolargo/pymdstat
|
pymdstat/__init__.py
|
Python
|
mit
| 297
| 0
|
#!/usr/bin/env py
|
thon
# -*- coding: utf-8 -*-
#
# PyMDstat
# ...
#
# Copyright (C) 2014 Nicolargo <nicolas@nicolargo.com>
__appname__ = "PyMDstat"
_
|
_version__ = "0.4.2"
__author__ = "Nicolas Hennion <nicolas@nicolargo.com>"
__licence__ = "MIT"
__all__ = ['MdStat']
from .pymdstat import MdStat
|
kazukiotsuka/mongobase
|
mongobase/modelbase.py
|
Python
|
mit
| 5,018
| 0.000598
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# tools/modelbase.py
#
#
# MODEL DEFINITION:
# 1. create a subclass.
# 2. set each definitions as below in the subclass.
#
# __structure__ = {} # define keys and the data type
# __required_fields__ = [] # lists required keys
# __default_values__ = {} # set default values to some keys
# __validators__ = {} # set pairs like key: validatefunc()
#
# BASIC USAGE EXAMPLE:
#
# animal_name = 'wild boar'
# cat = Animal({
# 'name': 'cat',
# 'num_of_legs': 4
# })
#
# cat.validate() # check types in __structure__ and run __validators__
# cat.purify() # convert to dict
# cat.serialize() # convert to json compatible dict
# cat._is_required_fields_satisfied() # raise RequiredKeyIsNotSatisfied if not enough
import logging
import datetime
import sys
from .exceptions import RequiredKeyIsNotSatisfied
class ModelBase(dict):
# __collection__ = '' # set the collection name
__structure__ = {} # define keys and the data type
__required_fields__ = [] # lists required keys
__default_values__ = {} # set default values to some keys
__validators__ = {} # set pairs like key: validatefunc()
# __search_text_keys__ = [] # set index keys for text search
# attributed dictionary extension
# obj['foo'] <-> obj.foo
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
def __init__(self, init_dict):
# set properties written in __structure__
for key in self.__structure__:
if key in init_dict and init_dict[key] is not None:
# when an initial value is given and it's not None
default_val = init_dict[key]
elif key in self.__default_values__:
# when an initial value is given for some keys
default_val = self
|
.__default_values__[key]
else:
default_val = None
setattr(self, key, default_val)
def getattr(self, key):
return getattr(self, key)
def setattr(self, key, value):
return setattr(self, key, value)
|
def purify(self):
"""Return an instance as dictionary format.
returns:
object (dict): object only with keys in __structure__.
"""
extracted = {}
for key in self.__structure__:
extracted[key] = self[key]
if 'search_text' in self:
extracted['search_text'] = self['search_text']
# if self.__search_text_keys__:
# extracted.update({'search_text': self['search_text']})
return extracted
def serialize(self):
"""Return the json formatted dict.
1. datetime.datetime -> YYYY/mm/dd/HH/MM/SS
returns:
object (dict): pure json format dict
"""
extracted = {}
for key in self.__structure__:
extracted[key] = datetime.datetime.strftime(self[key], '%Y/%m/%d/%H/%M/%S')\
if isinstance(self[key], datetime.datetime) else self[key]
return extracted
def validate(self, target=None):
"""Validate properties usually before inserted or updated.
1. validate values according to rules written in the __validators__
2. validate values according to types written in the __structure__
returns:
result (bool): True if no error occured.
"""
if target is None:
target = self
# validate values according to rules in the __validators__
for name in self.__validators__:
logging.info(u'VALIDATE {}'.format(name))
assert self.__validators__[name](target[name])
# validate values according to types written in the __structure__
for key in self.__structure__:
if not (isinstance(target[key], self.__structure__[key])
or target[key] is None):
if not key == '_id':
raise TypeError(
'the key \'{}\' must be of type {} but {}'
.format(
key, self.__structure__[key], type(target[key])))
return True
def _is_required_fields_satisfied(self):
"""Check if required fields are filled.
Required fields are defined as __required_fields__.
`RequiredKeyIsNotSatisfied` exception is raised if not enough.
returns:
satisfied (bool): True if all fields have a value.
"""
for key in self.__required_fields__:
if getattr(self, key) is None:
raise RequiredKeyIsNotSatisfied(
'the key \'{}\' must not be None'.format(key)
)
return False
return True
@classmethod
def generateInstances(cls, documents):
"""Return this instances converted from dicts in documents.
Convert dict objects to this instance and return them.
"""
for obj in documents:
yield cls(obj)
|
joeflack4/jflack
|
joeutils/data_structures/comprehensions/maps/__init__.py
|
Python
|
mit
| 883
| 0
|
"""Map Comprehensions"""
def inverse_filter_dict(dictionary, keys):
"""Filter a dictionary by any keys not given.
Args:
dictionary (dict): Dictionary.
keys (iterable): Iterable containing data type(s) for valid
|
dict key.
Return:
dict: Filtered dictionary.
"""
return {key: val for key, val in dictionary.items() if key not in keys}
def ne_dict(dictionary):
"""Prune dictionary of empty key-value pairs.
Aliases: pruned()
"""
return {k: v for k, v in dictionary.items() if v}
def pruned(dictionary):
"""Prune dictionary of empty key-value pairs.
Alias of ne_dict().
"""
return ne_dict(dictionary)
def prune_by_n_required_chi
|
ldren(dictionary, n=1):
"""Return with only key value pairs that meet required n children."""
return {key: val for key, val in dictionary.items() if len(val) >= n}
|
Elastica/kombu
|
kombu/transport/pyamqp.py
|
Python
|
bsd-3-clause
| 5,262
| 0.00019
|
"""
kombu.transport.pyamqp
======================
pure python amqp transport.
"""
from __future__ import absolute_import, unicode_literals
import amqp
from kombu.five import items
from kombu.utils.amq_manager import get_manager
from kombu.utils.text import version_string_as_tuple
from . import base
DEFAULT_PORT = 5672
DEFAULT_SSL_PORT = 5671
class Message(base.Message):
def __init__(self, channel, msg, **kwargs):
props = msg.properties
super(Message, self).__init__(
channel,
body=msg.body,
delivery_tag=msg.delivery_tag,
content_type=props.get('content_type'),
content_encoding=props.get('content_encoding'),
delivery_info=msg.delivery_info,
properties=msg.properties,
headers=props.get('application_headers') or {},
**kwargs)
class Channel(amqp.Channel, base.StdChannel):
Message = Message
def prepare_message(self, body, priority=None,
content_type=None, content_encoding=None,
headers=None, properties=None, _Message=amqp.Message):
"""Prepares message so that it can be sent using this transport."""
return _Message(
body,
priority=priority,
content_type=content_type,
content_encoding=content_encoding,
application_headers=headers,
**properties or {}
)
def message_to_python(self, raw_message):
"""Convert encoded message body back to a Python value."""
return self.Message(self, raw_message)
class Connection(amqp.Connection):
Channel = Channel
class Transport(base.Transport):
Connection = Connection
default_port = DEFAULT_PORT
default_ssl_port = DEFAULT_SSL_PORT
# it's very annoying that pyamqp sometimes raises AttributeError
# if the connection is lost, but nothing we can do about that here.
connection_errors = amqp.Connection.connection_errors
channel_errors = amqp.Connection.channel_errors
recoverable_connection_errors = \
amqp.Connection.recoverable_connection_errors
recoverable_channel_errors = amqp.Connection.recoverable_channel_errors
driver_name = 'py-amqp'
driver_type = 'amqp'
implements = base.Transport.implements.extend(
async=True,
heartbeats=True,
)
def __init__(self, client,
default_port=None, default_ssl_port=None, **kwargs):
self.client = client
self.default_port = default_port or self.default_port
self.default_ssl_port = default_ssl_port or self.default_ssl_port
def driver_version(self):
return amqp.__version__
def create_channel(self, connection):
return connection.channel()
def drain_events(self, connection, **kwargs):
return connection.drain_events(**kwargs)
def _collect(self, connection):
if connection is not None:
connection.collect()
def establish_connection(self):
"""Establish connection to the AMQP broker."""
conninfo = self.client
for name, default_value in items(self.default_connection_params):
if not getattr(conninfo, name, None):
setattr(conninfo, name, default_value)
if conninfo.hostname == 'localhost':
conninfo.hostname = '127.0.0.1'
opts = dict({
'host': conninfo.host,
'userid': conninfo.userid,
'password': conninfo.password,
'login_method': conninfo.login_method,
'virtual_host': conninfo.virtual_host,
'insist': conninfo.insist,
'ssl': conninfo.ssl,
'connect_timeout': conninfo.connect_timeout,
'heartbeat': conni
|
nfo.heartbeat,
}, **conninfo.transport_options or {})
conn = self.Connection(**opts)
conn.client = self.client
conn.connect()
return conn
def verify_connection(self, connection):
return c
|
onnection.connected
def close_connection(self, connection):
"""Close the AMQP broker connection."""
connection.client = None
connection.close()
def get_heartbeat_interval(self, connection):
return connection.heartbeat
def register_with_event_loop(self, connection, loop):
connection.transport.raise_on_initial_eintr = True
loop.add_reader(connection.sock, self.on_readable, connection, loop)
def heartbeat_check(self, connection, rate=2):
return connection.heartbeat_tick(rate=rate)
def qos_semantics_matches_spec(self, connection):
props = connection.server_properties
if props.get('product') == 'RabbitMQ':
return version_string_as_tuple(props['version']) < (3, 3)
return True
@property
def default_connection_params(self):
return {
'userid': 'guest',
'password': 'guest',
'port': (self.default_ssl_port if self.client.ssl
else self.default_port),
'hostname': 'localhost',
'login_method': 'AMQPLAIN',
}
def get_manager(self, *args, **kwargs):
return get_manager(self.client, *args, **kwargs)
|
wdv4758h/ZipPy
|
edu.uci.python.benchmark/src/benchmarks/sympy/sympy/printing/tests/test_ccode.py
|
Python
|
bsd-3-clause
| 10,134
| 0.001875
|
from sympy.core import pi, oo, symbols, Function, Rational, Integer, GoldenRatio, EulerGamma, Catalan, Lambda, Dummy, Eq
from sympy.functions import Piecewise, sin, cos, Abs, exp, ceiling, sqrt, gamma
from sympy.utilities.pytest import raises
from
|
sympy.printing.ccode import CCodePrinter
from sympy.utilities.lambdify import implemented_function
from sympy.tensor import IndexedBase, Idx
# import test
from sympy import ccode
x, y, z = symbols('x,y,z')
g = Function('g')
def test_printmethod():
class fabs(Abs):
def _ccode(self, printer):
r
|
eturn "fabs(%s)" % printer._print(self.args[0])
assert ccode(fabs(x)) == "fabs(x)"
def test_ccode_sqrt():
assert ccode(sqrt(x)) == "sqrt(x)"
assert ccode(x**0.5) == "sqrt(x)"
assert ccode(sqrt(x)) == "sqrt(x)"
def test_ccode_Pow():
assert ccode(x**3) == "pow(x, 3)"
assert ccode(x**(y**3)) == "pow(x, pow(y, 3))"
assert ccode(1/(g(x)*3.5)**(x - y**x)/(x**2 + y)) == \
"pow(3.5*g(x), -x + pow(y, x))/(pow(x, 2) + y)"
assert ccode(x**-1.0) == '1.0/x'
assert ccode(x**Rational(2, 3)) == 'pow(x, 2.0L/3.0L)'
_cond_cfunc = [(lambda base, exp: exp.is_integer, "dpowi"),
(lambda base, exp: not exp.is_integer, "pow")]
assert ccode(x**3, user_functions={'Pow': _cond_cfunc}) == 'dpowi(x, 3)'
assert ccode(x**3.2, user_functions={'Pow': _cond_cfunc}) == 'pow(x, 3.2)'
def test_ccode_constants_mathh():
assert ccode(exp(1)) == "M_E"
assert ccode(pi) == "M_PI"
assert ccode(oo) == "HUGE_VAL"
assert ccode(-oo) == "-HUGE_VAL"
def test_ccode_constants_other():
assert ccode(2*GoldenRatio) == "double const GoldenRatio = 1.61803398874989;\n2*GoldenRatio"
assert ccode(
2*Catalan) == "double const Catalan = 0.915965594177219;\n2*Catalan"
assert ccode(2*EulerGamma) == "double const EulerGamma = 0.577215664901533;\n2*EulerGamma"
def test_ccode_Rational():
assert ccode(Rational(3, 7)) == "3.0L/7.0L"
assert ccode(Rational(18, 9)) == "2"
assert ccode(Rational(3, -7)) == "-3.0L/7.0L"
assert ccode(Rational(-3, -7)) == "3.0L/7.0L"
assert ccode(x + Rational(3, 7)) == "x + 3.0L/7.0L"
assert ccode(Rational(3, 7)*x) == "(3.0L/7.0L)*x"
def test_ccode_Integer():
assert ccode(Integer(67)) == "67"
assert ccode(Integer(-1)) == "-1"
def test_ccode_functions():
assert ccode(sin(x) ** cos(x)) == "pow(sin(x), cos(x))"
def test_ccode_inline_function():
x = symbols('x')
g = implemented_function('g', Lambda(x, 2*x))
assert ccode(g(x)) == "2*x"
g = implemented_function('g', Lambda(x, 2*x/Catalan))
assert ccode(
g(x)) == "double const Catalan = %s;\n2*x/Catalan" % Catalan.n()
A = IndexedBase('A')
i = Idx('i', symbols('n', integer=True))
g = implemented_function('g', Lambda(x, x*(1 + x)*(2 + x)))
assert ccode(g(A[i]), assign_to=A[i]) == (
"for (int i=0; i<n; i++){\n"
" A[i] = (A[i] + 1)*(A[i] + 2)*A[i];\n"
"}"
)
def test_ccode_exceptions():
assert ccode(ceiling(x)) == "ceil(x)"
assert ccode(Abs(x)) == "fabs(x)"
assert ccode(gamma(x)) == "tgamma(x)"
def test_ccode_user_functions():
x = symbols('x', integer=False)
n = symbols('n', integer=True)
custom_functions = {
"ceiling": "ceil",
"Abs": [(lambda x: not x.is_integer, "fabs"), (lambda x: x.is_integer, "abs")],
}
assert ccode(ceiling(x), user_functions=custom_functions) == "ceil(x)"
assert ccode(Abs(x), user_functions=custom_functions) == "fabs(x)"
assert ccode(Abs(n), user_functions=custom_functions) == "abs(n)"
def test_ccode_boolean():
assert ccode(x & y) == "x && y"
assert ccode(x | y) == "x || y"
assert ccode(~x) == "!x"
assert ccode(x & y & z) == "x && y && z"
assert ccode(x | y | z) == "x || y || z"
assert ccode((x & y) | z) == "z || x && y"
assert ccode((x | y) & z) == "z && (x || y)"
def test_ccode_Piecewise():
p = ccode(Piecewise((x, x < 1), (x**2, True)))
s = \
"""\
if (x < 1) {
x
}
else {
pow(x, 2)
}\
"""
assert p == s
def test_ccode_Piecewise_deep():
p = ccode(2*Piecewise((x, x < 1), (x**2, True)))
s = \
"""\
2*((x < 1) ? (
x
)
: (
pow(x, 2)
) )\
"""
assert p == s
def test_ccode_settings():
raises(TypeError, lambda: ccode(sin(x), method="garbage"))
def test_ccode_Indexed():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m, o = symbols('n m o', integer=True)
i, j, k = Idx('i', n), Idx('j', m), Idx('k', o)
p = CCodePrinter()
p._not_c = set()
x = IndexedBase('x')[j]
assert p._print_Indexed(x) == 'x[j]'
A = IndexedBase('A')[i, j]
assert p._print_Indexed(A) == 'A[%s]' % (m*i+j)
B = IndexedBase('B')[i, j, k]
assert p._print_Indexed(B) == 'B[%s]' % (i*o*m+j*o+k)
assert p._not_c == set()
def test_ccode_Indexed_without_looking_for_contraction():
len_y = 5
y = IndexedBase('y', shape=(len_y,))
x = IndexedBase('x', shape=(len_y,))
Dy = IndexedBase('Dy', shape=(len_y-1,))
i = Idx('i', len_y-1)
e=Eq(Dy[i], (y[i+1]-y[i])/(x[i+1]-x[i]))
code0 = ccode(e.rhs, assign_to=e.lhs, contract=False)
assert code0 == 'Dy[i] = (y[%s] - y[i])/(x[%s] - x[i]);' % (i + 1, i + 1)
def test_ccode_loops_matrix_vector():
n, m = symbols('n m', integer=True)
A = IndexedBase('A')
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
s = (
'for (int i=0; i<m; i++){\n'
' y[i] = 0;\n'
'}\n'
'for (int i=0; i<m; i++){\n'
' for (int j=0; j<n; j++){\n'
' y[i] = x[j]*A[%s] + y[i];\n' % (i*n + j) +\
' }\n'
'}'
)
c = ccode(A[i, j]*x[j], assign_to=y[i])
assert c == s
def test_dummy_loops():
# the following line could also be
# [Dummy(s, integer=True) for s in 'im']
# or [Dummy(integer=True) for s in 'im']
i, m = symbols('i m', integer=True, cls=Dummy)
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx(i, m)
expected = (
'for (int i_%(icount)i=0; i_%(icount)i<m_%(mcount)i; i_%(icount)i++){\n'
' y[i_%(icount)i] = x[i_%(icount)i];\n'
'}'
) % {'icount': i.label.dummy_index, 'mcount': m.dummy_index}
code = ccode(x[i], assign_to=y[i])
assert code == expected
def test_ccode_loops_add():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m = symbols('n m', integer=True)
A = IndexedBase('A')
x = IndexedBase('x')
y = IndexedBase('y')
z = IndexedBase('z')
i = Idx('i', m)
j = Idx('j', n)
s = (
'for (int i=0; i<m; i++){\n'
' y[i] = x[i] + z[i];\n'
'}\n'
'for (int i=0; i<m; i++){\n'
' for (int j=0; j<n; j++){\n'
' y[i] = x[j]*A[%s] + y[i];\n' % (i*n + j) +\
' }\n'
'}'
)
c = ccode(A[i, j]*x[j] + x[i] + z[i], assign_to=y[i])
assert c == s
def test_ccode_loops_multiple_contractions():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m, o, p = symbols('n m o p', integer=True)
a = IndexedBase('a')
b = IndexedBase('b')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
k = Idx('k', o)
l = Idx('l', p)
s = (
'for (int i=0; i<m; i++){\n'
' y[i] = 0;\n'
'}\n'
'for (int i=0; i<m; i++){\n'
' for (int j=0; j<n; j++){\n'
' for (int k=0; k<o; k++){\n'
' for (int l=0; l<p; l++){\n'
' y[i] = y[i] + b[%s]*a[%s];\n' % (j*o*p + k*p + l, i*n*o*p + j*o*p + k*p + l) +\
' }\n'
' }\n'
' }\n'
'}'
)
c = ccode(b[j, k, l]*a[i, j, k, l], assign_to=y[i])
assert c == s
def test_ccode_loops_addfactor():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m, o, p = symbols('n m o p', integer=True)
a = IndexedBase('a')
b = IndexedBase('b')
c = IndexedBase('c')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
k = Idx('k', o)
l = Idx('l', p)
s
|
ProjectSWGCore/NGECore2
|
scripts/mobiles/naboo/narglatch_sick.py
|
Python
|
lgpl-3.0
| 1,632
| 0.026961
|
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('narglatch_sick')
mobileTemplate.setLevel(21)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(4)
mobileTempla
|
te.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(False)
mobileTemplate.setScale(1)
mobileTemplate.setMeatType("Carnivore Meat")
mobileTemplate.setMeatAmount(60)
mobileTemplate.setHideType("Bristley Hide")
mobileTem
|
plate.setHideAmount(45)
mobileTemplate.setBoneType("Animal Bones")
mobileTemplate.setBoneAmount(40)
mobileTemplate.setSocialGroup("narglatch")
mobileTemplate.setAssistRange(2)
mobileTemplate.setOptionsBitmask(Options.AGGRESSIVE | Options.ATTACKABLE)
mobileTemplate.setStalker(False)
templates = Vector()
templates.add('object/mobile/shared_narglatch_hue.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
attacks.add('bm_claw_2')
attacks.add('bm_slash_2')
mobileTemplate.setDefaultAttack('creatureMeleeAttack')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('narglatch_sick', mobileTemplate)
return
|
jesopo/bitbot
|
src/core_modules/cron.py
|
Python
|
gpl-2.0
| 2,510
| 0.00239
|
import datetime, time
from src import ModuleManager, utils
TIMESTAMP_BOUNDS = [
[0, 59],
[0, 23],
[1, 31],
[1, 12],
[0, 6],
]
class Module(ModuleManager.BaseModule):
def on_load(self):
now = datetime.datetime.utcnow()
next_minute = now.replace(second=0, microsecond=0)
next_minute += datetime.timedelta(minutes=1)
until = time.time()+((next_minute-now).total_seconds())
self.timers.add("cron", self._minute, 60, until)
def _minute(self, timer):
now = datetime.datetime.utcnow().replace(second=0, microsecond=0)
timer.redo()
timestamp = [now.minute, now.hour, now.day, now.month,
now.isoweekday()%7]
events = self.events.on("cron")
def _check(schedule):
return self._schedule_match(timestamp, schedule.split(" "))
event = events.make_event(schedule=_check)
for cron in events.get_hooks():
schedule = cron.get_kwarg("schedule", None)
if schedule and not _check(schedule):
continue
else:
cron.call(event)
def _schedule_match(self, timestamp, schedule):
items = enumerate(zip(timestamp, schedule))
for i, (timestamp_part, schedule_par
|
t) in items:
if not self._schedule_match_part(i, timestamp_part, schedule_part):
return False
return True
def _schedule_match_part(self, i, timestamp_part, schedule_part):
if "," in schedule_part:
for schedule_part in schedule_part.split(","):
if self._schedule_match_part(i, timestamp_part, schedule_part):
|
return True
elif "/" in schedule_part:
range_s, _, step = schedule_part.partition("/")
if "-" in range_s:
range_min, _, range_max = range_s.partition("-")
range_min = int(range_min)
range_max = int(range_max)
else:
range_min, range_max = TIMESTAMP_BOUNDS[i]
if (range_min <= timestamp_part <= range_max and
((timestamp_part-range_min)%int(step)) == 0):
return True
elif "-" in schedule_part:
left, right = schedule_part.split("-", 1)
return int(left) <= timestamp_part <= int(right)
elif schedule_part == "*":
return True
elif timestamp_part == int(schedule_part):
return True
return False
|
archen/django
|
django/core/urlresolvers.py
|
Python
|
bsd-3-clause
| 22,195
| 0.001532
|
"""
This module converts requested URLs to callback view functions.
RegexURLResolver is the main class here. Its resolve() method takes a URL (as
a string) and returns a tuple in this format:
(view_function, function_args, function_kwargs)
"""
from __future__ import unicode_literals
from importlib import import_module
import re
from threading import local
from django.http import Http404
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_str, force_text, iri_to_uri
from django.utils.functional import lazy
from django.utils.http import urlquote
from django.utils.module_loading import module_has_submodule
from django.utils.regex_helper import normalize
from django.utils import six, lru_cache
from django.utils.translation import get_language
# SCRIPT_NAME prefixes for each thread are stored here. If there's no entry for
# the current thread (which is the only one we ever access), it is assumed to
# be empty.
_prefixes = local()
# Overridden URLconfs for each thread are stored here.
_urlconfs = local()
class ResolverMatch(object):
def __init__(self, func, args, kwargs, url_name=None, app_name=None, namespaces=None):
self.func = func
self.args = args
self.kwargs = kwargs
self.app_name = app_name
if namespaces:
self.namespaces = [x for x in namespaces if x]
else:
self.namespaces = []
if not url_name:
if not hasattr(func, '__name__'):
# An instance of a callable class
url_name = '.'.join([func.__class__.__module__, func.__class__.__name__])
else:
# A function
url_name = '.'.join([func.__module__, func.__name__])
self.u
|
rl_name = url_name
|
@property
def namespace(self):
return ':'.join(self.namespaces)
@property
def view_name(self):
return ':'.join(filter(bool, (self.namespace, self.url_name)))
def __getitem__(self, index):
return (self.func, self.args, self.kwargs)[index]
def __repr__(self):
return "ResolverMatch(func=%s, args=%s, kwargs=%s, url_name='%s', app_name='%s', namespace='%s')" % (
self.func, self.args, self.kwargs, self.url_name, self.app_name, self.namespace)
class Resolver404(Http404):
pass
class NoReverseMatch(Exception):
pass
@lru_cache.lru_cache(maxsize=None)
def get_callable(lookup_view, can_fail=False):
"""
Convert a string version of a function name to the callable object.
If the lookup_view is not an import path, it is assumed to be a URL pattern
label and the original string is returned.
If can_fail is True, lookup_view might be a URL pattern label, so errors
during the import fail and the string is returned.
"""
if not callable(lookup_view):
mod_name, func_name = get_mod_func(lookup_view)
if func_name == '':
return lookup_view
try:
mod = import_module(mod_name)
except ImportError:
parentmod, submod = get_mod_func(mod_name)
if (not can_fail and submod != '' and
not module_has_submodule(import_module(parentmod), submod)):
raise ViewDoesNotExist(
"Could not import %s. Parent module %s does not exist." %
(lookup_view, mod_name))
if not can_fail:
raise
else:
try:
lookup_view = getattr(mod, func_name)
if not callable(lookup_view):
raise ViewDoesNotExist(
"Could not import %s.%s. View is not callable." %
(mod_name, func_name))
except AttributeError:
if not can_fail:
raise ViewDoesNotExist(
"Could not import %s. View does not exist in module %s." %
(lookup_view, mod_name))
return lookup_view
@lru_cache.lru_cache(maxsize=None)
def get_resolver(urlconf):
if urlconf is None:
from django.conf import settings
urlconf = settings.ROOT_URLCONF
return RegexURLResolver(r'^/', urlconf)
@lru_cache.lru_cache(maxsize=None)
def get_ns_resolver(ns_pattern, resolver):
# Build a namespaced resolver for the given parent urlconf pattern.
# This makes it possible to have captured parameters in the parent
# urlconf pattern.
ns_resolver = RegexURLResolver(ns_pattern, resolver.url_patterns)
return RegexURLResolver(r'^/', [ns_resolver])
def get_mod_func(callback):
# Converts 'django.views.news.stories.story_detail' to
# ['django.views.news.stories', 'story_detail']
try:
dot = callback.rindex('.')
except ValueError:
return callback, ''
return callback[:dot], callback[dot + 1:]
class LocaleRegexProvider(object):
"""
A mixin to provide a default regex property which can vary by active
language.
"""
def __init__(self, regex):
# regex is either a string representing a regular expression, or a
# translatable string (using ugettext_lazy) representing a regular
# expression.
self._regex = regex
self._regex_dict = {}
@property
def regex(self):
"""
Returns a compiled regular expression, depending upon the activated
language-code.
"""
language_code = get_language()
if language_code not in self._regex_dict:
if isinstance(self._regex, six.string_types):
regex = self._regex
else:
regex = force_text(self._regex)
try:
compiled_regex = re.compile(regex, re.UNICODE)
except re.error as e:
raise ImproperlyConfigured(
'"%s" is not a valid regular expression: %s' %
(regex, six.text_type(e)))
self._regex_dict[language_code] = compiled_regex
return self._regex_dict[language_code]
class RegexURLPattern(LocaleRegexProvider):
def __init__(self, regex, callback, default_args=None, name=None):
LocaleRegexProvider.__init__(self, regex)
# callback is either a string like 'foo.views.news.stories.story_detail'
# which represents the path to a module and a view function name, or a
# callable object (view).
if callable(callback):
self._callback = callback
else:
self._callback = None
self._callback_str = callback
self.default_args = default_args or {}
self.name = name
def __repr__(self):
return force_str('<%s %s %s>' % (self.__class__.__name__, self.name, self.regex.pattern))
def add_prefix(self, prefix):
"""
Adds the prefix string to a string-based callback.
"""
if not prefix or not hasattr(self, '_callback_str'):
return
self._callback_str = prefix + '.' + self._callback_str
def resolve(self, path):
match = self.regex.search(path)
if match:
# If there are any named groups, use those as kwargs, ignoring
# non-named groups. Otherwise, pass all non-named arguments as
# positional arguments.
kwargs = match.groupdict()
if kwargs:
args = ()
else:
args = match.groups()
# In both cases, pass any extra_kwargs as **kwargs.
kwargs.update(self.default_args)
return ResolverMatch(self.callback, args, kwargs, self.name)
@property
def callback(self):
if self._callback is not None:
return self._callback
self._callback = get_callable(self._callback_str)
return self._callback
class RegexURLResolver(LocaleRegexProvider):
def __init__(self, regex, urlconf_name, default_kwargs=None, app_name=None, namespace=None):
LocaleRegexProvider.__init__(self, regex)
# urlconf_name is a string representing the module contai
|
lunixbochs/fs-uae-gles
|
launcher/fs_uae_launcher/fsui/wx/separator.py
|
Python
|
gpl-2.0
| 394
| 0.010152
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute
|
_import
import wx
from .common import update_class
class Separator(wx.StaticLine):
def __init__(self, parent):
wx.StaticLine.__i
|
nit__(self, parent.get_container(), -1,
wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL)
update_class(Separator)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.