| repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6-947k) | score (float64 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
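
The schema above describes a fill-in-the-middle style corpus: each row pairs repository metadata (repo_name, path, language, license, size, score) with a prefix/middle/suffix split of a Python source file. As a minimal sketch of how such rows might be consumed (assuming the table is published through the Hugging Face `datasets` library; the dataset identifier below is a hypothetical placeholder, not this dataset's real name), a sample could be reassembled like this:

```python
# Minimal sketch: iterate a fill-in-the-middle corpus with the columns shown above.
# The dataset identifier is a hypothetical placeholder, not this dataset's real name.
from datasets import load_dataset

ds = load_dataset("example-org/python-fim-corpus", split="train")  # hypothetical id

for row in ds.select(range(3)):  # peek at a few rows
    # Reassemble the original file slice from its prefix/middle/suffix split.
    full_text = row["prefix"] + row["middle"] + row["suffix"]
    print(row["repo_name"], row["path"], row["license"], row["size"], row["score"])
    print(full_text[:120])
```
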
| varunmehta/photobooth | photobooth.py | Python | mit | 12,204 | 0.002786 |
#!/usr/bin/env python
# created by chris@drumminhands.com
# modified by varunmehta
# see instructions at http://www.drumminhands.com/2014/06/15/raspberry-pi-photo-booth/
import atexit
import glob
import logging
import math
import os
import subprocess
import sys
import time
import traceback
from time import sleep
import RPi.GPIO as GPIO
import picamera # http://picamera.readthedocs.org/en/release-1.4/install2.html
import pygame
from pygame.locals import QUIT, KEYDOWN, K_ESCAPE
import config # this is the config python file config.py
########################
### Variables Config ###
########################
led_pin = 17 # LED
btn_pin = 2 # pin for the start button
total_pics = 2 # number of pics to be taken
capture_delay = 1 # delay between pics
prep_delay = 3 # number of seconds at step 1 as users prep to have photo taken
restart_delay = 3 # how long to display finished message before beginning a new session
# full frame of v1 camera is 2592x1944. Wide screen max is 2592,1555
# if you run into resource issues, try smaller, like 1920x1152.
# or increase memory http://picamera.readthedocs.io/en/release-1.12/fov.html#hardware-limits
high_res_w = 1190 # width of high res image, if taken
high_res_h = 790 # height of high res image, if taken
#############################
### Variables that Change ###
#############################
# Do not change these variables; the code will change them anyway
transform_x = config.monitor_w # how wide to scale the jpg when replaying
transform_y = config.monitor_h # how high to scale the jpg when replaying
offset_x = 0 # how far off to left corner to display photos
offset_y = 0 # how far off to left corner to display photos
replay_delay = 1 # how much to wait in-between showing pics on-screen after taking
replay_cycles = 1 # how many times to show each photo on-screen after taking
####################
### Other Config ###
####################
real_path = os.path.dirname(os.path.realpath(__file__))
# GPIO setup
GPIO.setmode(GPIO.BCM)
GPIO.setup(led_pin, GPIO.OUT) # LED
GPIO.setup(btn_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.output(led_pin, False) # for some reason the pin turns on at the beginning of the program. Why?
# initialize pygame
pygame.init()
pygame.display.set_mode((config.monitor_w, config.monitor_h))
screen = pygame.display.get_surface()
pygame.display.set_caption('Photo Booth Pics')
pygame.mouse.set_visible(False) # hide the mouse cursor
pygame.display.toggle_fullscreen()
# init logging
logging.basicConfig(format='%(asctime)s %(message)s', filename='photobooth.log', level=logging.INFO)
#################
### Functions ###
#################
# clean up running programs as needed when main program exits
def cleanup():
logging.critical('Ended abruptly')
pygame.quit()
GPIO.cleanup()
atexit.register(cleanup)
# A function to handle keyboard/mouse/device input events
def input(events):
for event in events: # Hit the ESC key to quit the slideshow.
if (event.type == QUIT or
(event.type == KEYDOWN and event.key == K_ESCAPE)):
pygame.quit()
# delete files in folder
def clear_pics(channel):
files = glob.glob(config.file_path + '*')
for f in files:
os.remove(f)
# light the lights in series to show completed
logging.warning("Deleted previous pics")
for x in range(0, 3): # blink light
GPIO.output(led_pin, True)
sleep(0.25)
GPIO.output(led_pin, False)
sleep(0.25)
def init_event_folders():
if (not os.path.exists(config.file_path)):
os.mkdir(config.file_path)
os.mkdir(config.file_path + "/final")
logging.info("Initalized event folder")
# set variables to properly display the image on screen at right ratio
def set_dimensions(img_w, img_h):
# Note this only works when in booting in desktop mode.
# When running in terminal, the size is not correct (it displays small). Why?
# connect to global vars
global transform_y, transform_x, offset_y, offset_x
# based on output screen resolution, calculate how to display
ratio_h = (config.monitor_w * img_h) / img_w
if (ratio_h < config.monitor_h):
# Use horizontal black bars
# print("horizontal black bars")
transform_y = ratio_h
transform_x = config.monitor_w
offset_y = (config.monitor_h - ratio_h) / 2
offset_x = 0
elif (ratio_h > config.monitor_h):
# Use vertical black bars
# print("vertical black bars")
transform_x = (config.monitor_h * img_w) / img_h
transform_y = config.monitor_h
offset_x = (config.monitor_w - transform_x) / 2
offset_y = 0
else:
# No need for black bars as photo ratio equals screen ratio
# print("no black bars")
transform_x = config.monitor_w
transform_y = config.monitor_h
offset_y = offset_x = 0
# Ceil and floor floats to integers
transform_x = math.ceil(transform_x)
transform_y = math.ceil(transform_y)
offset_x = math.floor(offset_x)
offset_y = math.floor(offset_y)
# uncomment these lines to troubleshoot screen ratios
# print(str(img_w) + " x " + str(img_h))
# print("ratio_h: " + str(ratio_h))
# print("transform_x: " + str(transform_x))
# print("transform_y: " + str(transform_y))
# print("offset_y: " + str(offset_y))
# print("offset_x: " + str(offset_x))
# display one image on screen
def show_image(image_path):
# print(" Displaying... " + image_path)
# clear the screen
screen.fill((0, 0, 0))
# load the image
img = pygame.image.load(image_path)
img = img.convert()
# set pixel dimensions based on image
set_dimensions(img.get_width(), img.get_height())
# rescale the image to fit the current display
img = pygame.transform.scale(img, (transform_x, transform_y))
screen.blit(img, (offset_x, offset_y))
pygame.display.flip()
# display a blank screen
def clear_screen():
screen.fill((0, 0, 0))
pygame.display.flip()
# display a group of images
def display_pics(jpg_group):
for i in range(0, replay_cycles): # show pics a few times
for i in range(1, total_pics + 1): # show each pic
show_image(config.file_path + jpg_group + "-0" + str(i) + ".jpg")
time.sleep(replay_delay) # pause
# define the photo taking function for when the big button is pressed
def start_photobooth():
input(pygame.event.get()) # press escape to exit pygame. Then press ctrl-c to exit python.
################################# Begin Step 1 #################################
logging.info("Get Ready")
GPIO.output(led_pin, False)
show_image(real_path + "/instructions.png")
sleep(prep_delay)
# clear the screen
clear_screen()
camera = picamera.PiCamera()
camera.vflip = False
camera.hflip = True # flip for preview, showing users a mirror image
camera.rotation = 0 # revisit this depending upon final camera placement
# camera.saturation = -100 # comment out this line if you want color images
# camera.iso = config.camera_iso
camera.resolution = (high_res_w, high_res_h) # set camera resolution to high res
################################# Begin Step 2 #################################
logging.info("Starting to take pics...")
# All images will be named with the 'now' timestamp plus an appended number, e.g. 20160310113034-01.jpg
now = time.strftime("%Y%m%d-%H%M%S") # get the current date and time for the start of the filename
montage_img = now + "-" + config.event_name + ".jpg" # montage file name
if config.capture_count_pics:
logging.debug("Decided to go count pics")
try: # take the photos
for i in range(1, total_pics + 1):
show_image(real_path + "/pose" + str(i) + ".png")
time.sleep(capture_delay) # pause in-between shots
clear_screen()
camera.hflip = True # preview a mirror image
camera.start_preview(
resolution=(high_res_w, high_res_h)) # start preview at low res but the right ratio
tim
|
| ThomasGerstenberg/serial_monitor | stream/__init__.py | Python | bsd-3-clause | 551 | 0 |
from serial_settings import SerialSettings
class AbstractStream(object):
def __init__(self, config, name):
"""
:type name: str
"""
self.config = config
self.name = name
def open(self):
raise NotImplementedError
def close(self):
raise NotImplementedError
def read(self, num_bytes=1):
raise NotImplementedError
def write(self, data):
raise NotImplementedError
def reconfigure(self, config):
raise NotImplementedError
|
| google/google-ctf | third_party/edk2/AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/pickle2db.py | Python | apache-2.0 | 4,089 | 0.000734 |
#!/usr/bin/env python
"""
Synopsis: %(prog)s [-h|-b|-g|-r|-a|-d] [ picklefile ] dbfile
Read the given picklefile as a series of key/value pairs and write to a new
database. If the database already exists, any contents are deleted. The
optional flags indicate the type of the output database:
-a - open using anydbm
-b - open as bsddb btree file
-d - open as dbm file
-g - open as gdbm file
-h - open as bsddb hash file
-r - open as bsddb recno file
The default is hash. If a pickle file is named it is opened for read
access. If no pickle file is named, the pickle input is read from standard
input.
Note that recno databases can only contain integer keys, so you can't dump a
hash or btree database using db2pickle.py and reconstitute it to a recno
database with %(prog)s unless your keys are integers.
"""
import getopt
try:
import bsddb
except ImportError:
bsddb = None
try:
import dbm
except ImportError:
dbm = None
try:
import gdbm
except ImportError:
gdbm = None
try:
import anydbm
except ImportError:
anydbm = None
import sys
try:
import cPickle as pickle
except ImportError:
import pickle
prog = sys.argv[0]
def usage():
sys.stderr.write(__doc__ % globals())
def main(args):
try:
opts, args = getopt.getopt(args, "hbrdag",
["hash", "btree", "recno", "dbm", "anydbm",
"gdbm"])
except getopt.error:
usage()
return 1
if len(args) == 0 or len(args) > 2:
usage()
return 1
elif len(args) == 1:
pfile = sys.stdin
dbfile = args[0]
else:
try:
pfile = open(args[0], 'rb')
except IOError:
sys.stderr.write("Unable to open %s\n" % args[0])
return 1
dbfile = args[1]
dbopen = None
for opt, arg in opts:
if opt in ("-h", "--hash"):
try:
dbopen = bsddb.hashopen
except AttributeError:
sys.stderr.write("bsddb module unavailable.\n")
return 1
elif opt in ("-b", "--btree"):
try:
dbopen = bsddb.btopen
except AttributeError:
sys.stderr.write("bsddb module unavailable.\n")
return 1
elif opt in ("-r", "--recno"):
try:
dbopen = bsddb.rnopen
except AttributeError:
sys.stderr.write("bsddb module unavailable.\n")
return 1
elif opt in ("-a", "--anydbm"):
try:
dbopen = anydbm.open
except AttributeError:
sys.stderr.write("anydbm module unavailable.\n")
return 1
elif opt in ("-g", "--gdbm"):
try:
dbopen = gdbm.open
except AttributeError:
sys.stderr.write("gdbm module unava
|
ilable.\n")
return 1
elif opt in ("-d", "--dbm"):
try:
dbopen = dbm.open
except AttributeError:
sys.stderr.write("dbm module unavailable.\n")
return 1
if dbopen is None:
if bsddb is None:
sys.stderr.write("bsddb module unavailable - ")
sys.stderr.write("must specify dbtype.\n")
return 1
else:
dbopen = bsddb.hashopen
try:
db = dbopen(dbfile, 'c')
except bsddb.error:
sys.stderr.write("Unable to open %s. " % dbfile)
sys.stderr.write("Check for format or version mismatch.\n")
return 1
else:
for k in db.keys():
del db[k]
while 1:
try:
(key, val) = pickle.load(pfile)
except EOFError:
break
db[key] = val
db.close()
pfile.close()
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
|
| How2Compute/SmartHome | cli/demo2.py | Python | mit | 1,643 | 0.008521 |
# Import time (for delay) library (for SmartHome api) and GPIO (for raspberry pi gpio)
from library import SmartHomeApi
import RPi.GPIO as GPIO
import time
from datetime import datetime
# 7 -> LED
# Create the client with pre-existing credentials
api = SmartHomeApi("http://localhost:5000/api/0.1", id=10, api_key="api_eMxSb7n6G10Svojn3PlU5P6srMaDrFxmKAnWvnW6UyzmBG")
GPIO.setmode(GPIO.BOARD)
GPIO.setup(7, GPIO.OUT)
last_status = "UNKNOWN"
while True:
preferences = api.GetUserPrefences(2)['results']
print(preferences)
preference = (item for item in preferences if item["key"] == "bedtime").next()
if not preference:
print("Could not fin 'bedtime' preference!")
api.AddPreference(2, "bedtime", "00:00")
print("Created bedtim
|
e preference!
|
Please set it to the correct value in your dashboard")
else:
bedtime = preference['value']
if not bedtime:
print("Unexpected error occured!")
else:
print(bedtime)
time_str = datetime.now().strftime('%H:%M')
print("time: {}".format(time_str))
bedtime_dt = datetime.strptime(bedtime, "%H:%M")
time_hm = datetime.strptime(time_str, "%H:%M")
if time_hm >= bedtime_dt:
print("Going to bed! Currently: {}, going to bed at {}".format(time_str, bedtime))
GPIO.output(7, GPIO.LOW)
else:
print("Not yet time for bed. Currently: {}, going to bed at {}".format(time_str, bedtime))
GPIO.output(7, GPIO.HIGH)
time.sleep(1)
|
| shabab12/edx-platform | lms/djangoapps/ccx/tests/test_overrides.py | Python | agpl-3.0 | 7,563 | 0.001587 |
# coding=UTF-8
"""
tests for overrides
"""
import datetime
import mock
import pytz
from nose.plugins.attrib import attr
from ccx_keys.locator import CCXLocator
from courseware.courses import get_course_by_id
from courseware.field_overrides import OverrideFieldData
from courseware.testutils import FieldOverrideTestMixin
from django.test.utils import override_settings
from lms.djangoapps.courseware.tests.test_field_overrides import inject_field_overrides
from request_cache.middleware import RequestCache
from student.tests.factories import AdminFactory
from xmodule.modulestore.tests.django_utils import (
SharedModuleStoreTestCase,
TEST_DATA_SPLIT_MODULESTORE)
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from lms.djangoapps.ccx.models import CustomCourseForEdX
from lms.djangoapps.ccx.overrides import override_field_for_ccx
from lms.djangoapps.ccx.tests.utils import flatten, iter_blocks
@attr('shard_1')
@override_settings(
XBLOCK_FIELD_DATA_WRAPPERS=['lms.djangoapps.courseware.field_overrides:OverrideModulestoreFieldData.wrap'],
MODULESTORE_FIELD_OVERRIDE_PROVIDERS=['ccx.overrides.CustomCoursesForEdxOverrideProvider'],
)
class TestFieldOverrides(FieldOverrideTestMixin, SharedModuleStoreTestCase):
"""
Make sure field overrides behave in the expected manner.
"""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
@classmethod
def setUpClass(cls):
"""
Course is created here and shared by all the class's tests.
"""
super(TestFieldOverrides, cls).setUpClass()
cls.course = CourseFactory.create()
cls.course.enable_ccx = True
# Create a course outline
start = datetime.datetime(2010, 5, 12, 2, 42, tzinfo=pytz.UTC)
due = datetime.datetime(2010, 7, 7, 0, 0, tzinfo=pytz.UTC)
chapters = [ItemFactory.create(start=start, parent=cls.course)
for _ in xrange(2)]
sequentials = flatten([
[ItemFactory.create(parent=chapter) for _ in xrange(2)]
for chapter in chapters])
verticals = flatten([
[ItemFactory.create(due=due, parent=sequential) for _ in xrange(2)]
for sequential in sequentials])
blocks = flatten([ # pylint: disable=unused-variable
[ItemFactory.create(parent=vertical) for _ in xrange(2)]
for vertical in verticals])
def setUp(self):
"""
Set up tests
"""
super(TestFieldOverrides, self).setUp()
self.ccx = ccx = CustomCourseForEdX(
course_id=self.course.id,
display_name='Test CCX',
coach=AdminFactory.create())
ccx.save()
patch = mock.patch('ccx.overrides.get_current_ccx')
self.get_ccx = get_ccx = patch.start()
get_ccx.return_value = ccx
self.addCleanup(patch.stop)
self.addCleanup(RequestCache.clear_request_cache)
inject_field_overrides(iter_blocks(ccx.course), self.course, AdminFactory.create())
self.ccx_key = CCXLocator.from_course_locator(self.course.id, ccx.id)
self.ccx_course = get_course_by_id(self.ccx_key, depth=None)
def cleanup_provider_classes():
"""
After everything is done, clean up by un-doing the change to the
OverrideFieldData object that is done during the wrap method.
"""
OverrideFieldData.provider_classes = None
self.addCleanup(cleanup_provider_classes)
def test_override_start(self):
"""
Test that overriding start date on a chapter works.
"""
ccx_start = datetime.datetime(2014, 12, 25, 00, 00, tzinfo=pytz.UTC)
chapter = self.ccx_course.get_children()[0]
override_field_for_ccx(self.ccx, chapter, 'start', ccx_start)
self.assertEquals(chapter.start, ccx_start)
def test_override_num_queries_new_field(self):
"""
Test that creating a new field override executes only the expected queries.
"""
ccx_start = datetime.datetime(2014, 12, 25, 00, 00, tzinfo=pytz.UTC)
chapter = self.ccx_course.get_children()[0]
# One outer SAVEPOINT/RELEASE SAVEPOINT pair around everything caused by the
# transaction.atomic decorator wrapping override_field_for_ccx.
# One SELECT and one INSERT.
# One inner SAVEPOINT/RELEASE SAVEPOINT pair around the INSERT caused by the
# transaction.atomic down in Django's get_or_create()/_create_object_from_params().
with self.assertNumQueries(6):
override_field_for_ccx(self.ccx, chapter, 'start', ccx_start)
|
def test_override_num_queries_update_existing_field(self):
"""
Test that overriding an existing field executes create, fetch and update queries.
"""
ccx_start = datetime.datetime(2014, 12, 25, 00, 00, tzinfo=pytz.UTC)
new_ccx_start = datetime.datetime(2015, 12, 25, 00, 00, tzinfo=pytz.UTC)
chapter = self.ccx_course.get_children()[0]
override_field_for_ccx(self.ccx, chapter, 'start', ccx_start)
with self.assertNumQueries(3):
override_field_for_ccx(self.ccx, chapter, 'start', new_ccx_start)
def test_override_num_queries_field_value_not_changed(self):
"""
Test that no queries execute if the value of the field has not changed.
"""
ccx_start = datetime.datetime(2014, 12, 25, 00, 00, tzinfo=pytz.UTC)
chapter = self.ccx_course.get_children()[0]
override_field_for_ccx(self.ccx, chapter, 'start', ccx_start)
with self.assertNumQueries(2): # 2 savepoints
override_field_for_ccx(self.ccx, chapter, 'start', ccx_start)
def test_overriden_field_access_produces_no_extra_queries(self):
"""
Test no extra queries when accessing an overridden field more than once.
"""
ccx_start = datetime.datetime(2014, 12, 25, 00, 00, tzinfo=pytz.UTC)
chapter = self.ccx_course.get_children()[0]
# One outer SAVEPOINT/RELEASE SAVEPOINT pair around everything caused by the
# transaction.atomic decorator wrapping override_field_for_ccx.
# One SELECT and one INSERT.
# One inner SAVEPOINT/RELEASE SAVEPOINT pair around the INSERT caused by the
# transaction.atomic down in Django's get_or_create()/_create_object_from_params().
with self.assertNumQueries(6):
override_field_for_ccx(self.ccx, chapter, 'start', ccx_start)
def test_override_is_inherited(self):
"""
Test that sequentials inherit overridden start date from chapter.
"""
ccx_start = datetime.datetime(2014, 12, 25, 00, 00, tzinfo=pytz.UTC)
chapter = self.ccx_course.get_children()[0]
override_field_for_ccx(self.ccx, chapter, 'start', ccx_start)
self.assertEquals(chapter.get_children()[0].start, ccx_start)
self.assertEquals(chapter.get_children()[1].start, ccx_start)
def test_override_is_inherited_even_if_set_in_mooc(self):
"""
Test that a due date set on a chapter is inherited by grandchildren
(verticals) even if a due date is set explicitly on grandchildren in
the mooc.
"""
ccx_due = datetime.datetime(2015, 1, 1, 00, 00, tzinfo=pytz.UTC)
chapter = self.ccx_course.get_children()[0]
chapter.display_name = 'itsme!'
override_field_for_ccx(self.ccx, chapter, 'due', ccx_due)
vertical = chapter.get_children()[0].get_children()[0]
self.assertEqual(vertical.due, ccx_due)
|
| h0ke/pynt | pynt/pynt.py | Python | mit | 290 | 0 |
"""
Pynt is a Python client that wraps the Open Beer Database API.
Questions, comments? m@h0ke.com
"""
__author__ = "Matthew Ho
|
kanson <m@h0ke.com>"
__version__ = "0.2.0"
from beer import Beer
from brewery import Brewery
from request import Request
from settings import Settings
|
| purduesigbots/purdueros-cli | proscli/flasher.py | Python | bsd-3-clause | 8,643 | 0.003934 |
import click
import os
import os.path
import ntpath
import serial
import sys
import prosflasher.ports
import prosflasher.upload
import prosconfig
from proscli.utils import default_cfg, AliasGroup
from proscli.utils import get_version
@click.group(cls=AliasGroup)
def flasher_cli():
pass
@flasher_cli.command(short_help='Upload binaries to the microcontroller.', aliases=['upload'])
@click.option('-sfs/-dfs', '--save-file-system/--delete-file-system', is_flag=True, default=False,
help='Specify whether or not to save the file system when writing to the Cortex. Saving the '
'file system takes more time.')
@click.option('-y', is_flag=True, default=False,
help='Automatically say yes to all confirmations.')
@click.option('-f', '-b', '--file', '--binary', default='default', metavar='FILE',
help='Specifies a binary file, project directory, or project config file.')
@click.option('-p', '--port', default='auto', metavar='PORT', help='Specifies the serial port.')
@click.option('--no-poll', is_flag=True, default=False)
@click.option('-r', '--retry', default=2,
help='Specify the number of times the flasher should retry the flash when it detects a failure'
' (default two times).')
@default_cfg
# @click.option('-m', '--strategy', default='cortex', metavar='STRATEGY',
# help='Specify the microcontroller upload strategy. Not currently used.')
def flash(ctx, save_file_system, y, port, binary, no_poll, retry):
"""Upload binaries to the microcontroller. A serial port and binary file need to be specified.
By default, the port is automatically selected (if you want to be pedantic, 'auto').
Otherwise, a system COM port descriptor needs to be used. In Windows/NT, this takes the form of COM1.
In *nx systems, this takes the form of /dev/tty1 or /dev/acm1 or similar.
\b
Specifying 'all' as the COM port will automatically upload to all available microcontrollers.
By default, the CLI will look around for a proper binary to upload to the microcontroller. If one was not found, or
if you want to change the default binary, you can specify it.
"""
click.echo(' ====:: PROS Flasher v{} ::===='.format(get_version()))
if port == 'auto':
ports = prosflasher.ports.list_com_ports()
if len(ports) == 0:
click.echo('No microcontrollers were found. Please plug in a cortex or manually specify a serial port.\n',
err=True)
click.get_current_context().abort()
sys.exit(1)
port = ports[0].device
if len(ports) > 1 and port is not None and y is False:
port = None
for p in ports:
if click.confirm('Download to ' + p.device, default=True):
|
port = p.device
break
if port is None:
click.echo('No additional ports found.')
click.get_current_context().abort()
sys.exit(1)
if port == 'all':
port = [p.device for p in prosflasher.ports.list_com_ports()]
if len(port) == 0:
click.echo('No microcontrollers were found. Please plug in a cortex or manually specify a serial port.\n',
err=True)
click.get_current_context().abort()
sys.exit(1)
if y is False:
click.confirm('Download to ' + ', '.join(port), default=True, abort=True, prompt_suffix='?')
else:
port = [port]
if binary == 'default':
binary = os.getcwd()
if ctx.verbosity > 3:
click.echo('Default binary selected, new directory is {}'.format(binary))
binary = find_binary(binary)
if binary is None:
click.echo('No binary was found! Ensure you are in a built PROS project (run make) '
'or specify the file with the -f flag',
err=True)
click.get_current_context().exit()
if ctx.verbosity > 3:
click.echo('Final binary is {}'.format(binary))
click.echo('Flashing ' + binary + ' to ' + ', '.join(port))
for p in port:
tries = 1
code = prosflasher.upload.upload(p, y, binary, no_poll, ctx)
while tries <= retry and (not code or code == -1000):
click.echo('Retrying...')
code = prosflasher.upload.upload(p, y, binary, no_poll, ctx)
tries += 1
def find_binary(path):
"""
Helper function for finding the binary associated with a project
The algorithm is as follows:
- if it is a file, then check if the name of the file is 'pros.config':
- if it is 'pros.config', then find the binary based off the pros.config value (or default 'bin/output.bin')
- otherwise, can only assume it is the binary file to upload
- if it is a directory, start recursively searching up until 'pros.config' is found. max 10 times
- if the pros.config file was found, find binary based off of the pros.config value
- if no pros.config file was found, start recursively searching up (from starting path) until a directory
named bin is found
- if 'bin' was found, return 'bin/output.bin'
:param path: starting path to start the search
:param ctx:
:return:
"""
# logger = logging.getLogger(ctx.log_key)
# logger.debug('Finding binary for {}'.format(path))
if os.path.isfile(path):
if ntpath.basename(path) == 'pros.config':
pros_cfg = prosconfig.ProjectConfig(path)
return os.path.join(path, pros_cfg.output)
return path
elif os.path.isdir(path):
try:
cfg = prosconfig.ProjectConfig(path, raise_on_error=True)
if cfg is not None and os.path.isfile(os.path.join(cfg.directory, cfg.output)):
return os.path.join(cfg.directory, cfg.output)
except prosconfig.ConfigNotFoundException:
search_dir = path
for n in range(10):
dirs = [d for d in os.listdir(search_dir)
if os.path.isdir(os.path.join(path, search_dir, d)) and d == 'bin']
if len(dirs) == 1: # found a bin directory
if os.path.isfile(os.path.join(path, search_dir, 'bin', 'output.bin')):
return os.path.join(path, search_dir, 'bin', 'output.bin')
search_dir = ntpath.split(search_dir)[:-1][0] # move to parent dir
return None
@flasher_cli.command('poll', short_help='Polls a microcontroller for its system info')
@click.option('-y', '--yes', is_flag=True, default=False,
help='Automatically say yes to all confirmations.')
@click.argument('port', default='all')
@default_cfg
def get_sys_info(cfg, yes, port):
if port == 'auto':
ports = prosflasher.ports.list_com_ports()
if len(ports) == 0:
click.echo('No microcontrollers were found. Please plug in a cortex or manually specify a serial port.\n',
err=True)
sys.exit(1)
port = prosflasher.ports.list_com_ports()[0].device
if port is not None and yes is False:
click.confirm('Poll ' + port, default=True, abort=True, prompt_suffix='?')
if port == 'all':
port = [p.device for p in prosflasher.ports.list_com_ports()]
if len(port) == 0:
click.echo('No microcontrollers were found. Please plug in a cortex or manually specify a serial port.\n',
err=True)
sys.exit(1)
else:
port = [port]
for p in port:
sys_info = prosflasher.upload.ask_sys_info(prosflasher.ports.create_serial(p, serial.PARITY_EVEN), cfg)
click.echo(repr(sys_info))
pass
@flasher_cli.command(short_help='List connected microcontrollers')
@default_cfg
def lsusb(cfg):
if len(prosflasher.ports.list_com_ports()) == 0 or prosflasher.ports.list_co
|
| stvstnfrd/edx-platform | common/lib/xmodule/xmodule/modulestore/split_mongo/mongo_connection.py | Python | agpl-3.0 | 23,649 | 0.002241 |
"""
Segregation of pymongo functions from the data modeling mechanisms for split modulestore.
"""
import datetime
import logging
import math
import re
import zlib
from contextlib import contextmanager
from time import time
import pymongo
import pytz
import six
from six.moves import cPickle as pickle
from contracts import check, new_contract
from mongodb_proxy import autoretry_read
# Import this just to export it
from pymongo.errors import DuplicateKeyError # pylint: disable=unused-import
from xmodule.exceptions import HeartbeatFailure
from xmodule.modulestore import BlockData
from xmodule.modulestore.split_mongo import BlockKey
from xmodule.mongo_utils import connect_to_mongodb, create_collection_index
try:
from django.core.cache import caches, InvalidCacheBackendError
DJANGO_AVAILABLE = True
except ImportError:
DJANGO_AVAILABLE = False
new_contract('BlockData', BlockData)
log = logging.getLogger(__name__)
def get_cache(alias):
"""
Return cache for an `alias`
Note: The primary purpose of this is to mock the cache in test_split_modulestore.py
"""
return caches[alias]
def round_power_2(value):
"""
Return value rounded up to the nearest power of 2.
"""
if value == 0:
return 0
return math.pow(2, math.ceil(math.log(value, 2)))
class Tagger(object):
"""
An object used by :class:`QueryTimer` to allow timed code blocks
to add measurements and tags to the timer.
"""
def __init__(self, default_sample_rate):
self.added_tags = []
self.measures = []
self.sample_rate = default_sample_rate
def measure(self, name, size):
"""
Record a measurement of the timed data. This would be something to
indicate the size of the value being timed.
Arguments:
name: The name of the measurement.
size (float): The size of the measurement.
"""
self.measures.append((name, size))
def tag(self, **kwargs):
"""
Add tags to the timer.
Arguments:
**kwargs: Each keyword is treated as a tag name, and the
value of the argument is the tag value.
"""
self.added_tags.extend(list(kwargs.items()))
@property
def tags(self):
"""
Return all tags for this (this includes any tags added with :meth:`tag`,
and also all of the added measurements, bucketed into powers of 2).
"""
return [
'{}:{}'.format(name, round_power_2(size))
for name, size in self.measures
] + [
'{}:{}'.format(name, value)
for name, value in self.added_tags
]
class QueryTimer(object):
"""
An object that allows timing a block of code while also recording measurements
about that code.
"""
def __init__(self, metric_base, sample_rate=1):
"""
Arguments:
metric_base: The prefix to be used for all queries captured
with this :class:`QueryTimer`.
"""
self._metric_base = metric_base
self._sample_rate = sample_rate
@contextmanager
def timer(self, metric_name, course_context):
"""
Contextmanager which acts as a timer for the metric ``metric_name``,
but which also yields a :class:`Tagger` object that allows the timed block
of code to add tags and quantity measurements. Tags are added verbatim to the
timer output. Measurements are recorded as histogram measurements in their own,
and also as bucketed tags on the timer measurement.
Arguments:
metric_name: The name used to aggregate all of these metrics.
course_context: The course which the query is being made for.
"""
tagger = Tagger(self._sample_rate)
metric_name = "{}.{}".format(self._metric_base, metric_name)
start = time() # lint-amnesty, pylint: disable=unused-variable
try:
yield tagger
finally:
end = time() # lint-amnesty, pylint: disable=unused-variable
tags = tagger.tags
tags.append('course:{}'.format(course_context))
TIMER = QueryTimer(__name__, 0.01)
def structure_from_mongo(structure, course_context=None):
"""
Converts the 'blocks' key from a list [block_data] to a map
{BlockKey: block_data}.
Converts 'root' from [block_type, block_id] to BlockKey.
Converts 'blocks.*.fields.children' from [[block_type, block_id]] to [BlockKey].
N.B. Does not convert any other ReferenceFields (because we don't know which fields they are at this level).
Arguments:
structure: The document structure to convert
course_context (CourseKey): For metrics gathering, the CourseKey
for the course that this data is being processed for.
"""
with TIMER.timer('structure_from_mongo', course_context) as tagger:
tagger.measure('blocks', len(structure['blocks']))
check('seq[2]', structure['root'])
check('list(dict)', structure['blocks'])
for block in structure['blocks']:
if 'children' in block['fields']:
check('list(list[2])', block['fields']['children'])
structure['root'] = BlockKey(*structure['root'])
new_blocks = {}
for block in structure['blocks']:
if 'children' in block['fields']:
block['fields']['children'] = [BlockKey(*child) for child in block['fields']['children']]
new_blocks[BlockKey(block['block_type'], block.pop('block_id'))] = BlockData(**block)
structure['blocks'] = new_blocks
return structure
def structure_to_mongo(structure, course_context=None):
"""
Converts the 'blocks' key from a map {BlockKey: block_data} to
a list [block_data], inserting BlockKey.type as 'block_type'
and BlockKey.id as 'block_id'.
Doesn't convert 'root', since namedtuples can be inserted
directly into mongo.
"""
with TIMER.timer('structure_to_mongo', course_context) as tagger:
tagger.measure('blocks', len(structure['blocks']))
check('BlockKey', structure['root'])
check('dict(BlockKey: BlockData)', structure['blocks'])
for block in six.itervalues(structure['blocks']):
if 'children' in block.fields:
check('list(BlockKey)', block.fields['children'])
new_structure = dict(structure)
new_structure['blocks'] = []
for block_key, block in six.iteritems(structure['blocks']):
new_block = dict(block.to_storable())
new_block.setdefault('block_type', block_key.type)
new_block['block_id'] = block_key.id
new_structure['blocks'].append(new_block)
return new_structure
class CourseStructureCache(object):
"""
Wrapper around django cache object to cache course structure objects.
The course structures are pickled and compressed when cached.
If the 'course_structure_cache' doesn't exist, then don't do anything for set and get.
"""
def __init__(self):
self.cache = None
if DJANGO_AVAILABLE:
try:
self.cache = get_cache('course_structure_cache')
except InvalidCacheBackendError:
pass
def get(self, key, course_context=None):
"""Pull the compressed, pickled struct data from cache and deserialize."""
if self.cache is None:
return None
with TIMER.timer("CourseStructureCache.get", course_context) as tagger:
try:
compressed_pickled_data = self.cache.get(key)
tagger.tag(from_cache=str(compressed_pickled_data is not None).lower())
if compressed_pickled_data is None:
# Always log cache misses, because they are unexpected
tagger.sample_rate = 1
return None
tagger.measure('compressed_size', len(compressed_pickled_data))
pickled_data = zlib.decompress(compressed_pickled_data)
tagger.measure('uncom
|
| kylebegovich/ICIdo | mainsite/temp.py | Python | mit | 138 | 0.007246 |
from models import *
donation = Donation()
donor = Donor()
donor.first_name = "FirstName"
donor.last_name = "LastName"
|
print(donor)
|
| Sh1n/AML-ALL-classifier | main.py | Python | gpl-2.0 | 5,647 | 0.015583 |
import Orange
import logging
import random
from discretization import *
from FeatureSelector import *
from utils import *
from sklearn import svm
from sklearn import cross_validation
from sklearn.metrics import f1_score, precision_recall_fscore_support
from sklearn.feature_extraction import DictVectorizer
import numpy as np
# Vars
testsetPercentage = .2
validationsetPercentage = .3
progress = False
baseline = .9496
# Utilities
logging.basicConfig(filename='main.log',level=logging.DEBUG,format='%(levelname)s\t%(message)s')
def logmessage(message, color):
print color(message)
logging.info(message)
def copyDataset(dataset):
return Orange.data.Table(dataset)
# Compute S Threshold
# =============================================================================
boxmessage("Start", warning)
data = Orange.data.Table("dataset.tab")
data.randomGenerator = Orange.orange.RandomGenerator(random.randint(0, 10))
logmessage("Main Dataset Loaded", success)
# =============================================================================
# Extracts Test Set
boxmessage("Extracting Test Set and Working Set", info)
testSet = None
workingSet = None
if progress:
try:
with open("finaltestset.tab"):
logmessage("Final Test Set found", info)
with open("trainingset.tab"):
logmessage("Working Set found", info)
testSet = Orange.data.Table("finaltestset.tab")
workingSet = Orange.data.Table("trainingset.tab")
except IOError:
logmessage("IOError in loading final and working sets", error)
pass
else:
selection = Orange.orange.MakeRandomIndices2(data, testsetPercentage)
testSet = data.select(selection, 0)
testSet.save("finaltestset.tab")
workingSet = data.select(selection, 1)
workingSet.save("workingset.tab")
print success("Extraction performed")
print info("Test Instances: %s" % len(testSet))
print info("Training + Validation Instances: %s" % len(workingSet))
# =============================================================================
# Starts Iterations
K = 1
S = 0
C = 0
boxmessage("Starting main Loop", info)
#while(performanceIncrease):
# Split
if not progress:
info("Splitting Working Dataset for training and validation (70-30)")
selection = Orange.orange.MakeRandomIndices2(workingSet, validationsetPercentage)
validationSet = workingSet.select(selection, 0)
trainingSet = workingSet.select(selection, 1)
trainingSet.save("trainingset.tab")
validationSet.save("validationset.tab")
else:
validationSet = Orange.data.Table("validationset.tab")
trainingSet = Orange.data.Table("trainingset.tab")
# Discretization
ds = Discretizer(trainingSet, K, logging)
if progress:
try:
with open("discretizer.K.gains"):
print info("Loading Previous Iteration")
ds.load()
except IOError:
logmessage("IOError in loading found gains", error)
pass
else:
ds.findThresholds()
if progress:
try:
with open("discretized.tab"):
trainingSet = Orange.data.Table("discretized.tab")
print info("Discretized Dataset Loaded")
except IOError:
logmessage("IOError in loading discretized training dataset", error)
else:
trainingSet = ds.discretizeDataset(trainingSet)
trainingSet.save("discretized.tab")
# ============================================================================ #
# Feature Selection
fs = FeatureSelector()
if progress:
try:
with open("featureselected.tab"):
trainingSet = Orange.data.Table("featureselected.tab")
print info("Features Selected Dataset Loaded")
except IOError:
fs.computeThreshold(trainingSet)
fs.save()
trainingSet = fs.select(trainingSet)
trainingSet.save("featureselected.tab")
print info("New training dataset is %s" %len(trainingSet))
print info("New training dataset features are %s" % len(trainingSet[0]))
# Model Training
|
# Convert Train Dataset
# Apply transformation, from labels to you know what I mean
converted_train_data = ([[ d[f].value for f in trainingSet.domain if f != trainingSet.domain.class_var] for d in trainingSet])
converted_train_data = [dict(enumerate(d)) for d in converted_train_data]
vector = DictVectorizer(sparse=False)
converted_train_data = vector.fit_transform(converted_train_data)
converted_train_targets = ([ 0 if d[trainingSet.domain.class_var].value == 'ALL' else 1 for d in trainingSet ])
clf = svm.SVC(kernel='linear')
clf.fit(converted_train_data, converted_train_targets)
logmessage("Model learnt", success)
# Performances
# Apply Discretization and feature selection to validation set
validationSet = ds.discretizeDataset(validationSet)
validationSet = fs.select(validationSet)
logmessage("Validation set length is %s" % len(validationSet), info)
logmessage("Validation feature length is %s" % len(validationSet[0]), info)
# Convert Test Dataset
converted_test_data = ([[ d[f].value for f in validationSet.domain if f != validationSet.domain.class_var] for d in validationSet])
converted_test_data = [dict(enumerate(d)) for d in converted_test_data]
converted_test_data = vector.fit_transform(converted_test_data)
converted_test_targets = ([0 if d[validationSet.domain.class_var].value == 'ALL' else 1 for d in validationSet ])
logmessage("Starting Prediction Task", info)
prediction = clf.predict(converted_test_data)
p, r, f1, support = precision_recall_fscore_support(converted_test_targets, prediction)
f1_avg = np.average(f1)
logmessage("Average F1(Over 2 classes): %s" % f1_avg, info)
if f1_avg > baseline:
logmessage("Performance Increased", success)
logmessage("Using K: %s, S: %s, C: default" % (ds.K, fs.threshold), info)
else:
logmessage("Performance Decreased", error)
# =============================================================================
# Final Test
|
| KanoComputing/kano-settings | kano_settings/system/boards/__init__.py | Python | gpl-2.0 | 1,199 | 0 |
#
# __init__.py
#
# Copyright (C) 2016 Kano Computing Ltd.
# License: http://www.gnu.org/licenses/gpl-2.0.txt GNU GPL v2
#
# Module for board specific settings
#
import importlib
import re
import pkgutil
from kano.logging import logger
from kano.utils.hardware import RPI_1_CPU_PROFILE, get_board_property, \
get_rpi_model
__author__ = 'Kano Computing Ltd.'
__email__ = 'dev@kano.me'
def get_board_props(board_name=None):
if not board_name:
board_name = get_rpi_model()
cpu_profile = get_board_property(board_name, 'cpu_profile')
if not cpu_profile:
cpu_profile = RPI_1_CPU_PROFILE
board_module = re.sub(r'[-/ ]', '_', cpu_profile).lower()
try:
board = importlib.import_module(
'{}.{}'.format(__name__, board_module)
)
except ImportError:
logger.error('Board not found')
return None
required_props = ['CLOCKING', 'DEFAULT_CONFIG']
for prop in required_props:
if not hasattr(board, prop):
logger.error('No {} data in board config'
.format(prop.replace('_', ' ').lower()))
return None
# TODO: Validate board info
return board
|
| ecdavis/spacegame | tests/unit/test_star_system.py | Python | apache-2.0 | 3,976 | 0.002012 |
import mock
from pantsmud.driver import hook
from spacegame.core import hook_types
from spacegame.universe.star_system import StarSystem
from spacegame.universe.universe import Universe
from tests.unit.util import UnitTestCase
class StarSystemUnitTestCase(UnitTestCase):
def setUp(self):
UnitTestCase.setUp(self)
self.hook_star_system_reset = mock.MagicMock()
self.hook_star_system_reset.__name__ = 'hook_star_system_reset'
hook.add(hook_types.STAR_SYSTEM_RESET, self.hook_star_system_reset)
self.star_system = StarSystem()
self.star_system.reset_interval = 10
def test_links(self):
u = Universe()
s1 = StarSystem()
u.add_star_system(s1)
s2 = StarSystem()
u.add_star_system(s2)
s1.link_uuids.add(s2.uuid)
self.assertEqual({s2}, s1.links)
self.assertEqual(set(), s2.links)
def test_change_reset_interval_from_negative_updates_reset_timer(self):
self.star_system.reset_interval = -1
self.star_system.reset_timer = -1
self.star_system.reset_interval = 10
self.assertEqual(self.star_system.reset_timer, 10)
def test_change_reset_interval_with_reset_timer_below_one_updates_reset_timer(self):
self.star_system.reset_timer = 0
self.star_system.reset_interval = 5
self.assertEqual(self.star_system.reset_timer, 5)
def test_reduce_reset_interval_below_reset_timer_updates_reset_timer(self):
self.star_system.reset_interval = 10
self.star_system.reset_timer = 10
self.star_system.reset_interval = 5
self.assertEqual(self.star_system.reset_timer, 5)
def test_increase_reset_interval_above_reset_timer_does_not_change_reset_timer(self):
self.star_system.reset_timer = 10
self.star_system.reset_interval = 20
self.assertEqual(self.star_system.reset_timer, 10)
def test_force_reset_resets_reset_timer(self):
self.star_system.force_reset()
self.assertEqual(self.star_system.reset_timer, self.star_system.reset_interval)
def test_force_reset_calls_hook_star_system_reset(self):
self.star_system.force_reset()
self.hook_star_system_reset.assert_called()
def test_force_reset_with_negative_reset_interval_calls_hook_star_system_reset(self):
self.star_system.reset_interval = -1
self.star_system.force_reset()
self.hook_star_system_reset.assert_called()
def test_pulse_with_reset_timer_above_one_does_not_call_hook_star_system_reset(self):
self.star_system.reset_timer = 2
self.star_system.pulse()
self.hook_star_system_reset.assert_not_called()
def test_pulse_with_reset_timer_at_one_calls_hook_star_system_reset(self):
self.star_system.reset_timer = 1
self.star_system.pulse()
self.hook_star_system_reset.assert_called()
def test_pulse_with_reset_timer_below_one_does_not_call_hook_star_system_reset(self):
self.star_system.reset_timer = 0
self.star_system.pulse()
self.hook_star_system_reset.assert_not_called()
def test_pulse_with_reset_timer_above_one_decrements_reset_timer(self):
self.star_system.reset_timer = 2
self.star_system.pulse()
self.assertEqual(self.star_system.reset_timer, 1)
def test_pulse_with_reset_timer_at_one_resets_reset_timer(self):
self.star_system.reset_timer = 1
self.star_system.pulse()
self.assertEqual(self.star_system.reset_timer, self.star_system.reset_interval)
def test_pulse_with_reset_timer_at_zero_decrements_reset_timer(self):
self.star_system.reset_timer = 0
self.star_system.pulse()
self.assertEqual(self.star_system.reset_timer, -1)
def test_pulse_with_reset_timer_below_zero_does_not_change_reset_timer(self):
self.star_system.reset_timer = -1
self.star_system.pulse()
self.assertEqual(self.star_system.reset_timer, -1)
|
| Morphux/IRC-Bot | modules/mv/mv.py | Python | gpl-2.0 | 1,360 | 0.001472 |
# -*- coding: utf8 -*-
class Mv:
def command(self):
self.config = {
"command": {
"mv": {
"function": self.mvScreams,
"usage": "mv <us
|
er>",
"help": "Le clavier y colle!"
}
}}
return self.config
def mvScreams(self, Morphux, infos):
print(infos)
if (len(infos['args']) == 0 and infos['nick'] == "valouche"):
Morphux.sendMessage("Ta mere la chauve", infos['nick'])
elif (len(infos['args']) == 0 and infos['nick'] == "Ne02ptzero"):
Morphux.sendMessage("TU VAS LA CHIER TA CHIASSE?", infos['nick'])
|
elif (len(infos['args']) == 0):
Morphux.sendMessage("SARACE BOULBA", infos['nick'])
elif (infos['args'][0] == "allow"):
Morphux.sendMessage("ALLOW?", infos['nick'])
elif (infos['args'][0] == "thunes"):
Morphux.sendMessage("Money equals power", infos['nick'])
elif (infos['args'][0] == "theodule"):
Morphux.sendMessage("THEODUUULE", infos['nick'])
elif (infos['args'][0] == "gg"):
Morphux.sendMessage("Le beau jeu, le beau geste, la lucidité !", infos['nick'])
elif (Morphux.userExists(infos['args'][0]) == 0):
Morphux.sendMessage("Respecte toi " + infos['args'][0] + "!", infos['nick'])
|
| jamesr/sky_engine | build/zip.py | Python | bsd-3-clause | 1,167 | 0.011997 |
#!/usr/bin/env python3
#
# Copyright 2013 The Flutter Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import zipfile
import os
import sys
def _zip_dir(path, zip_file, prefix):
path = path.rstrip('/\\')
for root, dirs, files in os.walk(path):
for file in files:
zip_file.write(os.path.join(root, file), os.path.join(
root.replace(path, prefix), file))
def main(args):
zip_file = zipfile.ZipFile(args.output, 'w', zipfile.ZIP_DEFLATED)
for path, archive_name in args.input_pairs:
if os.path.isdir(path):
_zip_dir(path, zip_file, archive_name)
else:
zip_file.write(path, archive_name)
zip_file.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='This script creates zip files.')
parser.add_argument('-o', dest='output', action='store',
help='The name of the output zip file.')
parser.add_argument('-i', dest='input_pairs', nargs=2, action='append',
help='The input file and its destination location in the zip archive.')
sys.exit(main(parser.parse_args()))
|
| Chasego/cod | leetcode/665-Non-decreasing-Array/NonDecreasingArr.py | Python | mit | 567 | 0.001764 |
class Solution(object):
def checkPossibility(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
is_modified = False
for i in xrange(len(nums) - 1):
if nums[i] > nums[i+1]:
if is_modified:
return False
else:
|
if i == 0 or nums[i-1] <= nums[i+1]:
nums[i] = nums[i+1]
else:
nums[i+1] = nums[i]
is_modified = True
return True
|
| karpierz/libpcap | setup.py | Python | bsd-3-clause | 39 | 0.051282 |
from setuptools import setup ; setup()
|
| DarioGT/docker-carra | src/prototype/actions/graphModel.py | Python | mit | 8,442 | 0.014215 |
#!/usr/bin/env python
"""
Prototype to DOT (Graphviz) converter by Dario Gomez
Table format from django-extensions
"""
from protoExt.utils.utilsBase import Enum, getClassName
from protoExt.utils.utilsConvert import slugify2
class GraphModel():
def __init__(self):
self.tblStyle = False
self.dotSource = 'digraph Sm {'
self.dotSource += 'fontname="Helvetica";fontsize = 8;'
self.GRAPH_LEVEL = Enum(['all', 'essential', 'required' , 'primary', 'title'])
self.GRAPH_FORM = Enum(['orf', 'erf', 'drn'])
if self.tblStyle:
self.dotSource += 'node [shape="plaintext"];\n'
self.tblTitle = '\n{0} [label=<<TABLE BGCOLOR="palegoldenrod" BORDER="0" CELLBORDER="0" CELLSPACING="0" style="width:100px"><TR><TD COLSPAN="2" CELLPADDING="4" ALIGN="CENTER" BGCOLOR="olivedrab4"> <FONT FACE="Helvetica Bold" COLOR="white">{1}</FONT> </TD></TR>'
self.tblField = '\n<TR><TD ALIGN="LEFT" BORDER="0"><FONT FACE="Helvetica {2}">{0}</FONT></TD><TD ALIGN="LEFT"><FONT FACE="Helvetica {2}">{1}</FONT></TD></TR>'
else:
# Animal [label = "{{{1}|+ name : string\l+ age : int\l|+ die() : void\l}"]
self.dotSource += 'rankdir = BT;node [shape=record,width=0,height=0,concentrate=true];\n'
self.tblRecord = '\n{0} [label = "{{{1}|'
self.lnkComposition = '[dir=both,arrowhead=diamond,arrowtail=none]\n'
self.lnkAgregation = '[dir=both,arrowhead=ediamond,arrowtail=none]\n'
self.lnkNoCascade = '[dir=both,arrowhead=diamondtee,arrowtail=none]\n'
self.lnkHeritage = '[dir=both,arrowhead=empty,arrowtail=none]\n'
self.lnkER = '[dir=both,arrowhead=none,arrowtail=invempty]\n'
def getDiagramDefinition(self, diagramSet):
self.diagrams = []
self.entities = []
|
for pDiag in diagramSet:
gDiagram = {
'code': getClassName(pDiag.code) ,
'label': slugify2( pDiag.code ),
'clusterName': slugify2( getattr(pDiag, 'title', pDiag.code)),
'graphLevel' : getattr(pDiag, 'graphLevel' , self.GRAPH_LEVEL.all),
'graphForm' : getattr(pDiag, 'graphForm' , self.GRAPH_FORM.orf),
'showPrpType': getattr(pDiag, 'showPrpType' , False),
'showBorder' : getattr(pDiag, 'showBorder' , False),
'showFKey' : getattr(pDiag, 'showFKey' , False),
'prefix' : slugify2( getattr(pDiag, 'prefix' , '')),
'entities': []
}
for pDiagEntity in pDiag.diagramentity_set.all():
pEntity = pDiagEntity.entity
enttCode = self.getEntityCode(pEntity.code, gDiagram.get('prefix'))
# If it already appears in another diagram, do not draw it again
if enttCode in self.entities:
continue
self.entities.append(enttCode)
gEntity = {
'code': enttCode,
'fields': [],
'relations': []
}
for pProperty in pEntity.property_set.all():
pptCode = slugify2(pProperty.code, '_')
if pProperty.isForeign:
pLinkTo = self.getEntityCode(pProperty.relationship.refEntity.code, gDiagram.get('prefix'))
gEntity['relations'].append({
'code': pptCode,
'linkTo': pLinkTo,
'primary': pProperty.isPrimary,
'required': pProperty.isRequired,
'essential': pProperty.isEssential,
'foreign': True
})
else:
pType = slugify2(pProperty.baseType , '_')
gEntity['fields'].append({
'code': pptCode,
'type': pType or 'string',
'primary': pProperty.isPrimary,
'required': pProperty.isRequired,
'essential': pProperty.isEssential,
'foreign': False
})
gDiagram['entities'].append(gEntity)
self.diagrams.append(gDiagram)
def generateDotModel(self):
# Draw the entities
for gDiagram in self.diagrams:
if gDiagram.get('graphLevel') < self.GRAPH_LEVEL.title :
self.dotSource += '\nsubgraph cluster_{0} {{'.format(gDiagram.get('code'))
if not gDiagram.get('showBorder', False) :
self.dotSource += 'style=dotted;'
if len(gDiagram.get('label', '')) > 0:
self.dotSource += 'label="{}";'.format(gDiagram.get('label', ''))
for gEntity in gDiagram['entities']:
self.entity2dot(gDiagram, gEntity)
self.dotSource += '}\n'
# Draw the links
for gDiagram in self.diagrams:
for gEntity in gDiagram['entities']:
self.link2dot(gEntity, gDiagram.get( 'showFKey'))
self.dotSource += '}'
# Draw the relationships
# for gDiagram in self.diagrams:
# for relation in gEntity['relations']:
# if relation['target'] in nodes:
# relation['needs_node'] = False
return self.dotSource
def link2dot(self, gEntity, showFKey):
for gLink in gEntity['relations']:
pEntity = gEntity.get('code')
pLinkTo = gLink.get('linkTo')
if ( not showFKey ) and ( pLinkTo not in self.entities ):
continue
self.dotSource += '{0} -> {1} '.format(pEntity, pLinkTo) + self.lnkComposition
def entity2dot(self, gDiagram, gEntity):
if self.tblStyle:
enttTable = self.tblTitle.format(gEntity.get('code'), gEntity.get('label', gEntity.get('code')))
else:
enttRecord = self.tblRecord.format(gEntity.get('code'), gEntity.get('label', gEntity.get('code')))
# 0 : colName; 1 : baseType; 2 : Bold / Italic
for gField in gEntity['fields'] + gEntity['relations'] :
if gDiagram.get('showPrpType') :
sPrpType = gField.get('type', ' ')
else : sPrpType = ' '
sPk = ''
fildLv = 0
diagLv = gDiagram.get('graphLevel')
if gField.get('primary') :
fildLv = self.GRAPH_LEVEL.primary
sPk = 'Bold'
elif gField.get('required'):
fildLv = self.GRAPH_LEVEL.required
elif gField.get('essential'):
fildLv = self.GRAPH_LEVEL.essential
# If it does not reach the level
if fildLv >= diagLv:
sFk = ''
if gField.get('foreign'):
sFk = ' Italic'
if self.tblStyle:
enttTable += self.tblField.format(gField.get('code'), sPrpType, sPk + sFk)
else:
if len(sPk) > 0:
sPk = '*'
if len(sFk) > 0:
sPk += '+'
if len(sPk) > 0:
sPk += ' '
if len(sPrpType) > 1:
sPrpType = ': ' + sPrpType
enttRecord += '{2}{0}{1}\l'.format(gField.get('code'), sPrpType, sPk)
if self.tblStyle:
enttTable += '</TABLE>>]\n'
else:
enttRecord += '}"]\n'
# self.dotSource += enttTable
self.dotSource += enttRecord
def getEntityCode(self, code, prefix):
# Formatea el nom
|
| andrewhead/Package-Qualifiers | tests/compute/test_compute_code.py | Python | mit | 4,317 | 0.001853 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import unittest
from bs4 import BeautifulSoup
from compute.code import CodeExtractor
logging.basicConfig(level=logging.INFO, format="%(message)s")
class ExtractCodeTest(unittest.TestCase):
def setUp(self):
self.code_extractor = CodeExtractor()
def _extract_code(self, document):
return self.code_extractor.extract(document)
def _make_document_with_body(self, body):
return BeautifulSoup('\n'.join([
"<html>",
" <body>",
body,
" </body>",
"</html>",
]), 'html.parser')
def test_extract_valid_javascript(self):
document = self._make_document_with_body("<code>var i = 0;</code")
snippets = self.code_extractor.extract(document)
self.assertEqual(len(snippets), 1)
self.assertEqual(snippets[0], "var i = 0;")
def test_extract_valid_javascript_with_padding(self):
# In the past, some parsers I have used have had trouble parsing with whitespace
# surrounding the parsed content. This is a sanity test to make sure that the
# backend parser will still detect JavaScript padded with whitespace.
document = self._make_document_with_body("<code>\n\n\t var i = 0;\t \n</code>")
snippets = self.code_extractor.extract(document)
self.assertEqual(snippets[0], "\n\n\t var i = 0;\t \n")
def test_extract_valid_multiline_javascript(self):
document = self._make_document_with_body('\n'.join([
"<code>for (var i = 0; i < 2; i++) {",
" console.log(\"Hello, world!\");",
"}</code>",
]))
snippets = self.code_extractor.extract(document)
self.assertEqual(snippets[0], '\n'.join([
"for (var i = 0; i < 2; i++) {",
" console.log(\"Hello, world!\");",
"}",
]))
def test_extract_multiple_blocks(self):
document = self._make_document_with_body('\n'.join([
"<code>var i = 0;</code>",
"<code>i = i + 1;</code>",
]))
snippets = self.code_extractor.extract(document)
self.assertEqual(len(snippets), 2)
self.assertIn("var i = 0;", snippets)
self.assertIn("i = i + 1;", snippets)
def test_fail_to_detect_text_in_code_block(self):
document = self._make_document_with_body("<code>This is a plain E
|
nglish sentence.</code>")
snippets = self.code_extractor.extract(document)
self.assertEqual(len(snippets), 0)
def test_fail_to_detect_command_line(self):
document = self._make_document_with_body("<code>npm install package</code>")
snippets = self.code_extractor.extract(document)
self.assertEqual(len(snippets), 0)
def test_skip_whitespace_only(self):
document = self._make_document_with_body("<code>\t \n</code>")
snippets = self.code_extractor.extract(document)
self.assertEqual(len(snippets), 0)
# In practice I don't expect the next two scenarios to come up. But the expected behavior of
# the code extractor is to scan children of all nodes that are marked as invalid. This
# test makes sure that functionality is correct.
def test_skip_child_of_code_block_parent(self):
document = self._make_document_with_body('\n'.join([
"<code>",
"var outer = 0;",
"<code>var inner = 1;</code>",
"</code>",
]))
snippets = self.code_extractor.extract(document)
self.assertEqual(len(snippets), 1)
self.assertEqual(snippets[0], '\n'.join([
"",
"var outer = 0;",
"var inner = 1;",
"",
]))
def test_detect_code_block_nested_inside_invalid_code_block(self):
document = self._make_document_with_body('\n'.join([
"<code>",
" This plaintext invalidates this block as a whole.",
" <code>var i = 0; // But this child will be valid</code>",
"</code>",
]))
snippets = self.code_extractor.extract(document)
self.assertEqual(len(snippets), 1)
self.assertEqual(snippets[0], "var i = 0; // But this child will be valid")
|
| Golker/wttd | eventex/core/migrations/0003_contact.py | Python | mit | 822 | 0.00365 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-19 00:28
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0002_auto_20160218_2359'),
]
operations = [
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('kind', models.CharField(choices=[('E', 'Email'), ('P', 'Phone')], max_length=1)),
('value', models.CharField(max_length=255)),
('speaker', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Speaker')),
],
),
]
|
DonaldWhyte/module-dependency
|
tests/test_tokeniser.py
|
Python
|
mit
| 6,029
| 0.026373
|
import unittest
import sys
import os
sys.path.append(os.environ.get("PROJECT_ROOT_DIRECTORY", "."))
from moduledependency.tokeniser import Token, Tokeniser
class TestToken(unittest.TestCase):
def test_construction(self):#
# Test with invalid token type
with self.assertRaises(ValueError):
Token("HAHAHAHAHA")
# Test with valid token types (one with value and one without)
token = Token("identifier", "testVariable")
self.assertEqual(token.type, "identifier")
self.assertEqual(token.value, "testVariable")
token = Token("from")
self.assertEqual(token.type, "from")
self.assertEqual(token.value, "from")
class TestTokeniser(unittest.TestCase):
def setUp(self):
self.tokeniser = Tokeniser()
# Create test data
self.noImportSource = """
def testFunction(x):
\"\"\"This is a docstring but I'm not sure
how far it goes.
\"\"\"
return x * 2
\'\'\'Another multi
line string\'\'\'
'test'
something = [ "hello" ]
"""
self.importSource = """#comment here
import a
from a import b
from c import *
from d import e, f
from g import dummy, *
from . import h
from . import i, j
from .k import l
from .m import *
from .n import o.p
from .q import another_dummy, *
class DummyClass:
def something():
# Hello World!
from sys import path # test
print(path)
def somethingEntirelyDifferent():
import bang
bang.start()
"""
self.noImportTokens = [
Token("identifier", "def"), Token("identifier", "testFunction"),
Token("other", "("), Token("identifier", "x"), Token("other", ")"), Token("other", ":"),
Token("identifier", "return"), Token("identifier", "x"),
Token("*"), Token("other", "2"), Token("identifier", "something"),
Token("other", "="), Token("other", "["), Token("other", "]"),
]
self.importTokens = [
Token("import"), Token("identifier", "a"),
Token("from"), Token("identifier", "a"), Token("import"), Token("identifier", "b"),
Token("from"), Token("identifier", "c"), Token("import"), Token("*"),
Token("from"), Token("identifier", "d"), Token("import"), Token("identifier", "e"), Token(","), Token("identifier", "f"),
Token("from"), Token("identifier", "g"), Token("import"), Token("identifier", "dummy"), Token(","), Token("*"),
Token("from"), Token("."), Token("import"), Token("identifier", "h"),
Token("from"), Token("."), Token("import"), Token("identifier", "i"), Token(","), Token("identifier", "j"),
Token("from"), Token("."), Token("identifier", "k"), Token("import"), Token("identifier", "l"),
Token("from"), Token("."), Token("identifier", "m"), Token("import"), Token("*"),
Token("from"), Token("."), Token("identifier", "n"), Token("import"), Token("identifier", "o"), Token("."), Token("identifier", "p"),
Token("from"), Token("."), Token("identifier", "q"), Token("import"), Token("identifier", "another_dummy"), Token(","), Token("*"),
Token("identifier", "class"), Token("identifier", "DummyClass"), Token("other", ":"),
Token("identifier", "def"), Token("identifier", "something"), Token("other", "("), Token("other", ")"), Token("other", ":"),
Token("from"), Token("identifier", "sys"), Token("import"), Token("identifier", "path"),
Token("identifier", "print"), Token("other", "("), Token("identifier", "path"), Token("other", ")"),
Token("identifier", "def"), Token("identifier", "somethingEntirelyDifferent"), Token("other", "("), Token("other", ")"), Token("other", ":"),
Token("import"), Token("identifier", "bang"),
Token("identifier", "bang"), Token("."), Token("identifier", "start"), Token("other", "("), Token("other", ")")
]
def tearDown(self):
self.tokeniser = None
self.noImportSource = None
self.importSource = None
self.noImportTokens = None
self.importTokens = None
def test_tokenise(self):
# Test with invalid type
with self.assertRaises(TypeError):
self.tokeniser.tokenise(3636)
# Test with empty Python source code
self.assertEqual(self.tokeniser.tokenise(""), [])
# Test with source code that has no imports
self.assertEqual(self.tokeniser.tokenise(self.noImportSource), self.noImportTokens)
# Test with source code that has imports
self.assertEqual(self.tokeniser.tokenise(self.importSource), self.importTokens)
# Test with source that ends STRAIGHT after import
self.assertEqual(self.tokeniser.tokenise("from . import pack"),
[ Token("from"), Token("."), Token("import"), Token("identifier", "pack") ])
def test_skipComment(self):
# First element of tuple is the index to start skipping from
# and the second element is the desired end element
TEST_SOURCE = """#comment at the start
hello = 5 # comment at the end of a thing
# # # # nestetd comment
"""
TEST_INDICES = [ (0, 21), (31, 65), (66, 91) ]
for test in TEST_INDICES:
self.tokeniser.clear()
self.tokeniser.source = TEST_SOURCE
self.tokeniser.index = test[0]
self.tokeniser.skipComment()
self.assertEqual(self.tokeniser.index, test[1])
def test_skipString(self):
# Contains tuples where the first element is the index of
# the character the test should start at, the second
# element is where the tokeniser should stop skipping and
# the third element is the delimiter of the test string
TEST_INDICES = [
(31, 8, "\""),
(51, 7, "\'"),
(70, 24, "\"\"\""),
(106, 38, "'''"),
(155, 14, "\"")
]
# Set the source code that will be used for comment skipping
TEST_SOURCE = """#comment at the start
test = "hel\\"lo"
test2 = 'el\\'lo'
test3 = \"\"\""hello"
multiline\"\"\"
test4 = '''can be multiline but 'test' isn't'''
no_end=" ijruiytie
"""
for test in TEST_INDICES:
self.tokeniser.clear()
self.tokeniser.source = TEST_SOURCE
self.tokeniser.index = test[0]
self.tokeniser.skipString(test[2])
self.assertEqual(self.tokeniser.index, test[0] + test[1])
|
kpeiruza/incubator-spot
|
spot-oa/oa/dns/dns_oa.py
|
Python
|
apache-2.0
| 17,663
| 0.018174
|
import logging
import os
import json
import shutil
import sys
import datetime
import csv, math
from tld import get_tld
from collections import OrderedDict
from utils import Util
from components.data.data import Data
from components.iana.iana_transform import IanaTransform
from components.nc.network_context import NetworkContext
from multiprocessing import Process
import pandas as pd
import time
class OA(object):
def __init__(self,date,limit=500,logger=None):
self._initialize_members(date,limit,logger)
def _initialize_members(self,date,limit,logger):
# get logger if exists. if not, create new instance.
self._logger = logging.getLogger('OA.DNS') if logger else Util.get_logger('OA.DNS',create_file=False)
# initialize required parameters.
self._scrtip_path = os.path.dirname(os.path.abspath(__file__))
self._date = date
self._table_name = "dns"
self._dns_results = []
self._limit = limit
self._data_path = None
self._ipynb_path = None
self._ingest_summary_path = None
self._dns_scores = []
self._dns_scores_headers = []
self._results_delimiter = '\t'
self._details_limit = 250
# get app configuration.
self._spot_conf = Util.get_spot_conf()
# get scores fields conf
conf_file = "{0}/dns_conf.json".format(self._scrtip_path)
self._conf = json.loads(open (conf_file).read(),object_pairs_hook=OrderedDict)
# initialize data engine
self._db = self._spot_conf.get('conf', 'DBNAME').replace("'", "").replace('"', '')
self._engine = Data(self._db,self._table_name ,self._logger)
def start(self):
####################
start = time.time()
####################
self._create_folder_structure()
self._add_ipynb()
self._get_dns_results()
self._add_tld_column()
self._add_reputation()
self._add_hh_and_severity()
self._add_iana()
self._add_network_context()
self._create_dns_scores_csv()
self._get_oa_details()
self._ingest_summary()
##################
end = time.time()
print(end - start)
##################
def _create_folder_structure(self):
# create date folder structure if it does not exist.
self._logger.info("Creating folder structure for OA (data and ipynb)")
self._data_path,self._ingest_summary_path,self._ipynb_path = Util.create_oa_folders("dns",self._date)
def _add_ipynb(self):
if os.path.isdir(self._ipynb_path):
self._logger.info("Adding edge investigation IPython Notebook")
shutil.copy("{0}/ipynb_templates/Edge_Investigation_master.ipynb".format(self._scrtip_path),"{0}/Edge_Investigation.ipynb".format(self._ipynb_path))
self._logger.info("Adding threat investigation IPython Notebook")
shutil.copy("{0}/ipynb_templates/Threat_Investigation_master.ipynb".format(self._scrtip_path),"{0}/Threat_Investigation.ipynb".format(self._ipynb_path))
else:
self._logger.error("There was a problem adding the IPython Notebooks, please check the directory exists.")
def _get_dns_results(self):
self._logger.info("Getting {0} Machine Learning Results from HDFS".format(self._date))
dns_results = "{0}/dns_results.csv".format(self._data_path)
# get hdfs path from conf file.
HUSER = self._spot_conf.get('conf', 'HUSER').replace("'", "").replace('"', '')
hdfs_path = "{0}/dns/scored_results/{1}/scores/dns_results.csv".format(HUSER,self._date)
# get results file from hdfs.
get_command = Util.get_ml_results_form_hdfs(hdfs_path,self._data_path)
self._logger.info("{0}".format(get_command))
# validate files exists
if os.path.isfile(dns_results):
# read number of results based in the limit specified.
self._logger.info("Reading {0} dns results file: {1}".format(self._date,dns_results))
self._dns_results = Util.read_results(dns_results,self._limit,self._results_delimiter)[:]
if len(self._dns_results) == 0: self._logger.error("There are not flow results.");sys.exit(1)
else:
self._logger.error("There was an error getting ML results from HDFS")
sys.exit(1)
# add headers.
self._logger.info("Adding headers")
self._dns_scores_headers = [ str(key) for (key,value) in self._conf['dns_score_fields'].items() ]
# add dns content.
self._dns_scores = [ conn[:] for conn in self._dns_results][:]
def _move_time_stamp(self,dns_data):
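# Move each row's timestamp (second column) to the end of the row.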
for dns in dns_data:
time_stamp = dns[1]
dns.remove(time_stamp)
dns.append(time_stamp)
return dns_data
def _create_dns_scores_csv(self):
dns_scores_csv = "{0}/dns_scores.csv".format(self._data_path)
dns_scores_final = self._move_time_stamp(self._dns_scores)
dns_scores_final.insert(0,self._dns_scores_headers)
Util.create_csv_file(dns_scores_csv,dns_scores_final)
# create bk file
dns_scores_bu_csv = "{0}/dns_scores_bu.csv".format(self._data_path)
Util.create_csv_file(dns_scores_bu_csv,dns_scores_final)
def _add_tld_column(self):
qry_name_col = self._conf['dns_results_fields']['dns_qry_name']
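# Append a new column holding the top-level domain of each query name; prepend "http://" when it is missing so get_tld() can parse bare hostnames.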
self._dns_scores = [conn + [ get_tld("http://" + str(conn[qry_name_col]), fail_silently=True) if "http://" not in str(conn[qry_name_col]) else get_tld(str(conn[qry_name_col]), fail_silently=True)] for conn in self._dns_scores ]
def _add_reputation(self):
# read configuration.
reputation_conf_file = "{0}/components/reputation/reputation_config.json".format(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
self._logger.info("Reading reputation configuration file: {0}".format(reputation_conf_file))
rep_conf = json.loads(open(reputation_conf_file).read())
# initialize reputation services.
self._rep_services = []
self._logger.info("Initializing reputation services.")
for service in rep_conf:
config = rep_conf[service]
module = __import__("components.reputation.{0}.{0}".format(service), fromlist=['Reputation'])
self._rep_services.append(module.Reputation(config,self._logger))
# get columns for reputation.
rep_cols = {}
indexes = [ int(value) for key, value in self._conf["add_reputation"].items()]
self._logger.info("Getting columns to add reputation based on config file: dns_conf.json".format())
for index in indexes:
col_list = []
for conn in self._dns_scores:
col_list.append(conn[index])
rep_cols[index] = list(set(col_list))
# get reputation per column.
self._logger.info("Getting reputation for each service in config")
rep_services_results = []
if self._rep_services:
for key,value in rep_cols.items():
rep_services_results = [ rep_service.check(None,value) for rep_service in self._rep_services]
rep_results = {}
for result in rep_services_results:
rep_results = {k: "{0}::{1}".format(rep_results.get(k, ""), result.get(k, "")).strip('::') for k in set(rep_results) | set(result)}
self._dns_scores = [ conn + [ rep_results[conn[key]] ] for conn in self._dns_scores ]
else:
self._dns_scores = [ conn + [""] for conn in self._dns_scores ]
def _add_hh_and_severity(self):
# add hh value and sev columns.
dns_date_index = self._conf["dns_results_fields"]["frame_time"]
self._dns_scores = [conn + [ filter(None,conn[dns_date_index].split(" "))[3].split(":")[0]] + [0] + [0] for conn in self._dns_scores ]
def _add_ia
|
tescalada/npyscreen-restructure
|
npyscreen/ThemeManagers.py
|
Python
|
bsd-2-clause
| 4,810
| 0.005821
|
# encoding: utf-8
"""
IMPORTANT - COLOUR SUPPORT IS CURRENTLY EXTREMELY EXPERIMENTAL. THE API MAY CHANGE, AND NO DEFAULT
WIDGETS CURRENTLY TAKE ADVANTAGE OF THEME SUPPORT AT ALL.
"""
import curses
from . import global_options
def disable_color():
global_options.DISABLE_ALL_COLORS = True
def enable_color():
global_options.DISABLE_ALL_COLORS = False
class ThemeManager(object):
_colors_to_define = (
# DO NOT DEFINE THIS COLOR - THINGS BREAK
#('WHITE_BLACK', DO_NOT_DO_THIS, DO_NOT_DO_THIS),
('BLACK_WHITE', curses.COLOR_BLACK, curses.COLOR_WHITE),
#('BLACK_ON_DEFAULT', curses.COLOR_BLACK, -1),
#('WHITE_ON_DEFAULT', curses.COLOR_WHITE, -1),
('BLUE_BLACK', curses.COLOR_BLUE, curses.COLOR_BLACK),
('CYAN_BLACK', curses.COLOR_CYAN, curses.COLOR_BLACK),
('GREEN_BLACK', curses.COLOR_GREEN, curses.COLOR_BLACK),
('MAGENTA_BLACK', curses.COLOR_MAGENTA, curses.COLOR_BLACK),
('RED_BLACK', curses.COLOR_RED, curses.COLOR_BLACK),
('YELLOW_BLACK', curses.COLOR_YELLOW, curses.COLOR_BLACK),
('BLACK_RED', curses.COLOR_BLACK, curses.COLOR_RED),
('BLACK_GREEN', curses.COLOR_BLACK, curses.COLOR_GREEN),
('BLACK_YELLOW', curses.COLOR_BLACK, curses.COLOR_YELLOW),
('BLUE_WHITE', curses.COLOR_BLUE, curses.COLOR_WHITE),
('CYAN_WHITE', curses.COLOR_CYAN, curses.COLOR_WHITE),
('GREEN_WHITE', curses.COLOR_GREEN, curses.COLOR_WHITE),
('MAGENTA_WHITE', curses.COLOR_MAGENTA, curses.COLOR_WHITE),
('RED_WHITE', curses.COLOR_RED, curses.COLOR_WHITE),
('YELLOW_WHITE', curses.COLOR_YELLOW, curses.COLOR_WHITE),
)
default_colors = {
'DEFAULT' : 'WHITE_BLACK',
'FORMDEFAULT' : 'WHITE_BLACK',
'NO_EDIT' : 'BLUE_BLACK',
'STANDOUT' : 'CYAN_BLACK',
'CURSOR' : 'WHITE_BLACK',
'LABEL' : 'GREEN_BLACK',
'LABELBOLD' : 'WHITE_BLACK',
'CONTROL' : 'YELLOW_BLACK',
'IMPORTANT' : 'GREEN_BLACK',
'SAFE' : 'GREEN_BLACK',
'WARNING' : 'YELLOW_BLACK',
'DANGER' : 'RED_BLACK',
'CRITICAL' : 'BLACK_RED',
'GOOD' : 'GREEN_BLACK',
'GOODHL' : 'GREEN_BLACK',
'VERYGOOD' : 'BLACK_GREEN',
'CAUTION' : 'YELLOW_BLACK',
'CAUTIONHL' : 'BLACK_YELLOW',
}
def __init__(self):
#curses.use_default_colors()
self._defined_pairs = {}
self._names = {}
try:
self._max_pairs = curses.COLOR_PAIRS - 1
do_color = True
except AttributeError:
# curses.start_color has failed or has not been called
do_color = False
# Disable all color use across the application
disable_color()
if do_color and curses.has_colors():
self.initialize_pairs()
self.initialize_names()
def find_pair(self, caller, request='DEFAULT'):
if not curses.has_colors() or global_options.DISABLE_ALL_COLORS:
return False
if request == 'DEFAULT':
request = caller.color
# Locate the requested color pair. Default to default if not found.
try:
pair = self._defined_pairs[self._names[request]]
except:
pair = self._defined_pairs[self._names['DEFAULT']]
# now make the actual attribute
color_attribute = curses.color_pair(pair[0])
return color_attribute
def set_default(self, caller):
return False
def initialize_pairs(self):
# White on Black is fixed as color_pair 0
self._defined_pairs['WHITE_BLACK'] = (0, curses.COLOR_WHITE, curses.COLOR_BLACK)
for cp in self.__class__._colors_to_define:
if cp[0] == 'WHITE_BLACK':
# silently protect the user from breaking things.
continue
self.initalize_pair(cp[0], cp[1], cp[2])
def initialize_names(self):
self._names.update(self.__class__.default_colors)
def initalize_pair(self, name, fg, bg):
#Initialize a color_pair for the required color and return the number.
#Raise an exception if this is not possible.
if (len(list(self._defined_pairs.keys())) + 1) == self._max_pairs:
raise Exception("Too many colors")
_this_pair_number = len(list(self._defined_pairs.keys())) + 1
curses.init_pair(_this_pair_number, fg, bg)
self._defined_pairs[name] = (_this_pair_number, fg, bg)
return _this_pair_number
def get_pair_number(self, name):
return self._defined_pairs[name][0]
|
kevinnguyeneng/django-uwsgi-nginx
|
app/naf_autoticket/migrations/0024_alertcorrelationweight.py
|
Python
|
gpl-3.0
| 1,043
| 0.002876
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('naf_autoticket', '0023_hostdevice_hostlocation'),
]
operations = [
migrations.CreateModel(
name='AlertCorrelationWeight',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('AlertCompare', models.CharField(max_length=100)),
('TimeWeight', models.CharField(max_length=50)),
('LocationWeight', models.CharField(max_length=255, null=True)),
('LogicalWeight', models.CharField(max_length=255, null=True)),
('AlertInfo', models.ForeignKey(to='naf_autoticket.AlertInfo')),
],
options={
'db_table': 'nafautoticket_alertcorrelationweight',
'verbose_name_plural': 'alertcorrelationweight',
},
),
]
|
pierreboudes/pyThymio
|
garden_real.py
|
Python
|
lgpl-3.0
| 1,328
| 0.003765
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import pythymio
import random
from gardenworld import *
init('info2_1')
with pythymio.thymio(["acc"],[]) as Thym:
state = dict([])
state["time"] = 0
state["delay"] = 10
def dispatch(evtid, evt_name, evt_args):
# https://www.thymio.org/en:thymioapi prox freq is 16Hz
if evt_name == "fwd.acc": # every 0.0625 sec
state["time"] += 0.0625
state["delay"] -= 1
if state["delay"] < 0:
if 7 < evt_args[1] < 14:
if evt_args[0] > 10:
state["delay"] = 20
tg()
elif evt_args[0] < -10:
state["delay"] = 20
td()
elif evt_args[1] > 20 and abs(evt_args[0]) < 8:
state["delay"] = 10
av()
elif evt_args[1] < 5:
if evt_args[0] > 10:
state["de
|
lay"] = 20
dp()
elif evt_args[0] < -10:
state["delay"] = 20
ra()
else: # Wat?
print evt_name
# Now lets start the loopy thing
Thym.loop(dispatch)
print "state is %s" % state
print "Sayonara"
|
oxfordinternetinstitute/scriptingcourse
|
Lecture 2/PR_printRedditJson1.1.py
|
Python
|
gpl-3.0
| 3,376
| 0.039396
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Week 2: Project 1: Download and print json from reddit's main page.
This file contains methods to download and parse Reddit.com's main page.
One could do this hourly to sample reddit activity, and plot it over some metric.
hmmm....
CHANGELOG
version 1.1. Fixed bug to send output to the wrong path.
Tweaked method attributes.
Pretty print is more obvious
version 1.2. Set time to sleep 3 seconds. Gets around timeout bug.
"""
__author__ = 'Bernie Hogan'
__version__= '1.2'
import json
# import simplejson as json # alternate for < 2.6
import urllib2
import string
import os
import time
PATH = (os.getcwd())
def getRedditJson(count=0,after="",url = "http://www.reddit.com/.json",prettyPrint=False):
'''getRedditJson will append an after and a count to a reddit.com url.
It can also be used with subreddits/elsewhere, using the optional url'''
if count > 0:
url += "?count=%d&after=%s" % (count,after)
redditfile = urllib2.urlopen(url)
if prettyPrint:
return downloadJsonToPretty(url,"reddit-json_%d_%s.json" % (count,after))
else:
return json.load( redditfile )
def addToTable(jsondata,fileoutpath,count,header=False):
'''This method takes a json file and adds it to a table.
Notice the header is only added if the count is 0.
- Certainly, there\'s tidier way to do this?'''
outstr = ""
queries = ["rank","ups","downs"]
if count == 0:
fileout = open(fileoutpath,'w')
outstr += "queries\tups\tdowns\tscore\tcomments\tsubmitter\n"
else:
fileout = open(fileoutpath,'a')
for c,i in enumerate(jsondata):
outlist = []
outlist.append(str(c+count+1))
outlist.append(i["data"]["ups"])
outlist.append(i["data"]["downs"])
outlist.append(i["data"]["score"])
outlist.append(i["data"]["num_comments"])
outlist.append(i["data"]["author"])
outstr += string.join([unicode(x) for x in outlist],'\t') + "\n"
fileout.write(outstr)
outstr = ""
fileout.close()
# Note: os.sep below was not in the earlier version,
# causing file to be written in dir immediately above.
def getIteratedReddits(max=200,url="http://www.reddit.com/.json"):
'''This is the main controller method. Notice _i_ is in a range stepping by 25.
This is a user configurable setting, so if this code worked on a logged in user
it would have to be changed. I look at 50 reddits per page, for example.'''
after = ""
step = 25
for i in range(0,max,step):
print "Downloading stories from %d to %d (after %s)" % (i,i+step,after)
reddit = getRedditJson(i,after,url)
time.sleep(3)
addToTable(reddit["data"]["children"],PATH+os.sep+"redditstats.txt",i)
after = reddit["data"]["after"]
print after
print "Finished downloading. File available at %s" % PATH + os.sep+"redditstats.txt"
# This is an unused helper method.
# Use it to have a cleaner look at json than is provided raw from the server.
def downloadJsonToPretty(url = "http://www.reddit.com/.json", name="prettyjson.txt"):
fileout = open(PATH + os.sep + name, 'w')
jsonfile = json.load(urllib2.urlopen(url))
fileout.write(json.dumps(jsonfile, indent = 4))
fileout.close()
return jsonfile
# This method calls the other two.
# See method above for optional arguments.
getIteratedReddits(150)
# This method will print the main page by default to prettyjson.txt.
# downloadJsonToPretty()
|
scottrice/Ice
|
ice/emulators.py
|
Python
|
mit
| 1,887
| 0.015898
|
# encoding: utf-8
import os
def emulator_rom_launch_command(emulator, rom):
"""Generates a command string that will launch `rom` with `emulator` (using
the format provided by the user). The return value of this function should
be suitable to use as the `Exe` field of a Steam shortcut"""
# Normalizing the strings is just removing any leading/trailing quotes.
# The beautiful thing is that strip does nothing if it doesnt contain quotes,
# so normalizing it then adding quotes should do what I want 100% of the time
normalize = lambda s: s.strip("\"")
add_quotes = lambda s: "\"%s\"" % s
# We don't know if the user put quotes around the emulator location. If
# so, we dont want to add another pair and screw things up.
#
# The user didnt give us the ROM information, but screw it, I already
# have some code to add quotes to a string, might as well use it.
quoted_location = add_quotes(normalize(emulator.location))
quoted_rom = add_quotes(normalize(rom.path))
# The format string contains a bunch of specifies that users can use to
# substitute values in at runtime. Right now the only supported values are:
# %l - The location of the emulator (to avoid sync bugs)
# %r - The location of the ROM (so the emulator knows what to launch)
# %fn - The ROM filename without its extension (for emulators that utilize separete configuration files)
#
# More may be added in the future, but for now this is what we support
return (
emulator.format
.replace("%l", quoted_location)
.replace("%r", quoted_rom)
.replace("%fn", os.path.splitext(os.path.basename(rom.path))[0])
)
def emulator_startdir(emulator):
"""Returns the directory which stores the emulator. The return value of this
function should be suitable to use as the 'StartDir' field of a Steam
shortcut"""
return os.path.dirname(emulator.location)
|
carlgao/lenga
|
images/lenny64-peon/usr/share/python-support/mercurial-common/hgext/convert/cvs.py
|
Python
|
mit
| 11,997
| 0.001584
|
# CVS conversion code inspired by hg-cvs-import and git-cvsimport
import os, locale, re, socket
from cStringIO import StringIO
from mercurial import util
from common import NoRepo, commit, converter_source, checktool
class convert_cvs(converter_source):
def __init__(self, ui, path, rev=None):
super(convert_cvs, self).__init__(ui, path, rev=rev)
cvs = os.path.join(path, "CVS")
if not os.path.exists(cvs):
raise NoRepo("%s does not look like a CVS checkout" % path)
self.cmd = ui.config('convert', 'cvsps', 'cvsps -A -u --cvs-direct -q')
cvspsexe = self.cmd.split(None, 1)[0]
for tool in (cvspsexe, 'cvs'):
checktool(tool)
self.changeset = {}
self.files = {}
self.tags = {}
self.lastbranch = {}
self.parent = {}
self.socket = None
self.cvsroot = file(os.path.join(cvs, "Root")).read()[:-1]
self.cvsrepo = file(os.path.join(cvs, "Repository")).read()[:-1]
self.encoding = locale.getpreferredencoding()
self._parse()
self._connect()
def _parse(self):
if self.changeset:
return
maxrev = 0
cmd = self.cmd
if self.rev:
# TODO: handle tags
try:
# patchset number?
maxrev = int(self.rev)
except ValueError:
try:
# date
util.parsedate(self.rev, ['%Y/%m/%d %H:%M:%S'])
cmd = '%s -d "1970/01/01 00:00:01" -d "%s"' % (cmd, self.rev)
except util.Abort:
raise util.Abort('revision %s is not a patchset number or date' % self.rev)
d = os.getcwd()
try:
os.chdir(self.path)
id = None
state = 0
filerevids = {}
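# Parse cvsps output with a small state machine: 0 = patchset header, 1 = log message, 2 = member files, 3 = skip the rest.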
for l in util.popen(cmd):
if state == 0: # header
if l.startswith("PatchSet"):
id = l[9:-2]
if maxrev and int(id) > maxrev:
# ignore everything
state = 3
elif l.startswith("Date"):
date = util.parsedate(l[6:-1], ["%Y/%m/%d %H:%M:%S"])
date = util.datestr(date)
elif l.startswith("Branch"):
branch = l[8:-1]
self.parent[id] = self.lastbranch.get(branch, 'bad')
self.lastbranch[branch] = id
elif l.startswith("Ancestor branch"):
ancestor = l[17:-1]
# figure out the parent later
self.parent[id] = self.lastbranch[ancestor]
elif l.startswith("Author"):
author = self.recode(l[8:-1])
elif l.startswith("Tag:") or l.startswith("Tags:"):
t = l[l.index(':')+1:]
t = [ut.strip() for ut in t.split(',')]
if (len(t) > 1) or (t[0] and (t[0] != "(none)")):
self.tags.update(dict.fromkeys(t, id))
elif l.startswith("Log:"):
# switch to gathering log
state = 1
log = ""
elif state == 1: # log
if l == "Members: \n":
# switch to gathering members
files = {}
oldrevs = []
log = self.recode(log[:-1])
state = 2
else:
# gather log
log += l
elif state == 2: # members
if l == "\n": # start of next entry
state = 0
p = [self.parent[id]]
if id == "1":
p = []
if branch == "HEAD":
branch = ""
if branch:
latest = None
# the last changeset that contains a base
# file is our parent
for r in oldrevs:
latest = max(filerevids.get(r, None), latest)
if latest:
p = [latest]
# add current commit to set
c = commit(author=author, date=date, parents=p,
desc=log, branch=branch)
self.changeset[id] = c
self.files[id] = files
else:
colon = l.rfind(':')
file = l[1:colon]
rev = l[colon+1:-2]
oldrev, rev = rev.split("->")
files[file] = rev
# save some information for identifying branch points
oldrevs.append("%s:%s" % (oldrev, file))
filerevids["%s:%s" % (rev, file)] = id
elif state == 3:
# swallow all input
continue
self.heads = self.lastbranch.values()
finally:
os.chdir(d)
def _connect(self):
root = self.cvsroot
conntype = None
user, host = None, None
cmd = ['cvs', 'server']
self.ui.status("connecting to %s\n" % root)
if root.startswith(":pserver:"):
root = root[9:]
m = re.match(r'(?:(.*?)(?::(.*?))?@)?([^:\/]*)(?::(\d*))?(.*)',
root)
if m:
conntype = "pserver"
user, passw, serv, port, root = m.groups()
if not user:
user = "anonymous"
if not port:
port = 2401
else:
port = int(port)
format0 = ":pserver:%s@%s:%s" % (user, serv, root)
format1 = ":pserver:%s@%s:%d%s" % (user, serv, port, root)
if not passw:
passw = "A"
pf = open(os.path.join(os.environ["HOME"], ".cvspass"))
for line in pf.read().splitlines():
part1, part2 = line.split(' ', 1)
if part1 == '/1':
# /1 :pserver:user@example.com:2401/cvsroot/foo Ah<Z
part1, part2 = part2.split(' ', 1)
format = format1
else:
# :pserver:user@example.com:/cvsroot/foo Ah<Z
format = format0
if part1 == format:
passw = part2
break
pf.close()
sck = socket.socket()
sck.connect((serv, port))
sck.send("\n".join(["BEGIN AUTH REQUEST", root, user, passw,
"END AUTH REQUEST", ""]))
if sck.recv(128) != "I LOVE YOU\n":
raise util.Abort("CVS pserver authentication failed")
self.writep = self.readp = sck.makefile('r+')
if not conntype and root.startswith(":local:"):
conntype = "local"
root = root[7:]
if not conntype:
# :ext:user@host/home/user/path/to/cvsroot
if root.startswith(":ext:"):
root = root[5:]
m = re.match(r'(?:([^@:/]+)@)?([^:/]+):?(.*)', root)
# Do not take Windows path "c:\foo\bar" for a connection strings
if os.path.isdir(root) or not m:
conntype = "local"
else:
conntype = "rsh"
user, host, root = m.group(1), m.group(2), m.group(3)
if conntype != "pserver":
if conntype == "rsh":
rsh = os.environ.get("CVS_RSH") or
|
cott81/rosha
|
rosha/rosha_repair_executor/test/repair_action_RedundantLoc.py
|
Python
|
lgpl-3.0
| 905
| 0.01989
|
#!/usr/bin/env python
##\author Dominik Kirchner
##\brief Publishes diagnostic messages for diagnostic aggregator unit test
from debian.changelog import keyvalue
PKG = 'rosha_repair_executor'
import roslib; roslib.load_manifest(PKG)
import rospy
from time import sleep
#from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus, KeyValue
from rosha_msgs.msg import RepairAction
if __name__ == '__main__':
rospy.init_node('repair_action_pub')
pub = rospy.Publisher('/repair_action', RepairAction)
#pub = rospy.Publisher('/testOut4', RepairAction)
msg = RepairAction()
msg.robotId = 12
#
# redundancy replace loc
#
msg.repairActionToPerform = 32
msg.compName = "GPS"
msg.compId = -1
msg.msgType = ""
#pub.publish(msg)
#sleep(2)
while not rospy.is_shutdown():
pub.publish(msg)
sleep(5)
|
exelearning/iteexe
|
exe/export/cmdlineexporter.py
|
Python
|
gpl-2.0
| 6,556
| 0.002289
|
# -- coding: utf-8 --
# ===========================================================================
# eXe
# Copyright 2012, Pedro Peña Pérez, Open Phoenix IT
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ===========================================================================
'''
@author: Pedro Peña Pérez
'''
import sys
import logging
from exe.engine.persistxml import encodeObjectToXML
from exe.engine.path import Path
from exe.engine.package import Package
from exe.export.scormexport import ScormExport
from exe.export.imsexport import IMSExport
from exe.export.websiteexport import WebsiteExport
from exe.export.singlepageexport import SinglePageExport
from exe.export.xliffexport import XliffExport
from exe.export.epub3export import Epub3Export
from exe.export.textexport import TextExport
from exe.export.epub3subexport import Epub3SubExport
LOG = logging.getLogger(__name__)
ENCODING = sys.stdout.encoding or "UTF-8"
class CmdlineExporter(object):
extensions = {'xml': '.xml',
'scorm12': '.zip',
'scorm2004': '.zip',
'agrega': '.zip',
'ims': '.zip',
'website': '',
'webzip': '.zip',
'singlepage': '',
'xliff': '.xlf',
'epub3': '.epub',
'report': '.csv',
'text': '.txt'
}
def __init__(self, config, options):
self.config = config
self.options = options
self.web_dir = Path(self.config.webDir)
self.styles_dir = None
def do_export(self, inputf, outputf):
if hasattr(self, 'export_' + self.options["export"]):
LOG.debug("Exporting to type %s, in: %s, out: %s, overwrite: %s" \
% (self.options["export"], inputf, outputf, str(self.options["overwrite"])))
if not outputf:
if self.options["export"] in ('website', 'singlepage'):
outputf = inputf.rsplit(".elp")[0]
else:
outputf = inputf + self.extensions[self.options["export"]]
outputfp = Path(outputf)
if outputfp.exists() and not self.options["overwrite"]:
error = _(u'"%s" already exists.\nPlease try again \
with a different filename') % outputf
raise Exception(error.encode(ENCODING))
else:
if outputfp.exists() and self.options["overwrite"]:
if outputfp.isdir():
for filen in outputfp.walkfiles():
filen.remove()
outputfp.rmdir()
else:
outputfp.remove()
pkg = Package.load(inputf)
LOG.debug("Package %s loaded" % (inputf))
if not pkg:
error = _(u"Invalid input package")
raise Exception(error.encode(ENCODING))
self.styles_dir = self.config.stylesDir / pkg.style
LOG.debug("Styles dir: %s" % (self.styles_dir))
pkg.exportSource = self.options['editable']
getattr(self, 'export_' + self.options["export"])(pkg, outputf)
return outputf
else:
raise Exception(_(u"Export format not implemented")\
.encode(ENCODING))
def export_xml(self, pkg, outputf):
open(outputf, "w").write(encodeObjectToXML(pkg))
def export_scorm12(self, pkg, outputf):
scormExport = ScormExport(self.config, self.styles_dir, outputf,
'scorm1.2')
pkg.scowsinglepage = self.options['single-page']
pkg.scowwebsite = self.options['website']
scormExport.export(pkg)
def export_scorm2004(self, pkg, outputf):
scormExport = ScormExport(self.config, self.styles_dir, outputf,
'scorm2004')
pkg.scowsinglepage = self.options['single-page']
pkg.scowwebsite = self.options['website']
scormExport.export(pkg)
def export_ims(self, pkg, outputf):
imsExport = IMSExport(self.config, self.styles_dir, outputf)
imsExport.export(pkg)
def export_website(self, pkg, outputf):
outputfp = Path(outputf)
outputfp.makedirs()
websiteExport = WebsiteExport(self.config, self.styles_dir, outputf)
websiteExport.export(pkg)
def export_webzip(self, pkg, outputf):
websiteExport = WebsiteExport(self.config, self.styles_dir, outputf)
websiteExport.exportZip(pkg)
def export_singlepage(self, pkg, outputf, print_flag=0):
images_dir = self.web_dir.joinpath('images')
scripts_dir = self.web_dir.joinpath('scripts')
css_dir = self.web_dir.joinpath('css')
templates_dir = self.web_dir.joinpath('templates')
singlePageExport = SinglePageExport(self.styles_dir, outputf, \
images_dir, scripts_dir, css_dir, templates_dir)
singlePageExport.export(pkg, print_flag)
def export_xliff(self, pkg, outputf):
xliff = XliffExport(self.config, outputf, \
source_copied_in_target=self.options["copy-source"], \
wrap_cdata=self.options["wrap-cdata"])
xliff.export(pkg)
def export_epub3(self, pkg, outputf):
epub3Export = Epub3Export(self.config, self.styles_dir, outputf)
epub3Export.export(pkg)
def export_subepub3(self, pkg, outputf):
epub3SubExport = Epub3SubExport(self.config, self.styles_dir, outputf)
epub3SubExport.export(pkg)
def export_report(self, pkg, outputf):
websiteExport = WebsiteExport(self.config, self.styles_dir, outputf, report=True)
websiteExport.export(pkg)
def export_text(self, pkg, outputf):
textExport =TextExport(outputf)
textExport.export(pkg)
textExport.save(outputf)
|
ebruck/pyxis
|
pyxis/Player.py
|
Python
|
gpl-2.0
| 1,159
| 0.014668
|
#!/usr/bin/python
#Pyxis and Original Sipie: Sirius Command Line Player
#Copyright (C) Corey Ling, Eli Criffield
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from StreamHandler import StreamHandler
class Player(object):
def __init__(self, opts):
self.streamHandler = StreamHandler(opts)
def play(self, url, stream):
self.streamHandler.play(url, stream)
def playing(self):
return self.streamHandler.playing();
def close(self):
self.streamHandler.close()
|
sbaechler/simpleshop
|
simpleshop/payment_modules.py
|
Python
|
bsd-3-clause
| 320
| 0.00625
|
from plata.payment.modules import cod
from django.shortcuts import redirect
from feincms.content.application.models import app_reverse
class CodPaymentProcessor(cod.PaymentProcessor):
def redirect(self, url_name):
return redirect(app_reverse(url_name,
'simpleshop.urls'))
|
sazzadBuet08/programming-contest
|
hackar_rank/infolytx_mock_hackar_rank/ABigSum.py
|
Python
|
apache-2.0
| 176
| 0
|
#!/bin/python3
def aVeryBigSum(n, ar):
return sum(ar)
n = int(input().strip())
ar = list(map(int, input().strip().split(' ')))
result = aVeryBigSum(n, ar)
print(result)
|
xulesc/spellchecker
|
impl1.py
|
Python
|
gpl-3.0
| 1,898
| 0.029505
|
## mostly copied from: http://norvig.com/spell-correct.html
import sys, random
import re, collections, time
TXT_FILE='';
BUF_DIR='';
NWORDS=None;
def words(text): return re.findall('[a-z]+', text)
def train(features):
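# Build a word-frequency model; defaultdict(lambda: 1) gives unseen words a baseline count of 1 (simple smoothing).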
model = collections.defaultdict(lambda: 1)
for f in features:
model[f] += 1
return model
alphabet = 'abcdefghijklmnopqrstuvwxyz'
def edits1(word):
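# Generate every string one edit away from the given word: deletions, transpositions, single-letter replacements and insertions.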
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [a + b[1:] for a, b in splits if b]
transposes = [a + b[1] + b[0] + b[2:] for a, b in splits if len(b)>1]
replaces = [a + c + b[1:] for a, b in splits for c in alphabet if b]
inserts = [a + c + b for a, b in splits for c in alphabet]
return set(deletes + transposes + replaces + inserts)
def known_edits2(word):
return set(e2 for e1 in edits1(word) for e2 in edits1(e1) if e2 in NWORDS)
def known(words): return set(w for w in words if w in NWORDS)
def correct(word):
candidates = known([word]) or known(edits1(word)) or known_edits2(word) or [word]
return max(candidates, key=NWORDS.get)
#######################################################################################
if __name__ == '__main__':
TXT_FILE = sys.argv[1]
t0 = time.clock()
o_words = words(file(TXT_FILE).read())
NWORDS = train(o_words)
#print time.clock() - t0, " seconds build time"
#print "dictionary size: %d" %len(NWORDS)
et1 = time.clock() - t0
t_count = 10
rl = o_words[0:t_count] #random.sample(o_words, t_count)
orl = [''.join(random.sample(word, len(word))) for word in o_words]
t1 = time.clock()
r_count = 10
for i in range(0, r_count):
for w1, w2 in zip(rl, orl):
correct(w1); correct(w2)
et2 = (time.clock() - t1)/t_count/r_count/2
print '%d\t%f\t%f' %(len(NWORDS), et1, et2)
#######################################################################################
print 'Done'
|
rtucker/sycamore
|
Sycamore/macro/allusers.py
|
Python
|
gpl-2.0
| 12,303
| 0.003658
|
# -*- coding: utf-8 -*-
import time
import re
from cStringIO import StringIO
from Sycamore import wikiutil
from Sycamore import config
from Sycamore import wikidb
from Sycamore import user
from Sycamore.Page import Page
def execute(macro, args, formatter=None):
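# With a user name in args, render that user's statistics; otherwise build the sortable all-users table.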
if not formatter:
formatter = macro.formatter
request = macro.request
if args:
# personalized stats
htmltext = []
theuser = user.User(macro.request, name=args.lower())
wiki_info = theuser.getWikiInfo()
if not wiki_info.first_edit_date:
first_edit_date = "<em>unknown</em>"
else:
first_edit_date = request.user.getFormattedDateTime(
wiki_info.first_edit_date)
created_count = wiki_info.created_count
edit_count = wiki_info.edit_count
file_count = wiki_info.file_count
last_page_edited = wiki_info.last_page_edited
last_edit_date = wiki_info.last_edit_date
if not last_edit_date:
last_edit_date = "<em>unknown</em>"
else:
last_edit_date = request.user.getFormattedDateTime(last_edit_date)
if last_page_edited:
htmltext.append(
'<p><h2>%s\'s Statistics</h2></p>'
'<table width=100%% border=0><tr>'
'<td><b>Edits </b></td>'
'<td><b>Pages Created </b></td>'
'<td><b>Files Contributed </b></td>'
'<td><b>First Edit Date </b></td>'
'<td><b>Last Edit </b></td>'
'<td><b>Last Page Edited </b></td></tr>' % args)
htmltext.append('<tr>'
'<td>%s</td><td>%s</td><td>%s</td><td>%s</td>'
'<td>%s</td><td>%s</td>'
'</tr></table>' %
(edit_count, created_count, file_count,
first_edit_date, last_edit_date,
Page(last_page_edited, request).link_to()))
elif edit_count or wiki_info.first_edit_date:
htmltext.append('<p><h2>%s\'s Statistics</h2></p>'
'<table width=100%% border=0><tr>'
'<td><b>Edits </b></td>'
'<td><b>Pages Created </b></td>'
'<td><b>Files Contributed </b></td>'
'<td><b>First Edit Date </b></td>'
'<td><b>Last Edit </b></td>'
'<td><b>Last Page Edited </b></td>'
'</tr>' % args)
htmltext.append('<tr>'
'<td>%s</td><td>%s</td><td>%s</td><td>%s</td>'
'<td>%s</td><td> </td>'
'</tr></table>' %
(edit_count, created_count, file_count,
first_edit_date, last_edit_date))
else:
htmltext.append('<p>' + macro.formatter.highlight(1) +
'The user "%s" has not edited this wiki.' % args +
macro.formatter.highlight(0) + '</p>')
else:
htmltext = []
sort_by = 'edit_count'
if macro.request.form.has_key('sort_by'):
sort_by = macro.request.form['sort_by'][0]
# this is to prevent SQL exploits
if sort_by not in ['edit_count', 'created_count',
'first_edit_date', 'file_count',
'last_edit_date']:
sort_by = 'edit_count'
list = []
cursor = macro.request.cursor
if sort_by == 'first_edit_date':
cursor.execute(
"""SELECT users.propercased_name, userWikiInfo.first_edit_date,
userWikiInfo.created_count, userWikiInfo.edit_count,
userWikiInfo.file_count,
userWikiInfo.last_page_edited,
userWikiInfo.last_edit_date,
userWikiInfo.first_edit_date IS NULL AS join_isnull
FROM userWikiInfo, users
WHERE users.name !='' and userWikiInfo.edit_count >= 0 and
users.name=userWikiInfo.user_name and
userWikiInfo.wiki_id=%%(wiki_id)s
ORDER BY join_isnull ASC, %s DESC""" % sort_by,
{'wiki_id':macro.request.config.wiki_id})
elif sort_by == 'last_edit_date':
cursor.execute(
"""SELECT users.propercased_name, userWikiInfo.first_edit_date,
userWikiInfo.created_count, userWikiInfo.edit_count,
userWikiInfo.file_count,
userWikiInfo.last_page_edited,
userWikiInfo.last_edit_date,
userWikiInfo.last_edit_date IS NULL AS edit_isnull
FROM users, userWikiInfo
WHERE users.name !='' and userWikiInfo.edit_count >= 0 and
users.name=userWikiInfo.user_name and
userWikiInfo.wiki_id=%%(wiki_id)s
ORDER BY edit_isnull ASC, %s DESC""" % sort_by,
{'wiki_id':macro.request.config.wiki_id})
else:
cursor.execute(
"""SELECT users.propercased_name, userWikiInfo.first_edit_date,
userWikiInfo.created_count, userWikiInfo.edit_count,
userWikiInfo.file_count,
userWikiInfo.last_page_edited,
userWikiInfo.last_edit_date
FROM users, userWikiInfo
WHERE users.name !='' and userWikiInfo.edit_count >= 0 and
users.name=userWikiInfo.user_name and
userWikiInfo.wiki_id=%%(wiki_id)s
ORDER BY %s DESC""" % sort_by,
{'wiki_id':macro.request.config.wiki_id})
user_stats = cursor.fetchall()
page = Page("User Statistics", request)
htmltext.append('<p><h2>All User Statistics</h2></p>'
'<table width=100%% border=0><tr>'
'<td><b>User</b></td><td><b>%s </b></td>'
'<td><b>%s </b></td>'
'<td><b>%s </b></td>'
'<td><b>%s </b></td>'
'<td><b>%s </b></td>'
'<td><b>Last Page Edited </b></td>'
'</tr>' %
(page.link_to(know_status=True,
know_status_exists=True,
querystr="sort_by=edit_count",
text="Edits"),
page.link_to(know_status=True,
know_status_exists=True,
querystr="sort_by=created_count",
text="Pages Created"),
page.link_to(know_status=True,
know_status_exists=True,
querystr="sort_by=file_count",
text="Files Contributed"),
page.link_to(know_status=True,
know_status_exists=True,
querystr="sort_by=first_edit_date",
text="First Edit Date"),
page.link_to(know_status=True,
know_status_exists=True,
querystr="sort_by=last_edit_date",
text="Last Edit")))
toggle = -1
for result in user_stats:
toggle = toggle*(-1)
name = result[0]
first_edit
|
jmbeuken/abinit
|
tests/pymods/memprof.py
|
Python
|
gpl-3.0
| 12,121
| 0.00693
|
from __future__ import print_function, division, unicode_literals
from pprint import pprint
from itertools import groupby
from functools import wraps
from collections import namedtuple, deque
# OrderedDict was added in 2.7. ibm6 still uses python2.6
try:
from collections import OrderedDict
except ImportError:
from .ordereddict import OrderedDict
def group_entries_bylocus(entries):
d = {}
for e in entries:
if e.locus not in d:
d[e.locus] = [e]
else:
d[e.locus].append(e)
return d
class Entry(namedtuple("Entry", "vname, ptr, action, size, file, func, line, tot_memory, sidx")):
@classmethod
def from_line(cls, line, sidx):
args = line.split()
args.append(sidx)
return cls(*args)
def __new__(cls, *args):
"""Extends the base class adding type conversion of arguments."""
# write(logunt,'(a,t60,a,1x,2(i0,1x),2(a,1x),2(i0,1x))')&
# trim(vname), trim(act), addr, isize, trim(basename(file)), trim(func), line, memtot_abi%memory
return super(cls, Entry).__new__(cls,
vname=args[0],
action=args[1],
ptr=int(args[2]),
size=int(args[3]),
file=args[4],
func=args[5],
line=int(args[6]),
tot_memory=int(args[7]),
sidx=args[8],
)
def __repr__(self):
return self.as_repr(with_addr=True)
def as_repr(self, with_addr=True):
if with_addr:
return "<var=%s, %s@%s:%s:%s, addr=%s, size=%d, idx=%d>" % (
self.vname, self.action, self.file, self.func, self.line, hex(self.ptr), self.size, self.sidx)
else:
return "<var=%s, %s@%s:%s:%s, size=%d, idx=%d>" % (
self.vname, self.action, self.file, self.func, self.line, self.size, self.sidx)
@property
def basename(self):
return self.vname.split("%")[-1]
@property
def isalloc(self):
"""True if entry represents an allocation."""
return self.action == "A"
@property
def isfree(self):
"""True if entry represents a deallocation."""
return self.action == "D"
@property
def iszerosized(self):
"""True if this is a zero-sized alloc/free."""
return self.size == 0
@property
def locus(self):
"""This is almost unique"""
return self.func + "@" + self.file
def frees_onheap(self, other):
if (not self.isfree) or other.isalloc: return False
if self.size + other.size != 0: return False
return True
def frees_onstack(self, other):
if (not self.isfree) or other.isalloc: return False
if self.size + other.size != 0: return False
if self.locus != other.locus: return False
return True
class Heap(dict):
def show(self):
print("=== HEAP OF LEN %s ===" % len(self))
if not self: return
# for p, elist in self.items():
pprint(self, indent=4)
print("")
def pop_alloc(self, entry):
if not entry.isfree: return 0
elist = self.get(entry.ptr)
if elist is None: return 0
for i, olde in enumerate(elist):
if entry.size + olde.size != 0:
elist.pop(i)
return 1
return 0
class Stack(dict):
def show(self):
print("=== STACK OF LEN %s ===)" % len(self))
if not self: return
pprint(self)
print("")
def catchall(method):
@wraps(method)
def wrapper(*args, **kwargs):
self = args[0]
try:
return method(*args, **kwargs)
except Exception as exc:
# Add info on file and re-raise.
msg = "Exception while parsing file: %s\n" % self.path
raise exc.__class__(msg + str(exc))
return wrapper
class AbimemParser(object):
def __init__(self, path):
self.path = path
#def __str__(self):
# lines = []
# app = lines.append
# return "\n".join(lines)
@catchall
def summarize(self):
with open(self.path, "rt") as fh:
l = fh.read()
print(l)
@catchall
def find_small_allocs(self, nbytes=160):
"""Zero sized allocations are not counted."""
smalles = []
with open(self.path, "rt") as fh:
for lineno, line in enumerate(fh):
if lineno == 0: continue
e = Entry.from_line(line, lineno)
if not e.isalloc: continue
if 0 < e.size <= nbytes: smalles.append(e)
pprint(smalles)
return smalles
@catchall
def find_intensive(self, threshold=2000):
d = {}
with open(self.path, "rt") as fh:
for lineno, line in enumerate(fh):
if lineno == 0: continue
e = Entry.from_line(line, lineno)
loc = e.locus
if loc not in d:
d[loc] = [e]
else:
d[loc].append(e)
# Remove entries below the threshold and perform DSU sort
dsu_list = [(elist, len(elist)) for _, elist in d.items() if len(elist) >= threshold]
intensive = [t[0] for t in sorted(dsu_list, key=lambda x: x[1], reverse=True)]
for elist in intensive:
loc = elist[0].locus
# assert all(e.locus == loc for e in elist)
print("[%s] has %s allocations/frees" % (loc, len(elist)))
return intensive
#def show_peaks(self):
@catchall
def find_zerosized(self):
elist = []
eapp = elist.append
for e in self.yield_all_entries():
if e.size == 0: eapp(e)
if elist:
print("Found %d zero-sized entries:" % len(elist))
pprint(elist)
else:
print("No zero-sized found")
return elist
@catchall
def find_weird_ptrs(self):
elist = []
eapp = elist.append
for e in self.yield_all_entries():
if e.ptr <= 0: eapp(e)
if elist:
print("Found %d weird entries:" % len(elist))
pprint(elist)
else:
print("No weird entries found")
return elist
def yield_all_entries(self):
with open(self.path, "rt") as fh:
for lineno, line in enumerate(fh):
if lineno == 0: continue
yield Entry.from_line(line, lineno)
@catchall
def find_peaks(self, maxlen=20):
# the deque is bounded to the specified maximum length. Once a bounded length deque is full,
# when new items are added, a corresponding number of items are discarded from the opposite end.
peaks = deque(maxlen=maxlen)
for e in self.yield_all_entries():
size = e.size
if size == 0 or not e.isalloc: continue
if len(peaks) == 0:
peaks.append(e); continue
# TODO: Should remove redondant entries.
if size > peaks[0].size:
peaks.append(e)
peaks = deque(sorted(peaks, key=lambda x: x.size), maxlen=maxlen)
peaks = deque(sorted(peaks, key=lambda x: x.size, reverse=True), maxlen=maxlen)
for peak in peaks:
print(peak)
return peaks
@catchall
def plot_memory_usage(self, show=True):
memory = [e.tot_memory for e in self.yield_all_entries()]
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(memory)
if show: plt.show()
return fig
#def get_dataframe(self):
# import pandas as pd
# frame = pd.DataFrame()
# return frame
@catchall
def find_memleaks(self):
heap, stack = Heap(), Stack()
reallocs = []
with open(self.path, "rt") as fh:
for lineno, line in enumerate(fh):
if lineno == 0: continue
newe = Entry.from_line(line, lineno)
p = newe.ptr
if newe.size == 0: continue
# Store new entry in list if the ptr is not in d
# else we check if there
|
Code4Maine/suum
|
suum/apps/property/management/commands/import_property_csv.py
|
Python
|
bsd-3-clause
| 2,559
| 0.010942
|
from datetime import datetime
from csv import DictReader
from django.core.management.base import BaseCommand, CommandError
from property.models import Property, Owner, MailingAddress, Assessment, Building, Sale
class Command(BaseCommand):
help = 'Imports property from CSV file'
def add_arguments(self, parser):
parser.add_argument('path')
parser.add_argument('--assessment-date',
dest='adate',
default=False,
help='Assessment date for the document (yyyy-mm-dd format)')
def convert_to_float(self, string):
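# Strip spaces, "$" and thousands separators before converting; return None when the value is not numeric.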
try:
value = float(string.strip(' ').replace('$', '').replace(',',''))
except ValueError:
value = None
return value
def handle(self, *args, **options):
file_path = options['path']
adate = datetime.strptime(options['adate'], '%Y-%m-%d')
f = open(file_path)
items = DictReader(f)
for d in items:
print('Checking for existing property with ID #{0} at {1} {2}'.format(d['Account Number'],
d['Street Number'],
d['Street Name']))
(p, created) = Property.objects.get_or_create(account_number=d['Account Number'])
if created:
print('Created new property')
else:
print('Updating existing property')
p.account_number=d['Account Number']
p.street_number=d['Street Number']
p.street=d['Street Name']
p.city='Bangor'
p.state='Maine'
p.map_lot=d['Map/Lot']
p.book_page_1=d['Book & Page']
p.save()
a,created = Assessment.objects.get_or_create(assoc_property=p, date=adate)
if created:
print('Adding assessment for {0}'.format(adate.year))
a.land=self.convert_to_float(d['Land Value'])
a.building=self.convert_to_float(d['Building Value'])
a.exemption=self.convert_to_float(d['Exemption'])
a.tax_amount=self.convert_to_float(d['Tax Amount'])
a.date=adate
a.save()
o, created = Owner.objects.get_or_create(name=d["Owner's Name"])
try:
o.name_2=d["Owner's Name Part 2"]
o.save()
except:
pass
p.owners.add(o)
p.save()
|
hdweiss/qt-creator-visualizer
|
tests/system/shared/workarounds.py
|
Python
|
lgpl-2.1
| 9,260
| 0.005508
|
import urllib2
import re
JIRA_URL='https://bugreports.qt-project.org/browse'
class JIRA:
__instance__ = None
# Helper class
class Bug:
CREATOR = 'QTCREATORBUG'
SIMULATOR = 'QTSIM'
SDK = 'QTSDK'
QT = 'QTBUG'
QT_QUICKCOMPONENTS = 'QTCOMPONENTS'
# constructor of JIRA
def __init__(self, number, bugType=Bug.CREATOR):
if JIRA.__instance__ == None:
JIRA.__instance__ = JIRA.__impl(number, bugType)
JIRA.__dict__['_JIRA__instance__'] = JIRA.__instance__
else:
JIRA.__instance__._bugType = bugType
JIRA.__instance__._number = number
JIRA.__instance__.__fetchStatusAndResolutionFromJira__()
# overriden to make it possible to use JIRA just like the
# underlying implementation (__impl)
def __getattr__(self, attr):
return getattr(self.__instance__, attr)
# overriden to make it possible to use JIRA just like the
# underlying implementation (__impl)
def __setattr__(self, attr, value):
return setattr(self.__instance__, attr, value)
# function to get an instance of the singleton
@staticmethod
def getInstance():
if '_JIRA__instance__' in JIRA.__dict__:
return JIRA.__instance__
else:
return JIRA.__impl(0, Bug.CREATOR)
# function to check if the given bug is open or not
@staticmethod
def isBugStillOpen(number, bugType=Bug.CREATOR):
tmpJIRA = JIRA(number, bugType)
return tmpJIRA.isOpen()
# function similar to performWorkaroundForBug - but it will execute the
# workaround (function) only if the bug is still open
# returns True if the workaround function has been executed, False otherwise
@staticmethod
def performWorkaroundIfStillOpen(number, bugType=Bug.CREATOR, *args):
if JIRA.isBugStillOpen(number, bugType):
return JIRA.performWorkaroundForBug(number, bugType, *args)
else:
test.warning("Bug is closed... skipping workaround!",
"You should remove potential code inside performWorkaroundForBug()")
return False
# function that performs the workaround (function) for the given bug
# if the function needs additional arguments pass them as 3rd parameter
@staticmethod
def performWorkaroundForBug(number, bugType=Bug.CREATOR, *args):
functionToCall = JIRA.getInstance().__bugs__.get("%s-%d" % (bugType, number), None)
if functionToCall:
test.warning("Using workaround for %s-%d" % (bugType, number))
functionToCall(*args)
return True
else:
JIRA.getInstance()._exitFatal_(bugType, number)
return False
# implementation of JIRA singleton
class __impl:
# constructor of __impl
def __init__(self, number, bugType):
self._number = number
self._bugType = bugType
self._localOnly = os.getenv("SYSTEST_JIRA_NO_LOOKUP")=="1"
self.__initBugDict__()
self.__fetchStatusAndResolutionFromJira__()
# function to retrieve the status of the current bug
def getStatus(self):
return self._status
# function to retrieve the resolution of the current bug
def getResolution(self):
return self._resolution
# this function checks the resolution of the given bug
# and returns True if the bug can still be assumed as 'Open' and False otherwise
def isOpen(self):
# handle special cases
if self._resolution == None:
return True
if self._resolution in ('Duplicate', 'Moved', 'Incomplete', 'Cannot Reproduce', 'Invalid'):
test.warning("Resolution of bug is '%s' - assuming 'Open' for now." % self._resolution,
"Please check the bugreport manually and update this test.")
return True
return self._resolution != 'Done'
# this function tries to fetch the status and resolution from JIRA for the given bug
# if this isn't possible or the lookup is disabled it does only check the internal
# dict whether a function for the given bug is deposited or not
def __fetchStatusAndResolutionFromJira__(self):
global JIRA_URL
data = None
if not self._localOnly:
try:
bugReport = urllib2.urlopen('%s/%s-%d' % (JIRA_URL, self._bugType, self._number))
data = bugReport.read()
except:
data = self.__tryExternalTools__()
if data == None:
test.warning("Sorry, ssl module missing - cannot fetch data via HTTPS",
"Try to install the ssl module by yourself, or set the python "
"path inside SQUISHDIR/etc/paths.ini to use a python version with "
"ssl support OR install wget or curl to get rid of this warning!")
self._localOnly = True
if data == None:
if '%s-%d' % (self._bugType, self._number) in self.__bugs__:
test.warning("Using internal dict - bug status could have changed already",
"Please check manually!")
self._status = None
self._resolution = None
return
else:
test.fatal("No workaround function deposited for %s-%d" % (self._bugType, self._number))
self._resolution = 'Done'
return
else:
data = data.replace("\r", "").replace("\n", "")
resPattern = re.compile('<span\s+id="resolution-val".*?>(?P<resolution>.*?)</span>')
statPattern = re.compile('<span\s+id="status-val".*?>(.*?<img.*?>)?(?P<status>.*?)</span>')
status = statPattern.search(data)
resolution = resPattern.search(data)
if status:
self._status = status.group("status").strip()
else:
test.fatal("FATAL: Cannot get status of bugreport %s-%d" % (self._bugType, self._number),
"Looks like JIRA has changed.... Please verify!")
self._status = None
if resolution:
self._resolution = resolution.group("resolution").strip()
else:
test.fatal("FATAL: Cannot get resolution of bugreport %s-%d" % (self._bugType, self._number),
"Looks like JIRA has changed.... Please verify!")
self._resolution = None
# simple helper function - used as fallback if python has no ssl support
# tries to find curl or wget in PATH and fetches data with it instead of
# using urllib2
def __tryExternalTools__(self):
global JIRA_URL
cmdAndArgs = { 'curl':'-k', 'wget':'-qO-' }
for call in cmdAndArgs:
prog = which(call)
if prog:
return getOutputFromCmdline('"%s" %s %s/%s-%d' % (prog, cmdAndArgs[call], JIRA_URL, self._bugType, self._number))
return None
# this function initializes the bug dict for localOnly usage and
# for later lookup which function to call for which bug
# ALWAYS update this dict when adding a new function for a workaround!
def __initBugDict__(self):
self.__bugs__= {
'QTCREATORBUG-6853':self._workaroundCreator6853_,
'QTCREATORBUG-6918':self._workaroundCreator_MacEditorFocus_,
'QTCREATORBUG-6953':self._workaroundCreator_MacEditorFocus_,
'QTCREATORBUG-6994':self._workaroundCreator6994_,
'QTCREATORBUG-7002':self._workaroundCreator7002_
}
    # helper function - will be called if no workaround for the requested bug is deposited
geary/claslite | web/app/lib/simplejson/tests/test_decimal.py | Python | unlicense | 1,752 | 0.002854
from decimal import Decimal
from unittest import TestCase
from StringIO import StringIO
import simplejson as json
class TestDecimal(TestCase):
NUMS = "1.0", "10.00", "1.1", "1234567890.1234567890", "500"
def dumps(self, obj, **kw):
sio = StringIO()
json.dump(obj, sio, **kw)
res = json.dumps(obj, **kw)
self.assertEquals(res, sio.getvalue())
return res
def loads(self, s, **kw):
sio = StringIO(s)
res = json.loads(s, **kw)
self.assertEquals(res, json.load(sio, **kw))
return res
def test_decimal_encode(self):
        for d in map(Decimal, self.NUMS):
self.assertEquals(self.dumps(d, use_decimal=True), str(d))
def test_decimal_decode(self):
for s in self.NUMS:
self.assertEquals(self.loads(s, parse_float=Decimal), Decimal(s))
def test_decimal_roundtrip(self):
for d in map(Decimal, self.NUMS):
# The type might not be the same (int and Decimal) but they
# should still compare equal.
self.assertEquals(
self.loads(
self.dumps(d, use_decimal=True), parse_float=Decimal),
d)
self.assertEquals(
self.loads(
self.dumps([d], use_decimal=True), parse_float=Decimal),
[d])
def test_decimal_defaults(self):
d = Decimal(1)
sio = StringIO()
# use_decimal=False is the default
self.assertRaises(TypeError, json.dumps, d, use_decimal=False)
self.assertRaises(TypeError, json.dumps, d)
self.assertRaises(TypeError, json.dump, d, sio, use_decimal=False)
self.assertRaises(TypeError, json.dump, d, sio)
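# Behaviour exercised above, in short (illustrative sketch of the public API):
#   json.dumps(Decimal('1.1'), use_decimal=True)  -> '1.1'
#   json.loads('1.1', parse_float=Decimal)        -> Decimal('1.1')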
tylerclair/py3canvas | py3canvas/apis/originality_reports.py | Python | mit | 24,351 | 0.001766
"""OriginalityReports API Version 1.0.
This API client was generated using a template. Make sure this code is valid before using it.
"""
import logging
from datetime import date, datetime
from .base import BaseCanvasAPI
from .base import BaseModel
class OriginalityReportsAPI(BaseCanvasAPI):
"""OriginalityReports API Version 1.0."""
def __init__(self, *args, **kwargs):
"""Init method for OriginalityReportsAPI."""
super(OriginalityReportsAPI, self).__init__(*args, **kwargs)
self.logger = logging.getLogger("py3canvas.OriginalityReportsAPI")
def create_originality_report(
self,
assignment_id,
originality_report_originality_score,
submission_id,
originality_report_attempt=None,
originality_report_error_message=None,
originality_report_file_id=None,
originality_report_originality_report_file_id=None,
originality_report_originality_report_url=None,
originality_report_tool_setting_resource_type_code=None,
originality_report_tool_setting_resource_url=None,
originality_report_workflow_state=None,
):
"""
Create an Originality Report.
Create a new OriginalityReport for the specified file
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - assignment_id
"""
ID
"""
path["assignment_id"] = assignment_id
# REQUIRED - PATH - submission_id
"""
ID
"""
path["submission_id"] = submission_id
# OPTIONAL - originality_report[file_id]
"""
The id of the file being given an originality score. Required
if creating a report associated with a file.
"""
if originality_report_file_id is not None:
data["originality_report[file_id]"] = originality_report_file_id
# REQUIRED - originality_report[originality_score]
"""
A number between 0 and 100 representing the measure of the
specified file's originality.
"""
data[
"originality_report[originality_score]"
] = originality_report_originality_score
# OPTIONAL - originality_report[originality_report_url]
"""
The URL where the originality report for the specified
file may be found.
"""
if originality_report_originality_report_url is not None:
data[
"originality_report[originality_report_url]"
] = originality_report_originality_report_url
# OPTIONAL - originality_report[originality_report_file_id]
"""
The ID of the file within Canvas that contains the originality
report for the submitted file provided in the request URL.
"""
if originality_report_originality_report_file_id is not None:
data[
"originality_report[originality_report_file_id]"
] = originality_report_originality_report_file_id
# OPTIONAL - originality_report[tool_setting][resource_type_code]
"""
The resource type code of the resource handler Canvas should use for the
LTI launch for viewing originality reports. If set Canvas will launch
to the message with type 'basic-lti-launch-request' in the specified
resource handler rather than using the originality_report_url.
"""
if originality_report_tool_setting_resource_type_code is not None:
data[
"originality_report[tool_setting][resource_type_code]"
] = originality_report_tool_setting_resource_type_code
# OPTIONAL - originality_report[tool_setting][resource_url]
"""
The URL Canvas should launch to when showing an LTI originality report.
Note that this value is inferred from the specified resource handler's
message "path" value (See `resource_type_code`) unless
it is specified. If this parameter is used a `resource_type_code`
must also be specified.
"""
if originality_report_tool_setting_resource_url is not None:
data[
"originality_report[tool_setting][resource_url]"
] = originality_report_tool_setting_resource_url
# OPTIONAL - originality_report[workflow_state]
"""
May be set to "pending", "error", or "scored". If an originality score
is provided a workflow state of "scored" will be inferred.
"""
if originality_report_workflow_state is not None:
data[
"originality_report[workflow_state]"
] = originality_report_workflow_state
# OPTIONAL - originality_report[error_message]
"""
A message describing the error. If set, the "workflow_state"
will be set to "error."
"""
if originality_report_error_message is not None:
data["originality_report[error_message]"] = originality_report_error_message
# OPTIONAL - originality_report[attempt]
"""
If no `file_id` is given, and no file is required for the assignment
(that is, the assignment allows an online text entry), this parameter
may be given to clarify which attempt number the report is for (in the
case of resubmissions). If this field is omitted and no `file_id` is
given, the report will be created (or updated, if it exists) for the
first submission attempt with no associated file.
"""
if originality_report_attempt is not None:
data["originality_report[attempt]"] = originality_report_attempt
self.logger.debug(
"POST /api/lti/assignments/{assignment_id}/submissions/{submission_id}/originality_report with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
        return self.generic_request(
"POST",
"/api/lti/assignments/{assignment_id}/submissions/{submission_id}/originality_report".format(
**path
),
data=data,
params=params,
single_item=True,
)
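    # Illustrative call of create_originality_report() above (a sketch; the
    # client construction is omitted and the IDs are placeholders, not values
    # from the original module):
    #   api.create_originality_report(
    #       assignment_id=101, submission_id=202,
    #       originality_report_originality_score=87.5)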
def edit_originality_report_submissions(
self,
assignment_id,
id,
submission_id,
originality_report_error_message=None,
originality_report_originality_report_file_id=None,
originality_report_originality_report_url=None,
originality_report_originality_score=None,
originality_report_tool_setting_resource_type_code=None,
originality_report_tool_setting_resource_url=None,
originality_report_workflow_state=None,
):
"""
Edit an Originality Report.
Modify an existing originality report. An alternative to this endpoint is
to POST the same parameters listed below to the CREATE endpoint.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - assignment_id
"""
ID
"""
path["assignment_id"] = assignment_id
# REQUIRED - PATH - submission_id
"""
ID
"""
path["submission_id"] = submission_id
# REQUIRED - PATH - id
"""
ID
"""
path["id"] = id
# OPTIONAL - originality_report[originality_score]
"""
A number between 0 and 100 representing the measure of the
specified file's originality.
"""
if originality_report_originality_score is not None:
data[
"originality_report[originality_score]"
] = originality_report_originality_score
# OPTIONAL - originality_report[originality_report_url]
"""
The URL where the originality report for the specified
file may be found.
"""
if originality_report_originality_report_url is not None:
data[
"originality_report[originality_report_url]"
            ] = originality_report_originality_report_url
maschwanden/boxsimu | boxsimu/builtins.py | Python | mit | 834 | 0.002398
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 23 2017 at 14:20
@author: Mathias Aschwanden (mathias.aschwanden@gmail.com)
Makes various Variables available to the user.
IMPORTANT: All data has been included without warranty, express or implied.
References:
Molar Masses : From Wikipedia.org
"
|
""
from . import ur
from . import entities as bs_entities
# VARIABLES
# Standard literature molar masses (g/mol) are assumed where the original left values blank.
carbon = bs_entities.Variable('C', molar_mass=12.011*ur.gram/ur.mole)
carbon_dioxide = bs_entities.Variable('CO2', molar_mass=44.01*ur.gram/ur.mole)
methane = bs_entities.Variable('CH4', molar_mass=16.04*ur.gram/ur.mole)
phosphate = bs_entities.Variable('PO4', molar_mass=94.9714*ur.gram/ur.mole)
phosphorus = bs_entities.Variable('P', molar_mass=30.974*ur.gram/ur.mole)
nitrate = bs_entities.Variable('NO3', molar_mass=62.00*ur.gram/ur.mole)
nitrogen = bs_entities.Variable('N', molar_mass=14.007*ur.gram/ur.mole)
# PROCESSES
# REACTIONS
# BOXES
# SYSTEMS
jgmize/nucleus | nucleus/settings/base.py | Python | bsd-3-clause | 4,534 | 0.000221
# This is your project's main settings file that can be committed to your
# repo. If you need to override a setting locally, use local.py
import dj_database_url
from funfactory.settings_base import *
# Django Settings
##############################################################################
# Note: be sure not to put any spaces in the env var
ADMINS = [('admin', email) for email in
os.environ.get('ADMIN_EMAILS', '').split(',')]
EMAIL_HOST = os.environ.get('EMAIL_HOST', 'localhost')
SERVER_EMAIL = os.environ.get('SERVER_EMAIL', 'root@localhost')
ROOT_URLCONF = 'nucleus.urls'
# Whether the app should run in debug-mode.
DEBUG = os.environ.get('DJANGO_DEBUG', False)
# Configure database from DATABASE_URL environment variable.
DATABASES = {'default': dj_database_url.config()}
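# For illustration, dj_database_url understands URLs such as (assumed example value):
#   DATABASE_URL=postgres://nucleus:secret@localhost:5432/nucleus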
# Pull secret keys from environment.
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', '')
HMAC_KEYS = {'hmac_key': os.environ.get('DJANGO_HMAC_KEY', '')}
INSTALLED_APPS = [
# Nucleus and API apps.
'nucleus.base',
'rna',
# Django contrib apps.
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.staticfiles',
# Third-party apps, patches, fixes.
'south', # Must come before django_nose.
'commonware.response.cookies',
'django_browserid',
'django_extensions',
'django_nose',
'funfactory',
'pagedown',
'rest_framework',
'rest_framework.authtoken',
'session_csrf',
]
AUTHENTICATION_BACKENDS = [
'django_browserid.auth.BrowserIDBackend',
'django.contrib.auth.backends.ModelBackend',
]
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.media',
'django.core.context_processors.request',
'session_csrf.context_processor',
'django.contrib.messages.context_processors.messages',
'funfactory.context_processors.globals',
'django_browserid.context_processors.browserid',
)
MIDDLEWARE_CLASSES = (
'sslify.middleware.SSLifyMiddleware',
'multidb.middleware.PinningRouterMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'session_csrf.CsrfMiddleware', # Must be after auth middleware.
'django.contrib.messages.middleware.MessageMiddleware',
'commonware.middleware.FrameOptionsHeader',
)
LOGGING = {
'loggers': {
'playdoh': {
'level': logging.DEBUG
}
}
}
USE_TZ = True
# Needed for request.is_secure to work with stackato.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Third-party Libary Settings
##############################################################################
# Testing configuration.
NOSE_ARGS = ['--logging-clear-handlers', '--logging-filter=-south']
# Template paths that contain non-Jinja templates.
JINGO_EXCLUDE_APPS = (
'admin',
'registration',
'rest_framework',
'rna',
'browserid',
)
# Always generate a CSRF token for anonymous users.
ANON_ALWAYS = True
REST_FRAMEWORK = {
# Use hyperlinked styles by default.
# Only used if the `serializer_class` attribute is not set on a view.
'DEFAULT_MODEL_SERIALIZER_CLASS':
        'rna.serializers.HyperlinkedModelSerializerWithPkField',
# Use Django's standard `django.contrib.auth` permissions,
    # or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_FILTER_BACKENDS': ('rna.filters.TimestampedFilterBackend',)
}
# django-browserid -- no spaces allowed in stackato env vars
BROWSERID_AUDIENCES = os.environ.get('BROWSERID_AUDIENCES',
'http://localhost:8000').split(',')
# Nucleus-specific Settings
##############################################################################
# Should robots.txt deny everything or disallow a calculated list of URLs we
# don't want to be crawled? Default is false, disallow everything.
ENGAGE_ROBOTS = False
# RNA (Release Notes) Configuration
RNA = {
'BASE_URL': os.environ.get(
'RNA_BASE_URL', 'https://nucleus.mozilla.org/rna/'),
'LEGACY_API': os.environ.get('RNA_LEGACY_API', False)
}
geraldspreer/the-maker | makerTemplateViewBuilder.py | Python | gpl-3.0 | 7,622 | 0.009315
from makerUtilities import writeFile
from makerUtilities import readFile
import os
def scaffold(systemDir, defaultTheme):
return (
"""<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8" />
<script src='file://"""
+ os.path.join(systemDir, "jquery.min.js")
+ """'></script>
<style type="text/css">
html {
background: -webkit-gradient(linear, left top, left bottom, from(#000), to(rgb(93,94,120)));
background-attachment:fixed;
}
body {
font-family: "Helvetica Neue";
font-size: 14px;
width:auto;
/* max-width:694px; */
color:#fff;
padding:20px 20px;
-webkit-transform: perspective( 600px );
}
a {
color: #ddd;
}
.thumbnail a {
text-decoration:none;
color:#000;
cursor:default;
}
p {
font-weight:lighter;
color:#fff;
letter-spacing:0.09em;
float:left;
font-size:0.9em;
line-height:1.45em;
text-align:left;
margin:-6px 0px 24px 10px;
}
h5 {
font-weight:lighter;
letter-spacing:0.050em;
margin:-28px 0px 0px 8px;
line-height:3em;
font-size:22px;
cursor:default;
}
img {
border:1px solid #333;
width:100%;
                height:100%;
-webkit-box-reflect: below 0px -webkit-gradient(linear, left top, left bottom, from(transparent), color-stop(50%, transparent), to(rgba(0,0,0,0.2)));
-webkit-transform: perspective( 600px ) rotateY( 0deg);
margin-bottom:40px;
}
.row {
width:100%;
margin:0px 0px 40px 10px;
float:left;
clear:both;
}
.thumbnail {
width:17%;
padding:20px 20px 10px 20px;
margin:0px 20px 0px 0px;
float:left;
clear:right;
background:none;
}
.thumbnail img {
height:100px;
}
.thumbnail p {
text-align:center;
margin:-24px 0px 0px 0px;
width:100%;
font-size:14px;
cursor:default;
}
.thumbnail.selected {
border:1px solid #777;
padding:20px 20px 10px 20px;
-webkit-border-radius:10px;
background: -webkit-gradient(linear, left top, left bottom, from(rgba(140,140,140,0.1)), to(rgba(170,170,170,0.2)));
}
.info {
width:92%;
float:left;
clear:both;
display:none;
margin:40px 10px 0px 10px;
}
.info p {
float:left;
clear:right;
cursor:default;
}
.info img {
width:280px;
height:auto;
float:left;
clear:right;
margin:0px 48px 0px 8px;
-webkit-transform: perspective( 600px ) rotateY( 10deg );
/*
-webkit-transition: width, 0.5s;
*/
}
/*
.info img:hover {
width:320px;
-webkit-transform: perspective( 600px ) rotateY( 0deg );
}
*/
.info h5 {
margin-top:0px;
}
.info h5, p {
width:380px;
float:left;
}
a.button {
cursor:default;
color:#000;
}
a.button:active {
color:#000;
background: -webkit-gradient(linear, left top, left bottom, from(#eee), to(#bbb));
}
</style>
<script type="text/javascript">
$(document).ready(function(){
$('#"""
+ defaultTheme
+ """').addClass('selected');
$('#info-"""
+ defaultTheme
+ """').show();
$('.thumbnail').click(function(){
$('.info').hide();
$('.thumbnail').removeClass('selected')
$(this).addClass('selected');
$($(this).data('info')).show();
});
});
</script>
</head>
<body>
"""
+ createThumbnails(systemDir)
+ createInfo(systemDir)
+ """
</body>
</html>
"""
)
def buildView(systemDir, viewPath):
writeFile(
os.path.join(viewPath, "yourTemplates.html"),
scaffold(systemDir, defaultTemplate()),
)
return os.path.join(viewPath, "yourTemplates.html")
def defaultTemplate():
# ===========================================================================
# This is used to set the default template for the application
# ===========================================================================
return "Simple-Markdown"
def createThumbnails(systemDir):
thumbnails = "<div class='row'>\n"
for template in os.listdir(os.path.join(systemDir, "templates")):
if not template.startswith("."):
thumbnails += makeThumbnail(systemDir, template)
thumbnails += "</div>"
return thumbnails
def createInfo(systemDir):
info = "<div class='row'>\n"
for template in os.listdir(os.path.join(systemDir, "templates")):
if not template.startswith("."):
s = readFile(
os.path.join(systemDir, "templates", template, "parts", "info.json")
)
data = eval(s)
info += makeInfo(systemDir, template, data)
info += "</div>"
return info
def makeInfo(systemDir, templateName, data):
previewImage = os.path.join(
systemDir, "templates", templateName, "parts/preview.jpg"
)
info = (
"""
<div class="info" id="info-"""
+ data["Title"]
+ """">
<img src='"""
+ previewImage
+ """' />
<h5>"""
+ data["Title"]
+ """</h5>
<p>"""
+ data["Description"]
+ """<br /><br />
Credit: """
+ data["Credit"]
+ """<br />
Support: <a href='"""
+ data["Support"]
+ """'>www.makercms.org</a><br />
</p>
</div>
"""
)
return info
def makeThumbnail(systemDir, templateName):
previewImage = os.path.join(
systemDir, "templates", templateName, "parts/preview.jpg"
)
thumbnail = (
"""
<div class='thumbnail' id='"""
+ templateName
+ """' data-info='#info-"""
+ templateName
+ """'>
<a href='--"""
+ templateName
+ """--'>
<img src='"""
+ previewImage
+ """' />
<p>"""
+ templateName
+ """</p></a>
</div>
"""
)
return thumbnail
sqlalchemy/alembic | tests/test_autogen_composition.py | Python | mit | 16,541 | 0
import re
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy.sql.sqltypes import DateTime
from alembic import autogenerate
from alembic.migration import MigrationContext
from alembic.testing import eq_
from alembic.testing import TestBase
from alembic.testing.suite._autogen_fixtures import _default_include_object
from alembic.testing.suite._autogen_fixtures import AutogenTest
from alembic.testing.suite._autogen_fixtures import ModelOne
class AutogenerateDiffTest(ModelOne, AutogenTest, TestBase):
__only_on__ = "sqlite"
def test_render_nothing(self):
context = MigrationContext.configure(
connection=self.bind.connect(),
opts={
"compare_type": True,
"compare_server_default": True,
"target_metadata": self.m1,
"upgrade_token": "upgrades",
"downgrade_token": "downgrades",
},
)
template_args = {}
autogenerate._render_migration_diffs(context, template_args)
eq_(
re.sub(r"u'", "'", template_args["upgrades"]),
"""# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###""",
)
eq_(
re.sub(r"u'", "'", template_args["downgrades"]),
"""# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###""",
)
def test_render_nothing_batch(self):
context = MigrationContext.configure(
connection=self.bind.connect(),
opts={
"compare_type": True,
"compare_server_default": True,
"target_metadata": se
|
lf.m1,
"upgrade_token": "upgrades",
"downgrade_token": "downgrades",
"alembic_module_prefix": "op.",
"sqlalchemy_module_prefix": "sa.",
"render_as_batch": True,
"include_symbo
|
l": lambda name, schema: False,
},
)
template_args = {}
autogenerate._render_migration_diffs(context, template_args)
eq_(
re.sub(r"u'", "'", template_args["upgrades"]),
"""# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###""",
)
eq_(
re.sub(r"u'", "'", template_args["downgrades"]),
"""# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###""",
)
def test_render_diffs_standard(self):
"""test a full render including indentation"""
template_args = {}
autogenerate._render_migration_diffs(self.context, template_args)
eq_(
re.sub(r"u'", "'", template_args["upgrades"]),
"""# ### commands auto generated by Alembic - please adjust! ###
op.create_table('item',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('description', sa.String(length=100), nullable=True),
sa.Column('order_id', sa.Integer(), nullable=True),
sa.CheckConstraint('len(description) > 5'),
sa.ForeignKeyConstraint(['order_id'], ['order.order_id'], ),
sa.PrimaryKeyConstraint('id')
)
op.drop_table('extra')
op.add_column('address', sa.Column('street', sa.String(length=50), \
nullable=True))
op.create_unique_constraint('uq_email', 'address', ['email_address'])
op.add_column('order', sa.Column('user_id', sa.Integer(), nullable=True))
op.alter_column('order', 'amount',
existing_type=sa.NUMERIC(precision=8, scale=2),
type_=sa.Numeric(precision=10, scale=2),
nullable=True,
existing_server_default=sa.text('0'))
op.create_foreign_key(None, 'order', 'user', ['user_id'], ['id'])
op.alter_column('user', 'name',
existing_type=sa.VARCHAR(length=50),
nullable=False)
op.alter_column('user', 'a1',
existing_type=sa.TEXT(),
server_default='x',
existing_nullable=True)
op.drop_index('pw_idx', table_name='user')
op.drop_column('user', 'pw')
# ### end Alembic commands ###""",
)
eq_(
re.sub(r"u'", "'", template_args["downgrades"]),
"""# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('pw', sa.VARCHAR(length=50), \
nullable=True))
op.create_index('pw_idx', 'user', ['pw'], unique=False)
op.alter_column('user', 'a1',
existing_type=sa.TEXT(),
server_default=None,
existing_nullable=True)
op.alter_column('user', 'name',
existing_type=sa.VARCHAR(length=50),
nullable=True)
op.drop_constraint(None, 'order', type_='foreignkey')
op.alter_column('order', 'amount',
existing_type=sa.Numeric(precision=10, scale=2),
type_=sa.NUMERIC(precision=8, scale=2),
nullable=False,
existing_server_default=sa.text('0'))
op.drop_column('order', 'user_id')
op.drop_constraint('uq_email', 'address', type_='unique')
op.drop_column('address', 'street')
op.create_table('extra',
sa.Column('x', sa.CHAR(), nullable=True),
sa.Column('uid', sa.INTEGER(), nullable=True),
sa.ForeignKeyConstraint(['uid'], ['user.id'], )
)
op.drop_table('item')
# ### end Alembic commands ###""",
)
def test_render_diffs_batch(self):
"""test a full render in batch mode including indentation"""
template_args = {}
self.context.opts["render_as_batch"] = True
autogenerate._render_migration_diffs(self.context, template_args)
eq_(
re.sub(r"u'", "'", template_args["upgrades"]),
"""# ### commands auto generated by Alembic - please adjust! ###
op.create_table('item',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('description', sa.String(length=100), nullable=True),
sa.Column('order_id', sa.Integer(), nullable=True),
sa.CheckConstraint('len(description) > 5'),
sa.ForeignKeyConstraint(['order_id'], ['order.order_id'], ),
sa.PrimaryKeyConstraint('id')
)
op.drop_table('extra')
with op.batch_alter_table('address', schema=None) as batch_op:
batch_op.add_column(sa.Column('street', sa.String(length=50), nullable=True))
batch_op.create_unique_constraint('uq_email', ['email_address'])
with op.batch_alter_table('order', schema=None) as batch_op:
batch_op.add_column(sa.Column('user_id', sa.Integer(), nullable=True))
batch_op.alter_column('amount',
existing_type=sa.NUMERIC(precision=8, scale=2),
type_=sa.Numeric(precision=10, scale=2),
nullable=True,
existing_server_default=sa.text('0'))
batch_op.create_foreign_key(None, 'user', ['user_id'], ['id'])
with op.batch_alter_table('user', schema=None) as batch_op:
batch_op.alter_column('name',
existing_type=sa.VARCHAR(length=50),
nullable=False)
batch_op.alter_column('a1',
existing_type=sa.TEXT(),
server_default='x',
existing_nullable=True)
batch_op.drop_index('pw_idx')
batch_op.drop_column('pw')
# ### end Alembic commands ###""", # noqa,
)
eq_(
re.sub(r"u'", "'", template_args["downgrades"]),
"""# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('user', schema=None) as batch_op:
batch_op.add_column(sa.Column('pw', sa.VARCHAR(length=50), nullable=True))
batch_op.create_index('pw_idx', ['pw'], unique=False)
batch_op.alter_column('a1',
existing_type=sa.TEXT(),
server_default=None,
existing_nullable=True)
batch_op.alter_column('name',
existing_type=sa.VARCHAR(length=50),
               nullable=True)
dkm/skylines | skylines/lib/base36.py | Python | agpl-3.0 | 746 | 0
"""
base 36 encoding/decoding taken from wikipedia sample code
http://en.wikipedia.org/wiki/Base_36#Python_Conversion_Code
"""
def encode(number, alphabet='0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'):
"""Converts an in
|
teger to a base36 string."""
if not isinstance(number, (int, long)):
raise TypeError('number must be an integer')
if number >= 0 and number <= 9:
return alphabet[number]
base36 = ''
sign = ''
if number < 0:
sign = '-'
number = -number
while number != 0:
number, i = divmod(number, len(alphabet))
base36 = alphabet[i] + base36
return sign + base36
def decode(number):
"""Converts a base36 string to an integer."""
return int(number, 36)
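# Round-trip illustration (not part of the original module):
#   encode(36)   -> '10'
#   decode('10') -> 36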
smalls12/django_helpcenter | helpcenter/admin.py | Python | mit | 883 | 0
from django.contrib import admin
from helpcenter import models
class ArticleAdmin(admin.ModelAdmin):
""" Admin for the Article model """
date_hierarchy = 'time_published'
fieldsets = (
(None, {
'fields': ('category', 'title', 'body')
}),
('Publishing Options', {
'classes': ('collapse',),
'fields': ('draft', 'time_published')
}))
    list_display = (
        'title', 'category', 'time_published', 'time_edited', 'draft')
    search_fields = ('title',)
class CategoryAdmin(admin.ModelAdmin):
""" Admin for the Category model """
fieldsets = (
(None, {
'fields': ('parent', 'title')
}),)
list_display = ('title', 'parent')
search_fields = ('title',)
admin.site.register(models.Article, ArticleAdmin)
admin.site.register(models.Category, CategoryAdmin)
berndf/avg_q | python/avg_q/Presentation.py | Python | gpl-3.0 | 3,161 | 0.043341
# Copyright (C) 2013 Bernd Feige
# This file is part of avg_q and released under the GPL v3 (see avg_q/COPYING).
"""
Presentation utilities.
"""
from . import trgfile
class PresLog(object):
# Basic log file reading.
def __init__(self,logfile,part='events'):
'''part can be 'events' or 'trials' for the first or second part'''
self.logfile=logfile
self.log=open(self.logfile,"r")
fileheader=next(self.log).rstrip('\r\n')
if not fileheader.startswith('Scenario -'):
raise Exception("PresLog: File doesn't start with 'Scenario'")
self.scenario=fileheader[11:]
#print("Scenario: %s" % self.scenario)
fileheader2=next(self.log).rstrip('\r\n')
#print("fileheader2: %s" % fileheader2)
if fileheader2.startswith('Logfile written - '):
import datetime
self.timestamp=datetime.datetime.strptime(fileheader2[18:],"%m/%d/%Y %H:%M:%S")
#print(self.timestamp)
else:
self.timestamp=None
table_start=['Subject','Trial'] if part=='events' else ['Event Type']
self.header_fields=None
for line in self.log:
fields=line.rstrip('\r\n').split('\t')
if len(fields)<=1: continue
if self.header_fields is None:
# The first table is skipped...
if fields[0] in table_start:
self.header_fields=fields
self.atstart=True
break
def __iter__(self):
for line in self.log:
fields=line.rstrip('\r\n').split('\t')
if len(fields)<=1:
# Only at the start skip empty line(s)
if self.atstart: continue
else: break
self.atstart=False
yield fields
def __del__(self):
self.close()
def close(self):
if self.log:
self.log.close()
self.log=None
class PresLogfile(trgfile.trgfile):
 def __init__(self,logfile,part='events'):
self.PL=PresLog(logfile,part)
trgfile.trgfile.__init__(self,self.PL)
self.preamble['Sfreq']=10000.0
def rdr(self):
for fields in self.reader:
data=dict(zip(self.PL.header_fields,fields))
point=int(data['Time'])
description=data['Event Type']
try:
code=int(data['Code'])
except:
code= -1
description=' '.join([description,data['Code']])
yield (point, code, description)
def close(self):
if self.PL:
self.PL.close()
self.PL=None
def gettuples_abstime(self):
# We are calculating backwards from the time the log was written, which is given
# in local time, and it may happen that a DST switch occurred between start and end.
# Most plots, simply working for a given time from the start, are totally okay if you don't
# mind that the end times are still in the old frame, but since the local time here may
# already be in the new frame we have to correct to achieve this "work-from-start" behavior.
import pytz
tuples=self.gettuples()
sfreq=float(self.preamble.get('Sfreq'))
last_s=pytz.datetime.timedelta(seconds=tuples[-1][0]/sfreq)
tz_aware_end=pytz.timezone('Europe/Berlin').localize(self.PL.timestamp)
# This computes the correct local start time considering a possible DST switch and
# converts it to the TZ-unaware local time we really want...
self.start_datetime=tz_aware_end.tzinfo.normalize(tz_aware_end-last_s).replace(tzinfo=None)
return trgfile.trgfile.gettuples_abstime(self)
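# Illustrative usage (a sketch; 'experiment.log' is a placeholder file name):
#   pl = PresLogfile('experiment.log', part='events')
#   triggers = pl.gettuples_abstime()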
athkishore/vgr | migrations/versions/c626e32ddcc_.py | Python | mit | 2,615 | 0.01262
"""empty message
Revision ID: c626e32ddcc
Revises: None
Create Date: 2016-01-23 14:47:09.205628
"""
# revision identifiers, used by Alembic.
revision = 'c626e32ddcc'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=64), nullable=True),
sa.Column('default', sa.Boolean(), nullable=True),
sa.Column('permissions', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_index('ix_roles_default', 'roles', ['default'], unique=False)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('email', sa.String(length=64), nullable=True),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
    sa.Column('password_hash', sa.String(length=128), nullable=True),
sa.Column('confirmed', sa.Boolean(), nullable=True),
sa.Column('name', sa.String(length=64), nullable=True),
sa.Column('location', sa.String(length=64), nullable=True),
sa.Column('about_me', sa.Text(), nullable=True),
    sa.Column('member_since', sa.DateTime(), nullable=True),
sa.Column('last_seen', sa.DateTime(), nullable=True),
sa.Column('avatar_hash', sa.String(length=32), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index('ix_users_email', 'users', ['email'], unique=True)
op.create_index('ix_users_username', 'users', ['username'], unique=True)
op.create_table('posts',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('body', sa.Text(), nullable=True),
sa.Column('body_html', sa.Text(), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('author_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index('ix_posts_timestamp', 'posts', ['timestamp'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_posts_timestamp', 'posts')
op.drop_table('posts')
op.drop_index('ix_users_username', 'users')
op.drop_index('ix_users_email', 'users')
op.drop_table('users')
op.drop_index('ix_roles_default', 'roles')
op.drop_table('roles')
### end Alembic commands ###
root-z/ECPP | hilbert.py | Python | gpl-2.0 | 4,207 | 0.002377
'''
Compute Hilbert Class Polynomials
'''
from mpmath import *
import mpmath
round = lambda x: mpmath.floor(x + 0.5)
def hilbert(d):
'''
Compute Hilbert Class Polynomial.
Follows pseudo code from Algorithm 7.5.8
Args:
d: fundamental discriminant
Returns:
Hilbert class number, Hilbert class polynomial, and all reduced forms
'''
# initialize
t = [1]
b = d % 2
r = floor(sqrt((-d)/3))
h = 0
red = set()
reduced_forms = reduced_form(d) # print h1
a_inverse_sum = sum(1/mpf(form[0]) for form in reduced_forms)
precision = round(pi*sqrt(-d)*a_inverse_sum / log(10)) + 10
mpmath.mp.dps = precision
# outer loop
while b <= r:
m = (b*b - d) / 4
m_sqrt = int(floor(sqrt(m)))
for a in range(1, m_sqrt+1):
if m % a != 0:
continue
c = m/a
if b > a:
continue
# optional polynomial setup
tau = (-b + 1j * sqrt(-d)) / (2*a)
f = power(dedekind_eta(2 * tau, precision) / dedekind_eta(tau, precision), 24)
j = power((256 * f + 1), 3) / f
if b==a or c==a or b==0:
# T = T * (X-j)
t = polynomial_mul(t, [-j, 1])
h += 1
red.add((a, b, c))
else:
poly = [j.real * j.real + j.imag * j.imag, -2 * j.real, 1]
t = polynomial_mul(t, poly)
h += 2
red.add((a, b, c))
red.add((a, -b, c))
b += 2
if red != reduced_forms:
raise ValueError('Reduced form inconsistent.')
return h, [int(floor(mpmath.re(p) + 0.5)) for p in t], red
def reduced_form(d):
'''
    Given discriminant D compute its reduced forms. Used to calculate precision.
Args:
d:
Returns:
'''
# initialize
b = d % 2
r = floor(sqrt((-d)/3))
h = 0
red = set()
# outer loop
while b <= r:
m = (b*b - d) / 4
m_sqrt = int(floor(sqrt(m)))
for a in range(1, m_sqrt+1):
if m % a != 0:
continue
c = m / a
if b > a:
continue
# optional polynomial setup
if b==a or c==a or b==0:
# T = T * (X-j)
h += 1
red.add((a, b, c))
else:
h += 2
red.add((a, b, c))
red.add((a, -b, c))
b += 2
return red
def delta(q):
return q
def dedekind_eta(tau, precision):
"""
Implementation of dedekind's eta function.
This implementation follows the idea in NZMATH's implementation
Args:
tau:
        precision: The desired precision
    Returns:
        evaluation of dedekind's eta function
"""
# a = 2 * mpmath.pi / mpmath.mpf(24)
# b = mpmath.exp(mpmath.mpc(0, a))
x = exp(mpc(0, 2 * pi / mpf(24)))
# b = e^(2pi*i/24)
outer = 1
absolute = 0
# functional equations
while absolute <= 1 - 0.1**5:
real_tau = round(tau.real)
if real_tau != 0:
tau -= real_tau
outer *= x ** real_tau
absolute = fabs(tau)
if absolute > 1 - 0.1**5:
break
ro = sqrt(power(tau, -1)*1j)
# ro = sqrt((tau^-1)*i)
if ro.real < 0:
ro = -ro
outer = outer*ro
tau = (-outer.real + outer.imag*1j) / absolute
#print 'tau=', tau, '\n p =', p
q1 = mpmath.exp((pi/12) * tau * 1j)
q = q1**24
# q = e^(2pi*tau*i)
sum = 1
qs = mpmath.mpc(1, 0)
qn = 1
bound = mpmath.mpf(10)**(-precision-2)
while fabs(qs) > bound:
t = -q*qn*qn*qs
qn *= q
qs = qn*t
sum += t + qs
return outer*q1*sum
# Compare to wolfram alpha the result is correct.
def polynomial_mul(p1, p2):
'''
Used to Compute T = T * (X-j)
'''
if len(p1) == 0 or len(p2) == 0:
raise ValueError('Polynomial Array empty.')
m = [0] * (len(p1) + len(p2) - 1)
for i in range(0, len(p1)):
for j in range(0, len(p2)):
m[i+j] += p1[i] * p2[j]
return m
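# For example, polynomial_mul([1, 2], [3, 4]) == [3, 10, 8],
# i.e. (1 + 2X) * (3 + 4X) = 3 + 10X + 8X**2.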
Azure/azure-sdk-for-python | sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_custom_forms_async.py | Python | mit | 6,386 | 0.004071
# coding: utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_recognize_custom_forms_async.py
DESCRIPTION:
This sample demonstrates how to analyze a form from a document with a custom
trained model. The form must be of the same type as the forms the custom model
was trained on. To learn how to train your own models, look at
sample_train_model_without_labels_async.py and sample_train_model_with_labels_async.py
The model can be trained using the training files found here:
https://aka.ms/azsdk/formrecognizer/sampletrainingfiles-v3.1
USAGE:
python sample_recognize_custom_forms_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_FORM_RECOGNIZER_ENDPOINT - the endpoint to your Cognitive Services resource.
2) AZURE_FORM_RECOGNIZER_KEY - your Form Recognizer API key
3) CUSTOM_TRAINED_MODEL_ID - the ID of your custom trained model
-OR-
CONTAINER_SAS_URL_V2 - The shared access signature (SAS) Url of your Azure Blob Storage container with your forms.
A model will be trained and used to run the sample.
"""
import os
import asyncio
class RecognizeCustomFormsSampleAsync(object):
async def recognize_custom_forms(self, custom_model_id):
path_to_sample_forms = os.path.abspath(os.path.join(os.path.abspath(__file__),
"..", "..", "..", "./sample_forms/forms/Form_1.jpg"))
# [START recognize_custom_forms_async]
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer.aio import FormRecognizerClient
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
model_id = os.getenv("CUSTOM_TRAINED_MODEL_ID", custom_model_id)
async with FormRecognizerClient(
endpoint=endpoint, credential=AzureKeyCredential(key)
) as form_recognizer_client:
# Make sure your form's type is included in the list of form types the custom model can recognize
with open(path_to_sample_forms, "rb") as f:
poller = await form_recognizer_client.begin_recognize_custom_forms(
model_id=model_id, form=f, include_field_elements=True
)
forms = await poller.result()
for idx, form in enumerate(forms):
print("--------Recognizing Form #{}--------".format(idx+1))
print("Form has type {}".format(form.form_type))
print("Form has form type confidence {}".format(form.form_type_confidence))
print("Form was analyzed with model with ID {}".format(form.model_id))
for name, field in form.fields.items():
# each field is of type FormField
# label_data is populated if you are using a model trained without labels,
# since the service needs to make predictions for labels if not explicitly given to it.
if field.label_data:
print("...Field '{}' has label '{}' with a confidence score of {}".format(
name,
field.label_data.text,
field.confidence
))
print("...Label '{}' has value '{}' with a confidence score of {}".format(
field.label_data.text if field.label_data else name, field.value, field.confidence
))
# iterate over tables, lines, and selection marks on each page
for page in form.pages:
for i, table in enumerate(page.tables):
print("\nTable {} on page {}".format(i + 1, table.page_number))
for cell in table.cells:
print("...Cell[{}][{}] has text '{}' with confidence {}".format(
cell.row_index, cell.column_index, cell.text, cell.confidence
))
print("\nLines found on page {}".format(page.page_number))
for line in page.lines:
print("...Line '{}' is made up of the following words: ".format(line.text))
for word in line.words:
print("......Word '{}' has a confidence of {}".format(
word.text,
word.confidence
))
if page.selection_marks:
print("\nSelection marks found on page {}".format(page.page_number))
for selection_mark in page.selection_marks:
print("......Selection mark is '{}' and has a confidence of {}".format(
selection_mark.state,
selection_mark.confidence
))
print("-----------------------------------")
# [END recognize_custom_forms_async]
async def main():
sample = RecognizeCustomFormsSampleAsync()
model_id = None
if os.getenv("CONTAINER_SAS_URL_V2"):
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer.aio import FormTrainingClient
endpoint = os.getenv("AZURE_FORM_RECOGNIZER_ENDPOINT")
key = os.getenv("AZURE_FORM_RECOGNIZER_KEY")
if not endpoint or not key:
raise ValueError("Please provide endpoint and API key to run the samples.")
form_training_client = FormTrainingClient(
endpoint=endpoint, credential=AzureKeyCredential(key)
)
async with form_training_client:
model = await (await form_training_client.begin_training(
os.getenv("CONTAINER_SAS_URL_V2"), use_training_labels=True)).result()
model_id = model.model_id
await sample.recognize_custom_forms(model_id)
if __name__ == '__main__':
asyncio.run(main())
dudanogueira/microerp | microerp/comercial/migrations/0054_tipodeproposta_tipo_contrato_mapeado.py | Python | lgpl-3.0 | 472 | 0.002119
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('comercial', '0053_auto_20151118_1323'),
]
operations = [
        migrations.AddField(
model_name='tipodeproposta',
name='tipo_contrato_mapeado',
field=models.ForeignKey(blank=True, to='comercial.TipodeContratoFechado', null=True),
),
]
CDE-UNIBE/lokp | lokp/review/activities.py | Python | gpl-3.0 | 314 | 0
from lokp.models import DBSession
from lokp.protocols.activity_protocol import ActivityProtocol
from lokp.review.review import BaseReview
class ActivityReview(BaseReview):
def __init__(self, request):
super(ActivityReview, self).__init__(request)
self.protocol = ActivityProtocol(DBSession)
sbarton272/StreetPong | IPC/printer.py | Python | apache-2.0 | 195 | 0.005128
from sys import stdin
import signal
# for i in xrange(1,10):
# print "Stuff", i
#     print stdin.readline()
import os
pid = int(stdin.readline().strip())
print pid
os.kill(pid, signal.SIGINT)
jgehring/rsvndump | tests/db/tests/delete_add.py | Python | gpl-3.0 | 1,440 | 0.046528
#
# Test database for rsvndump
# written by Jonas Gehring
#
import os
import test_api
def info():
return "Add after delete test"
def setup(step, log):
if step == 0:
os.mkdir("dir1")
f = open("dir1/file1","wb")
print >>f, "hello1"
print >>f, "hello2"
f = open("dir1/file2","wb")
print >>f, "hello3"
test_api.run("svn", "add", "dir1", output = log)
return True
elif step == 1:
f = open("file1","wb")
print >>f, "hello4"
f = open("file12","wb")
print >>f, "hello5"
test_api.run("svn", "add", "file1", "file12", output = log)
return True
elif step == 2:
		test_api.run("svn", "rm", "file1", output=log)
return True
elif step == 3:
f = open("file12","ab")
print >>f, "hello6"
return True
elif step == 4:
test_api.run("svn", "rm", "dir1", output=log)
return True
elif step == 5:
os.mkdir("dir1")
f = open("dir1/file1","wb")
print >>f, "hello7"
f = open("dir1/file2","wb")
print >>f, "hello8"
print >>f, "hello9"
test_api.run("svn", "add", "dir1", output = log)
return True
elif step == 6:
		f = open("dir1/file1","ab")
print >>f, "hello10"
return True
else:
return False
# Runs the test
def run(id, args = []):
# Set up the test repository
test_api.setup_repos(id, setup)
odump_path = test_api.dump_original(id)
rdump_path = test_api.dump_rsvndump(id, args)
vdump_path = test_api.dump_reload(id, rdump_path)
return test_api.diff(id, odump_path, vdump_path)
metrey/b2tob3 | setup.py | Python | mit | 935 | 0.001071
#!/usr/bin/env python
# coding: utf-8
from setuptools import setup, find_packages
from b2tob3 import VERSION
with open('README.rst') as f:
README = f.read()
with open('LICENSE') as f:
LICENSE = f.read()
setup(
name='b2tob3',
version=VERSION,
packages=find_packages(),
long_description=README,
license=LICENSE,
author='Ramiro Gómez',
author_email='code@ramiro.org',
description='Help migrate HTML files and templates form bootstrap 2 to 3.',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
        'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Communications',
'Topic :: Text Processing',
],
entry_points={
'console_scripts': [
'b2tob3=b2tob3.b2tob3:main'
]
}
)
initzx/aobot | utils/logger.py | Python | gpl-3.0 | 3,201 | 0.002812
from utils.edit_configs import get_json
class Logger:
config = get_json('server_configs')
log_config = {}
_instances = {}
@staticmethod
def get_singleton(client):
if client.shard_id not in Logger._instances:
Logger._instances[client.shard_id] = Logger()
return Logger._instances[client.shard_id]
@staticmethod
async def update(client):
config = get_json('server_configs')
log_config = {k: v['logging'] for k, v in config.items() if 'logging' in v}
temp = {}
for i in log_config:
ch = client.get_channel(log_config[i])
if ch:
temp[i] = ch
Logger.log_config[client.shard_id] = temp
@staticmethod
async def register_client_events(client):
await Logger.update(client)
@client.async_event
async def on_message_delete(message):
if message.server is not None and message.server.id in Logger.log_config[client.shard_id]:
logs = Logger.log_config[client.shard_id][message.server.id]
to_send = ':x: **Message deleted**: {0.author}: {0.content}'.format(message)
await client.send_message(logs, to_send)
@client.async_event
async def on_message_edit(before, after):
if after.server is not None and after.server.id in Logger.log_config[client.shard_id]:
logs = Logger.log_config[client.shard_id][after.server.id]
if before.content != after.content:
to_send = ':speech_left: **Message edited**: {0.author}: ~~{0.content}~~ | {1.content}'.format(before, after)
await client.send_message(logs, to_send)
@client.async_event
async def on_member_join(member):
if member.server.id in Logger.log_config[client.shard_id]:
logs = Logger.log_config[client.shard_id][member.server.id]
to_send = ':bust_in_silhouette::arrow_right: **User joined**: {0}'.format(member)
await client.send_message(logs, to_send)
@client.async_event
async def on_member_remove(member):
if member.server.id in Logger.log_config[client.shard_id]:
logs = Logger.log_config[client.shard_id][member.server.id]
                to_send = ':bust_in_silhouette::arrow_left: **User left**: {0}'.format(member)
await client.send_message(logs, to_send)
@client.async_event
async def on_member_ban(member):
if member.server.id in Logger.log_config[client.shard_id]:
logs = Logger.log_config[client.shard_id][member.server.id]
                to_send = ':bust_in_silhouette::x: **User banned**: {0}'.format(member)
await client.send_message(logs, to_send)
@client.async_event
async def on_member_unban(server, user):
if server.id in Logger.log_config[client.shard_id]:
logs = Logger.log_config[client.shard_id][server.id]
to_send = ':bust_in_silhouette::white_check_mark: **User unbanned**: {0}'.format(user)
await client.send_message(logs, to_send)
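# Typical wiring during bot startup (a sketch; 'client' is an already-constructed
# shard client):
#   await Logger.register_client_events(client)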
mailgun/flanker | flanker/mime/create.py | Python | apache-2.0 | 2,803 | 0
"""
This package is a set of utilities and methods for building mime messages.
"""
import uuid
from flanker import _email
from flanker.mime import DecodingError
from flanker.mime.message import ContentType, scanner
from flanker.mime.message.headers import WithParams
from flanker.mime.message.headers.parametrized import fix_content_type
from flanker.mime.message.part import MimePart, Body, Part, adjust_content_type
def multipart(subtype):
return MimePart(
container=Part(
ContentType(
"multipart", subtype, {"boundary": uuid.uuid4().hex})),
is_root=True)
def message_container(message):
part = MimePart(
container=Part(ContentType("message", "rfc822")),
enclosed=message)
message.set_root(False)
return part
def text(subtype, body, charset=None, disposition=None, filename=None):
return MimePart(
container=Body(
content_type=ContentType("text", subtype),
body=body,
charset=charset,
disposition=disposition,
            filename=filename),
is_root=True)
def binary(maintype, subtype, body, filename=None,
disposition=None, charset=None, trust_ctype=False):
return MimePart(
container=Body(
content_type=ContentType(maintype, subtype),
trust_ctype=trust_ctype,
body=body,
charset=charset,
disposition=disposition,
filename=filename),
is_root=True)
def attachment(content_type, body, filename=None,
disposition=None, charset=None):
"""Smarter method to build attachments that detects the proper content type
and form of the message based on content type string, body and filename
of the attachment
"""
# fix and sanitize content type string and get main and sub parts:
main, sub = fix_content_type(
content_type, default=('application', 'octet-stream'))
# adjust content type based on body or filename if it's not too accurate
content_type = adjust_content_type(
ContentType(main, sub), body, filename)
if content_type.main == 'message':
try:
message = message_container(from_string(body))
message.headers['Content-Disposition'] = WithParams(disposition)
return message
except DecodingError:
content_type = ContentType('application', 'octet-stream')
return binary(
content_type.main,
content_type.sub,
body, filename,
disposition,
charset, True)
def from_string(string):
return scanner.scan(string)
def from_python(message):
return from_string(_email.message_to_string(message))
def from_message(message):
return from_string(message.to_string())
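# Illustrative composition with the helpers above (a sketch; png_bytes is a
# placeholder and MimePart.append() is assumed, as used elsewhere in flanker):
#   msg = multipart("mixed")
#   msg.append(text("plain", "Hello"),
#              attachment("image/png", png_bytes, filename="logo.png"))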
jyr/japos | dashboards/urls.py | Python | gpl-2.0 | 112 | 0.017857
from django.conf.urls.defaults import *
urlpatterns = patterns('japos.dashboards.views',
(r'^$', 'index')
)
Chasego/codi | util/basic/quicksort.py | Python | mit | 120 | 0.008333
def quicksort(A, lo, hi):
    if lo < hi:
        p = partition(A, lo, hi)
        quicksort(A, lo, p - 1)
        quicksort(A, p + 1, hi)
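# The snippet above is only the recursive driver; a minimal Lomuto-style
# partition is sketched below as an assumed helper so the file is runnable.
def partition(A, lo, hi):
    pivot = A[hi]
    i = lo - 1
    for j in range(lo, hi):
        if A[j] <= pivot:
            i += 1
            A[i], A[j] = A[j], A[i]
    A[i + 1], A[hi] = A[hi], A[i + 1]
    return i + 1
# Example: data = [5, 2, 9, 1]; quicksort(data, 0, len(data) - 1); data == [1, 2, 5, 9]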
Skippern/PDF-scraper-Lorenzutti | creators/seletivo/common.py | Python | gpl-3.0 | 1,983 | 0.013138
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Common functions
import os, sys
lib_path = os.path.abspath( os.path.join( '..', '..', 'lib' ) )
sys.path.append(lib_path)
from commons import *
from overpasser import *
from routing import *
from feriados import *
from make_json import *
def lower_capitalized(input):
output = lower_capitalized_master(input)
# Specific place names
output = output.replace(u"P. Itapoã", u"Praia de Itapoã")
output = output.replace(u"Beira M", u"Beira Mar").replace(u"Marar", u"Mar")
output = output.replace(u"B. Mar", u"Beira Mar")
output = output.replace(u"C. Itaparica", u"Coqueiral de Itaparica")
output = output.replace(u"Exp.", u"Expedito")
output = output.replace(u"Castelandia", u"Castelândia")
output = output.replace(u"J. Camburi", u"Jardim Camburi")
output = output.replace(u"
|
P. Costa", u"Praia da Costa")
output = output.replace(u"S. Dourada", u"Serra Dourada")
output = output.replace(u"M. Noronha", u"Marcilio de Noronha")
output = output.replace(u"Marcilio de Noronha", u"Marcílio de Noronha")
return output.strip()
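# For example, an input already capitalized as u"C. Itaparica" is expanded to
# u"Coqueiral de Itaparica" by the replacements above (illustration only).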
def getLines():
downloadURL = "https://sistemas.es.gov.br/webservices/ceturb/onibus/api/ConsultaLinha?Tipo_Linha=Seletivo"
routes = []
myJSON = None
r = False
while r == False:
try:
r = requests.get(downloadURL, timeout=30)
except requests.exceptions.ReadTimeout as e:
r = False
except requests.exceptions.ConnectionError as e:
r = False
try:
myJSON = json.dumps(json.loads(r.content))
except:
r = False
station = [ None, None ]
for i in json.loads(myJSON):
# if i["Terminal_Seq"] == 1:
# station[0] = i["Dest_Terminal"]
# if i["Terminal_Seq"] == 2:
# station[1] = i["Dest_Terminal"]
routes.append( [ str(int(i[u"Linha"])), lower_capitalized(unicode(i[u"Descricao"])) ] )
return routes
zstackorg/zstack-woodpecker | integrationtest/vm/virt_plus/other/test_parallel_crt_vm_to_use_all_disk.py | Python | apache-2.0 | 5,980 | 0.009532
'''
This case can not be executed in parallel.
This case will calculate the max number of VMs based on 1 host's available disk space.
Then it will try to create all VMs at the same time to see if zstack could
handle it.
@author: Youyk
'''
import os
import sys
import threading
import time
import random
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.operations.host_operations as host_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.operations.volume_operations as vol_ops
import zstacklib.utils.sizeunit as sizeunit
import apibinding.inventory as inventory
_config_ = {
'timeout' : 1000,
'noparallel' : True
}
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
original_rate = None
new_offering_uuid = None
exc_info = []
def parallelly_create_vm(vm_name, image_name, host_uuid, disk_offering_uuid):
try:
vm = test_stub.create_vm(vm_name = vm_name, \
image_name = image_name, \
host_uuid = host_uuid, \
disk_offering_uuids = [disk_offering_uuid])
test_obj_dict.add_vm(vm)
except Exception as e:
exc_info.append(sys.exc_info())
def check_thread_exception():
if exc_info:
info1 = exc_info[0][1]
info2 = exc_info[0][2]
raise info1, None, info2
def test():
global original_rate
global new_offering_uuid
global delete_policy
test_util.test_dsc('Test memory allocation and reclaiming.')
cond = res_ops.gen_query_conditions('state', '=', 'Enabled')
cond = res_ops.gen_query_conditions('status', '=', 'Connected', cond)
hosts = res_ops.query_resource_with_num(res_ops.HOST, cond)
if not hosts:
test_util.test_skip('No Enabled/Connected host was found, skip test.' )
return True
ps = res_ops.query_resource_with_num(res_ops.PRIMARY_STORAGE, cond)
if len(ps) > 1:
test_util.test_skip('multiple Enabled/Connected primary storage was found, skip test.' )
if len(ps) == 0:
test_util.test_skip('No Enabled/Connected primary storage was found, skip test.' )
return True
if ps[0].type != inventory.LOCAL_STORAGE_TYPE:
test_util.test_skip('skip test if PS is not local storage.' )
return True
host = random.choice(hosts)
ps = ps[0]
over_provision_rate = 1
target_vm_num = 5
host_res = vol_ops.get_local_storage_capacity(host.uuid, ps.uuid)[0]
avail_cap = host_res.availableCapacity
image_name = os.environ.get('imageName_net')
image = test_lib.lib_get_image_by_name(image_name)
image_size = image.size
    original_rate = test_lib.lib_set_provision_storage_rate(over_provision_rate)
    data_volume_size = int(avail_cap / target_vm_num * over_provision_rate - image_size)
if data_volume_size < 0:
test_util.test_skip('Do not have enough disk space to do test')
return True
delete_policy = test_lib.lib_set_delete_policy('vm', 'Direct')
delete_policy = test_lib.lib_set_delete_policy('volume', 'Direct')
host_res = vol_ops.get_local_storage_capacity(host.uuid, ps.uuid)[0]
avail_cap = host_res.availableCapacity
disk_offering_option = test_util.DiskOfferingOption()
disk_offering_option.set_name('vm-parallel-creation-test')
disk_offering_option.set_diskSize(data_volume_size)
data_volume_offering = vol_ops.create_volume_offering(disk_offering_option)
test_obj_dict.add_disk_offering(data_volume_offering)
rounds = 1
while (rounds <= 3):
times = 1
test_util.test_logger('test round: %s' % rounds)
while (times <= (target_vm_num)):
thread = threading.Thread(target = parallelly_create_vm, \
args = ('parallel_vm_creating_%d' % times, \
image_name, \
host.uuid, \
data_volume_offering.uuid, ))
thread.start()
times += 1
times = 1
print 'Running VM: %s ' % len(test_obj_dict.get_vm_list())
while threading.active_count() > 1:
check_thread_exception()
time.sleep(1)
if times > 60:
test_util.test_fail('creating vm time exceed 60s')
times += 1
check_thread_exception()
try:
vm = test_stub.create_vm(vm_name = 'unexpected vm', \
image_name = image_name, \
host_uuid = host.uuid)
test_obj_dict.add_vm(vm)
except:
test_util.test_logger('expect vm creation failure')
else:
test_util.test_fail('The extra vm is unexpected to be created up')
for vm in test_obj_dict.get_all_vm_list():
try:
test_lib.lib_destroy_vm_and_data_volumes_objs_update_test_dict(vm, test_obj_dict)
except Exception as e:
test_util.test_logger("VM Destroying Failure in vm parallel creation test. :%s " % e)
raise e
rounds += 1
test_lib.lib_set_provision_storage_rate(original_rate)
test_lib.lib_robot_cleanup(test_obj_dict)
test_lib.lib_set_delete_policy('vm', delete_policy)
test_lib.lib_set_delete_policy('volume', delete_policy)
test_util.test_pass('Parallel vm creation Test Pass')
#Will be called only if exception happens in test().
def error_cleanup():
test_lib.lib_error_cleanup(test_obj_dict)
if original_rate:
test_lib.lib_set_provision_storage_rate(original_rate)
test_lib.lib_set_delete_policy('vm', delete_policy)
test_lib.lib_set_delete_policy('volume', delete_policy)
|
thopiekar/Uranium
|
plugins/UpdateChecker/UpdateCheckerJob.py
|
Python
|
lgpl-3.0
| 4,818
| 0.009755
|
# Copyright (c) 2017 Ultimaker B.V.
# Uranium is released under the terms of the LGPLv3 or higher.
from UM.Application import Application
from UM.Message import Message
from UM.Version import Version
from UM.Logger import Logger
from UM.Job import Job
import urllib.request
import platform
import json
import codecs
from UM.i18n import i18nCatalog
i18n_catalog = i18nCatalog("uranium")
## This job checks if there is an update available on the provided URL.
class UpdateCheckerJob(Job):
def __init__(self, silent = False, url = None, callback = None, set_download_url_callback = None):
super().__init__()
self.silent = silent
self._url = url
self._callback = callback
self._set_download_url_callback = set_download_url_callback
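    # run() fetches the JSON version manifest from self._url, compares it with the
    # running application version, and shows a message with a Download action when a
    # newer build is listed for this platform.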
def run(self):
if not self._url:
Logger.log("e", "Can not check for a new release. URL not set!")
no_new_version = True
application_name = Application.getInstance().getApplicationName()
Logger.log("i", "Checking for new version of %s" % application_name)
try:
headers = {"User-Agent": "%s - %s" % (application_name, Application.getInstance().getVersion())}
request = urllib.request.Request(self._url, headers = headers)
latest_version_file = urllib.request.urlopen(request)
except Exception as e:
Logger.log("w", "Failed to check for new version: %s" % e)
if not self.silent:
Message(i18n_catalog.i18nc("@info", "Could not access update information."),
title = i18n_catalog.i18nc("@info:title", "Version Upgrade")
).show()
return
try:
reader = codecs.getreader("utf-8")
data = json.load(reader(latest_version_file))
try:
if Application.getInstance().getVersion() is not "master":
local_version = Version(Application.getInstance().getVersion())
else:
if not self.silent:
Message(i18n_catalog.i18nc("@info", "The version you are using does not support checking for updates."), title = i18n_catalog.i18nc("@info:title", "Warning")).show()
return
except ValueError:
Logger.log("w", "Could not determine application version from string %s, not checking for updates", Application.getInstance().getVersion())
if not self.silent:
Message(i18n_catalog.i18nc("@info", "The version you are using does not support checking for updates."), title = i18n_catalog.i18nc("@info:title", "Version Upgrade")).show()
return
if application_name in data:
for key, value in data[application_name].items():
if "major" in value and "minor" in value and "revision" in value and "url" in value:
os = key
if platform.system() == os: #TODO: add architecture check
newest_version = Version([int(value["major"]), int(value["minor"]), int(value["revision"])])
if local_version < newest_version:
Logger.log("i", "Found a new version of the software. Spawning message")
message = Message(i18n_catalog.i18nc("@info", "A new version is available!"), title = i18n_catalog.i18nc("@info:title", "Version Upgrade"))
                        message.addAction("download", i18n_catalog.i18nc("@action:button", "Download"), "[no_icon]", "[no_description]")
if self._set_download_url_callback:
self._set_download_url_callback(value["url"])
message.actionTriggered.connect(self._callback)
message.show()
no_new_version = False
break
else:
Logger.log("w", "Could not find version information or download url for update.")
else:
Logger.log("w", "Did not find any version information for %s." % application_name)
except Exception:
Logger.logException("e", "Exception in update checker while parsing the JSON file.")
Message(i18n_catalog.i18nc("@info", "An exception occurred while checking for updates."), title = i18n_catalog.i18nc("@info:title", "Error")).show()
no_new_version = False # Just to suppress the message below.
if no_new_version and not self.silent:
Message(i18n_catalog.i18nc("@info", "No new version was found."), title = i18n_catalog.i18nc("@info:title", "Version Upgrade")).show()
|
ncos/hometasks
|
Lunev/programming/star_1/stable/tests/file_generator.py
|
Python
|
mit
| 193
| 0.010363
|
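# Writes the same 16-byte pattern to a talker input file and a matching "gold" copy
# for the listener to compare against (purpose inferred from the file names).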
f_source = open('../talker/workfile', 'w')
f_gold = open('../listener/workfile', 'w')
for i in range(100000):
f_source.write('0123456789abcdef')
    f_gold.write('0123456789abcdef')
|
sachinkum/Bal-Aveksha
|
WebServer/Authentications/admin.py
|
Python
|
gpl-3.0
| 54
| 0
|
from django.contrib import admin
from . import models
|
DarioGT/OMS-PluginXML
|
org.modelsphere.sms/lib/jython-2.2.1/Lib/test/test_compile.py
|
Python
|
gpl-3.0
| 3,243
| 0.005242
|
from test_support import verbose, TestFailed
if verbose:
print "Testing whether compiler catches assignment to __debug__"
try:
compile('__debug__ = 1', '?', 'single')
except SyntaxError:
pass
import __builtin__
prev = __builtin__.__debug__
setattr(__builtin__, '__debug__', 'sure')
setattr(__builtin__, '__debug__', prev)
if verbose:
print 'Running tests on argument handling'
try:
exec 'def f(a, a): pass'
raise TestFailed, "duplicate arguments"
except SyntaxError:
pass
try:
exec 'def f(a = 0, a = 1): pass'
raise TestFailed, "duplicate keyword arguments"
except SyntaxError:
pass
try:
exec 'def f(a): global a; a = 1'
raise TestFailed, "variable is global and local"
except SyntaxError:
pass
if verbose:
print "testing complex args"
def comp_args((a, b)):
print a,b
comp_args((1, 2))
def comp_args((a, b)=(3, 4)):
print a, b
comp_args((1, 2))
comp_args()
def comp_args(a, (b, c)):
print a, b, c
comp_args(1, (2, 3))
def comp_args(a=2, (b, c)=(3, 4)):
print a, b, c
comp_args(1, (2, 3))
comp_args()
try:
exec 'def f(a=1, (b, c)): pass'
raise TestFailed, "non-default args after default"
except SyntaxError:
pass
if verbose:
print "testing bad float literals"
def expect_error(s):
try:
eval(s)
raise TestFailed("%r accepted" % s)
except SyntaxError:
pass
expect_error("2e")
expect_error("2.0e+")
expect_error("1e-")
expect_error("3-4e/21")
if verbose:
print "testing literals
|
with leading zeroes"
def expect_same(test_source, expected):
got = eval(test_source)
if got != expected:
raise TestFailed("eval(%r) gave %r, but expected %r" %
(test_source, got, expected))
expect_error("077787")
expect_error("0xj")
expect_error("0x.")
expect_error("0e")
expect_same("0777", 511)
expect_same("0777L", 511)
expect_same("000777", 511)
expect_same("0xff", 255)
expect_same("0xffL", 255)
expect_same("0XfF", 255)
expect_same("0777.", 777)
expect_same("0777.0", 777)
expect_same("000000000000000000000000000000000000000000000000000777e0", 777)
expect_same("0777e1", 7770)
expect_same("0e0", 0)
expect_same("0000E-012", 0)
expect_same("09.5", 9.5)
expect_same("0777j", 777j)
expect_same("00j", 0j)
expect_same("00.0", 0)
expect_same("0e3", 0)
expect_same("090000000000000.", 90000000000000.)
expect_same("090000000000000.0000000000000000000000", 90000000000000.)
expect_same("090000000000000e0", 90000000000000.)
expect_same("090000000000000e-0", 90000000000000.)
expect_same("090000000000000j", 90000000000000j)
expect_error("090000000000000") # plain octal literal w/ decimal digit
expect_error("080000000000000") # plain octal literal w/ decimal digit
expect_error("000000000000009") # plain octal literal w/ decimal digit
expect_error("000000000000008") # plain octal literal w/ decimal digit
expect_same("000000000000007", 7)
expect_same("000000000000008.", 8.)
expect_same("000000000000009.", 9.)
# Verify treatment of unary minus on negative numbers SF bug #660455
expect_same("0xffffffff", -1)
expect_same("-0xffffffff", 1)
|
lucaskanashiro/debile
|
tests/test_slave_cli.py
|
Python
|
mit
| 370
| 0.010811
|
# Run with nosetests tests/test_slave_cli.py
import debile.slave.cli as slave
def test_parse_args():
args = slave.parse_args(['--auth', 'simple', '--config', \
'/etc/debile/slave.yaml', '-s', '-d'])
    assert args.auth_method == 'simple'
assert args.config == '/etc/debile/slave.yaml'
assert args.syslog == True
assert args.debug == True
|
elyezer/robottelo
|
robottelo/ui/locators/menu.py
|
Python
|
gpl-3.0
| 10,740
| 0
|
# -*- encoding: utf-8 -*-
"""Implements different locators for UI"""
from selenium.webdriver.common.by import By
from .model import LocatorDict
NAVBAR_PATH = (
'//div[contains(@class,"navbar-inner") and '
'not(contains(@style, "display"))]'
)
MENU_CONTAINER_PATH = NAVBAR_PATH + '//ul[@id="menu"]'
ADM_MENU_CONTAINER_PATH = NAVBAR_PATH + '//ul[@id="menu2"]'
menu_locators = LocatorDict({
# Menus
# Navbar
"navbar.spinner": (By.XPATH, ("//div[@id='turbolinks-progress']")),
# Monitor Menu
"menu.monitor": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='monitor_menu']")),
"menu.dashboard": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_dashboard']")),
"menu.reports": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_reports']")),
"menu.facts": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_fact_values']")),
"menu.statistics": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_statistics']")),
"menu.trends": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_trends']")),
"menu.audits": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_audits']")),
"menu.jobs": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_job_invocations']")),
# Content Menu
"menu.content": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='content_menu']")),
"menu.life_cycle_environments": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_environments']")),
"menu.red_hat_subscriptions": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_red_hat_subscriptions']")),
"menu.activation_keys": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_activation_keys']")),
"menu.red_hat_repositories": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_redhat_provider']")),
"menu.products": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_products']")),
"menu.gpg_keys": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_gpg_keys']")),
"menu.sync_status": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_sync_status']")),
"menu.sync_plans": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_sync_plans']")),
"menu.content_views": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_content_views']")),
"menu.errata": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_errata']")),
"menu.packages": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_packages']")),
"menu.puppet_modules": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_puppet_modules']")),
"menu.docker_tags": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a
|
[@id='menu_item_docke
|
r_tags']")),
# Containers Menu
"menu.containers": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='containers_menu']")),
"menu.all_containers": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_containers']")),
"menu.new_container": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_new_container']")),
"menu.registries": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_registries']")),
# Hosts Menu
"menu.hosts": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='hosts_menu']")),
"menu.all_hosts": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_hosts']")),
"menu.discovered_hosts": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_discovered_hosts']")),
"menu.content_hosts": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_content_hosts']")),
"menu.host_collections": (
By.XPATH,
(MENU_CONTAINER_PATH +
"//a[@id='menu_item_host_collections']")),
"menu.operating_systems": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_operatingsystems']")),
"menu.provisioning_templates": (
By.XPATH,
(MENU_CONTAINER_PATH +
"//a[@id='menu_item_provisioning_templates']")),
"menu.partition_tables": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_partition_tables']")),
"menu.job_templates": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_job_templates']")),
"menu.installation_media": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_media']")),
"menu.hardware_models": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_models']")),
"menu.architectures": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_architectures']")),
"menu.oscap_policy": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_compliance_policies']")),
"menu.oscap_content": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_compliance_contents']")),
"menu.oscap_reports": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_compliance_reports']")),
# Configure Menu
"menu.configure": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='configure_menu']")),
"menu.host_groups": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_hostgroups']")),
"menu.discovery_rules": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_discovery_rules']")),
"menu.global_parameters": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_common_parameters']")),
"menu.environments": (
By.XPATH,
(MENU_CONTAINER_PATH +
"//li[contains(@class,'menu_tab_environments')]"
"/a[@id='menu_item_environments']")),
"menu.puppet_classes": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_puppetclasses']")),
"menu.smart_variables": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_variable_lookup_keys']")),
"menu.configure_groups": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_config_groups']")),
# Infrastructure Menu
"menu.infrastructure": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='infrastructure_menu']")),
"menu.smart_proxies": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_smart_proxies']")),
"menu.compute_resources": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_compute_resources']")),
"menu.compute_profiles": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_compute_profiles']")),
"menu.subnets": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_subnets']")),
"menu.domains": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_domains']")),
# Access Insights menu
"menu.insights": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='redhat_access_top_menu']")),
"insights.overview": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@href='/redhat_access/insights']")),
"insights.rules": (
By.XPATH,
(MENU_CONTAINER_PATH +
"//a[@href='/redhat_access/insights/rules/']")),
"insights.systems": (
By.XPATH,
(MENU_CONTAINER_PATH +
"//a[@href='/redhat_access/insights/systems/']")),
"insights.manage": (
By.XPATH,
(MENU_CONTAINER_PATH +
"//a[@href='/redhat_access/insights/manage']")),
# Administer Menu
"menu.administer": (
By.XPATH,
(ADM_MENU_CONTAINER_PATH + "//a[@id='administer_menu']")),
"menu.ldap_auth": (
By.XPATH,
(ADM_MENU_CONTAINER_PATH + "//a[@id='menu_item_auth_source_ldaps']")),
"menu.users": (
By.XPATH,
(ADM_MENU_CONTAINER_PATH + "//a[@id='menu_item_users']")),
"menu.user_groups": (
By.XPATH,
(ADM_MENU_CONTAINER_PATH + "//a[@id='menu_item_usergroups']")),
"menu.roles": (
By.XP
|
crypto101/clarent
|
clarent/exercise.py
|
Python
|
isc
| 1,105
| 0.002715
|
"""
Public exercise API.
"""
from twisted.protocols import amp
from txampext.errors import Error
class UnknownExercise(Error):
"""The exercise was not recognized.
"""
class GetExercises(amp.Command):
"""
Gets the identifiers and titles of some exercises.
"""
arguments = [
(b"solved", amp.Boolean())
]
response = [
(b"exercises", amp.AmpList([
(b"identifier", amp.String()),
(b"title",
|
amp.Unicode())
]))
]
class GetExerciseDetails(amp.Command):
"""
    Gets the details of a particular exercise.
"""
arguments = [
(b"identifier", amp.String())
]
response = [
(b"title", amp.Unicode()),
(b"description", amp.Unicode()),
(b"solved", amp.Boolean())
]
errors = dict([
        UnknownExercise.asAMP()
])
class NotifySolved(amp.Command):
"""Notify the client that they have solved an exercise.
"""
arguments = [
(b"identifier", amp.String()),
(b"title", amp.Unicode())
]
response = []
requiresAnswer = False
|
lecaoquochung/ddnb.django
|
django/contrib/gis/db/models/fields.py
|
Python
|
bsd-3-clause
| 12,573
| 0.000954
|
from django.db.models.fields import Field
from django.db.models.sql.expressions import SQLEvaluator
from django.utils.translation import ugettext_lazy as _
from django.contrib.gis import forms
from django.contrib.gis.db.models.constants import GIS_LOOKUPS
from django.contrib.gis.db.models.lookups import GISLookup
from django.contrib.gis.db.models.proxy import GeometryProxy
from django.contrib.gis.geometry.backend import Geometry, GeometryException
from django.utils import six
# Local cache of the spatial_ref_sys table, which holds SRID data for each
# spatial database alias. This cache exists so that the database isn't queried
# for SRID info each time a distance query is constructed.
_srid_cache = {}
def get_srid_info(srid, connection):
"""
Returns the units, unit name, and spheroid WKT associated with the
given SRID from the `spatial_ref_sys` (or equivalent) spatial database
table for the given database connection. These results are cached.
"""
global _srid_cache
try:
# The SpatialRefSys model for the spatial backend.
SpatialRefSys = connection.ops.spatial_ref_sys()
except NotImplementedError:
# No `spatial_ref_sys` table in spatial backend (e.g., MySQL).
return None, None, None
if connection.alias not in _srid_cache:
# Initialize SRID dictionary for database if it doesn't exist.
_srid_cache[connection.alias] = {}
if srid not in _srid_cache[connection.alias]:
# Use `SpatialRefSys` model to query for spatial reference info.
sr = SpatialRefSys.objects.using(connection.alias).get(srid=srid)
units, units_name = sr.units
spheroid = SpatialRefSys.get_spheroid(sr.wkt)
_srid_cache[connection.alias][srid] = (units, units_name, spheroid)
return _srid_cache[connection.alias][srid]
class GeometryField(Field):
"The base GIS field -- maps to the OpenGIS Specification Geometry type."
# The OpenGIS Geometry name.
geom_type = 'GEOMETRY'
form_class = forms.GeometryField
# Geodetic units.
geodetic_units = ('decimal degree', 'degree')
description = _("The base GIS field -- maps to the OpenGIS Specification Geometry type.")
def __init__(self, verbose_name=None, srid=4326, spatial_index=True, dim=2,
geography=False, **kwargs):
"""
The initialization function for geometry fields. Takes the following
as keyword arguments:
srid:
The spatial reference system identifier, an OGC standard.
Defaults to 4326 (WGS84).
spatial_index:
Indicates whether to create a spatial index. Defaults to True.
Set this instead of 'db_index' for geographic fields since index
creation is different for geometry columns.
dim:
The number of dimensions for this geometry. Defaults to 2.
extent:
Customize the extent, in a 4-tuple of WGS 84 coordinates, for the
geometry field entry in the `USER_SDO_GEOM_METADATA` table. Defaults
to (-180.0, -90.0, 180.0, 90.0).
tolerance:
Define the tolerance, in meters, to use for the geometry field
entry in the `USER_SDO_GEOM_METADATA` table. Defaults to 0.05.
"""
# Setting the index flag with the value of the `spatial_index` keyword.
self.spatial_index = spatial_index
# Setting the SRID and getting the units. Unit information must be
# easily available in the field instance for distance queries.
self.srid = srid
# Setting the dimension of the geometry field.
self.dim = dim
# Setting the verbose_name keyword argument with the positional
# first parameter, so this works like normal fields.
kwargs['verbose_name'] = verbose_name
# Is this a geography rather than a geometry column?
self.geography = geography
# Oracle-specific private attributes for creating the entry in
# `USER_SDO_GEOM_METADATA`
self._extent = kwargs.pop('extent', (-180.0, -90.0, 180.0, 90.0))
self._tolerance = kwargs.pop('tolerance', 0.05)
super(GeometryField, self).__init__(**kwargs)
def deconstruct(self):
name, path, args, kwargs = super(GeometryField, self).deconstruct()
# Always include SRID for less fragility; include others if they're
# not the default values.
kwargs['srid'] = self.srid
if self.dim != 2:
kwargs['dim'] = self.dim
if self.spatial_index is not True:
kwargs['spatial_index'] = self.spatial_index
if self.geography is not False:
kwargs['geography'] = self.geography
return name, path, args, kwargs
# The following functions are used to get the units, their name, and
# the spheroid corresponding to the SRID of the GeometryField.
def _get_srid_info(self, connection):
# Get attributes from `get_srid_info`.
self._units, self._units_name, self._spheroid = get_srid_info(self.srid, connection)
def spheroid(self, connection):
if not hasattr(self, '_spheroid'):
self._get_srid_info(connection)
return self._spheroid
def units(self, connection):
if not hasattr(self, '_units'):
self._get_srid_info(connection)
return self._units
def units_name(self, connection):
if not hasattr(self, '_units_name'):
self._get_srid_info(connection)
return self._units_name
### Routines specific to GeometryField ###
def geodetic(self, connection):
"""
Returns true if this field's SRID corresponds with a coordinate
system that uses non-projected units (e.g., latitude/longitude).
"""
return self.units_name(connection).lower() in self.geodetic_units
def get_distance(self, value, lookup_type, connection):
"""
Returns a distance number in units of the field. For example, if
`D(km=1)` was passed in and the units of the field were in meters,
then 1000 would be returned.
"""
return connection.ops.get_distance(self, value, lookup_type)
    def get_prep_value(self, value):
"""
Spatial lookup values are either a parameter that is (or may be
converted to) a geometry, or a sequence of lookup values that
begins with a geometry. This routine will setup the geometry
value properly, and preserve any other lookup parameters before
returning to the caller.
"""
value = super(GeometryField, self).get_prep_value(value)
        if isinstance(value, SQLEvaluator):
return value
elif isinstance(value, (tuple, list)):
geom = value[0]
seq_value = True
else:
geom = value
seq_value = False
# When the input is not a GEOS geometry, attempt to construct one
# from the given string input.
if isinstance(geom, Geometry):
pass
elif isinstance(geom, (bytes, six.string_types)) or hasattr(geom, '__geo_interface__'):
try:
geom = Geometry(geom)
except GeometryException:
raise ValueError('Could not create geometry from lookup value.')
else:
raise ValueError('Cannot use object with type %s for a geometry lookup parameter.' % type(geom).__name__)
# Assigning the SRID value.
geom.srid = self.get_srid(geom)
if seq_value:
lookup_val = [geom]
lookup_val.extend(value[1:])
return tuple(lookup_val)
else:
return geom
def get_srid(self, geom):
"""
Returns the default SRID for the given geometry, taking into account
the SRID set for the field. For example, if the input geometry
has no SRID, then that of the field will be returned.
"""
gsrid = geom.srid # SRID of given geometry.
if gsrid is None or self.srid == -1 or (gsrid == -1 and self.srid != -1):
return self.srid
else:
            return gsrid
|
jjshoe/ansible-modules-core
|
cloud/vmware/vsphere_guest.py
|
Python
|
gpl-3.0
| 65,239
| 0.001456
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# TODO:
# Ability to set CPU/Memory reservations
try:
import json
except ImportError:
import simplejson as json
HAS_PYSPHERE = False
try:
from pysphere import VIServer, VIProperty, MORTypes
from pysphere.resources import VimService_services as VI
from pysphere.vi_task import VITask
from pysphere import VIException, VIApiException, FaultTypes
HAS_PYSPHERE = True
except ImportError:
pass
import ssl
DOCUMENTATION = '''
---
module: vsphere_guest
short_description: Create/delete/manage a guest VM through VMware vSphere.
description:
- Create/delete/reconfigure a guest VM through VMware vSphere. This module has a dependency on pysphere >= 1.7
version_added: "1.6"
options:
vcenter_hostname:
description:
- The hostname of the vcenter server the module will connect to, to create the guest.
required: true
default: null
aliases: []
validate_certs:
description:
- Validate SSL certs. Note, if running on python without SSLContext
support (typically, python < 2.7.9) you will have to set this to C(no)
as pysphere does not support validating certificates on older python.
Prior to 2.1, this module would always validate on python >= 2.7.9 and
never validate on python <= 2.7.8.
required: false
default: yes
choices: ['yes', 'no']
version_added: 2.1
guest:
description:
- The virtual server name you wish to manage.
required: true
username:
description:
- Username to connect to vcenter as.
required: true
default: null
password:
description:
- Password of the user to connect to vcenter as.
required: true
default: null
resource_pool:
description:
- The name of the resource_pool to create the VM in.
required: false
default: None
cluster:
description:
- The name of the cluster to create the VM in. By default this is derived from the host you tell the module to build the guest on.
required: false
default: None
esxi:
description:
- Dictionary which includes datacenter and hostname on which the VM should be created. For standalone ESXi hosts, ha-datacenter should be used as the datacenter name
required: false
default: null
state:
description:
- Indicate desired state of the vm. 'reconfigured' only applies changes to 'memory_mb' and 'num_cpus' in vm_hardware parameter, and only when hot-plugging is enabled for the guest.
default: present
choices: ['present', 'powered_off', 'absent', 'powered_on', 'restarted', 'reconfigured']
from_template:
version_added: "1.9"
description:
- Specifies if the VM should be deployed from a template (mutually exclusive with 'state' parameter). No guest customization changes to hardware such as CPU, RAM, NICs or Disks can be applied when launching from template.
default: no
choices: ['yes', 'no']
template_src:
version_added: "1.9"
description:
- Name of the source template to deploy from
default: None
snapshot_to_clone:
description:
- A string that when specified, will create a linked clone copy of the VM. Snapshot must already be taken in vCenter.
version_added: "2.0"
required: false
default: none
power_on_after_clone:
description:
- Specifies if the VM should be powered on after the clone.
required: false
default: yes
choices: ['yes', 'no']
vm_disk:
description:
- A key, value list of disks and their sizes and which datastore to keep it in.
required: false
default: null
vm_hardware:
description:
- A key, value list of VM config settings. Must include ['memory_mb', 'num_cpus', 'osid', 'scsi'].
required: false
default: null
vm_nic:
description:
- A key, value list of nics, their types and what network to put them on.
required: false
default: null
vm_extra_config:
description:
- A key, value pair of any extra values you want set or changed in the vmx file of the VM. Useful to set advanced options on the VM.
required: false
default: null
vm_hw_version:
description:
- Desired hardware version identifier (for example, "vmx-08" for vms that needs to be managed with vSphere Client). Note that changing hardware version of existing vm is not supported.
required: false
default: null
version_added: "1.7"
vmware_guest_facts:
description:
- Gather facts from vCenter on a particular VM
required: false
default: null
force:
description:
- Boolean. Allows you to run commands which may alter the running state of a guest. Also used to reconfigure and destroy.
default: "no"
choices: [ "yes", "no" ]
notes:
- This module should run from a system that can access vSphere directly.
Either by using local_action, or using delegate_to.
author: "Richard Hoop (@rhoop) <wrhoop@gmail.com>"
requirements:
- "python >= 2.6"
- pysphere
'''
EXAMPLES = '''
# Create a new VM on an ESX server
# Returns changed = False when the VM already exists
# Returns changed = True and adds ansible_facts from the new VM
# State will set the power status of a guest upon creation. Use powered_on to create and boot.
# Options ['state', 'vm_extra_config', 'vm_disk', 'vm_nic', 'vm_hardware', 'esxi'] are required together
# Note: vm_floppy support added in 2.0
- vsphere_guest:
vcenter_hostname: vcenter.mydomain.local
username: myuser
password: mypass
guest: newvm001
state: powered_on
vm_extra_config:
vcpu.hotadd: yes
mem.hotadd: yes
notes: This is a test VM
folder: MyFolder
vm_disk:
disk1:
size_gb: 10
type: thin
datastore: storage001
# VMs can be put into folders. The value given here is either the full path
# to the folder (e.g. production/customerA/lamp) or just the last component
# of the path (e.g. lamp):
folder: production/customerA/lamp
vm_nic:
nic1:
type: vmxnet3
network: VM Network
network_type: standard
nic2:
type: vmxnet3
network: dvSwitch Network
network_type: dvs
vm_hardware:
memory_mb: 2048
num_cpus: 2
osid: centos64Guest
scsi: paravirtual
vm_cdrom:
type: "iso"
iso_path: "DatastoreName/cd-image.iso"
vm_floppy:
type: "image"
image_path: "DatastoreName/floppy-image.flp"
esxi:
datacenter: MyDatacenter
hostname: esx001.mydomain.local
# Reconfigure the CPU and Memory on the newly created VM
# Will return the changes made
- vsphere_guest:
vcenter_hostname: vcenter.mydomain.local
username: myuser
password: mypass
guest: newvm001
state: reconfigured
vm_extra_config:
vcpu.hotadd: yes
mem.hotadd: yes
notes: This is a test VM
vm_disk:
disk1:
size_gb: 10
type: thin
datastore: storage001
vm_nic:
nic1:
type: vmxnet3
network: VM Network
network_type: standard
vm_hardware:
memory_mb: 4096
num_cpus: 4
osid: centos64Guest
scsi: paravirtual
esxi:
datacenter: MyDatacenter
hostname: esx001.mydomain.local
# Deploy a guest from a template
- vsphere_guest:
vcenter_hostname: vcenter.mydomain.local
username: myuser
password: mypass
guest: newvm001
from_template: yes
te
|
3stack-software/credsmash
|
credsmash/__init__.py
|
Python
|
apache-2.0
| 686
| 0
|
#!/usr/bin/env python
# Copyright 2015 Luminal, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pkg_resources
__version__ = pkg_resources.resource_string(__name__, 'VERSION')
|
benoitc/pywebmachine
|
pywebmachine/resource.py
|
Python
|
mit
| 3,011
| 0.004982
|
# -*- coding: utf-8 -*-
#
# This file is part of pywebmachine released under the MIT license.
# See the NOTICE for more information.
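# Default no-op implementations of the webmachine-style resource callbacks.
# Subclasses override the relevant hooks (e.g. content_types_provided,
# resource_exists, allowed_methods) to describe how a resource behaves.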
class Resource(object):
def __init__(self, req, rsp):
pass
def allowed_methods(self, req, rsp):
return ["GET", "HEAD"]
def allow_missing_post(self, req, rsp):
return False
def auth_required(self, req, rsp):
return True
def charsets_provided(self, req, rsp):
"""\
return [("iso-8859-1", lambda x: x)]
Returning None prevents the character set negotiation
logic.
"""
return None
def content_types_accepted(self, req, rsp):
return None
    def content_types_provided(self, req, rsp):
return [
("text/html", self.to_html)
]
def created_location(self, req, rsp):
return None
def delete_completed(self, req, rsp):
return True
def delete_resource(self, req, rsp):
return False
def encodings_provided(self, req, rsp):
"""\
return [("identity", lambda x: x)]
        Returning None prevents the encoding negotiation logic.
"""
return None
def expires(self, req, rsp):
return None
def finish_request(self, req, rsp):
return True
def forbidden(self, req, rsp):
return False
def generate_etag(self, req, rsp):
return None
def is_authorized(self, req, rsp):
return True
def is_conflict(self, req, rsp):
return False
def known_content_type(self, req, rsp):
return True
def known_methods(self, req, rsp):
return set([
"GET", "HEAD", "POST", "PUT", "DELETE",
"TRACE", "CONNECT", "OPTIONS"
])
def languages_provided(self, req, rsp):
"""\
return ["en", "es", "en-gb"]
returning None short circuits the language negotiation
"""
return None
def last_modified(self, req, rsp):
return None
def malformed_request(self, req, rsp):
return False
def moved_permanently(self, req, rsp):
return False
def moved_temporarily(self, req, rsp):
return False
def multiple_choices(self, req, rsp):
return False
def options(self, req, rsp):
return []
def ping(self, req, rsp):
return True
def post_is_create(self, req, rsp):
return False
def previously_existed(self, req, rsp):
return False
def process_post(self, req, rsp):
return False
def resource_exists(self, req, rsp):
return True
def service_available(self, req, rsp):
return True
def uri_too_long(self, req, rsp):
return False
def valid_content_headers(self, req, rsp):
return True
def valid_entity_length(self, req, rsp):
return True
def variances(self, req, rsp):
return []
|
72squared/redpipe
|
docs/conf.py
|
Python
|
mit
| 5,400
| 0
|
# -*- coding: utf-8 -*-
#
# RedPipe documentation build configuration file, created by
# sphinx-quickstart on Wed Apr 19 13:22:45 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
import os
import sys
from os import path
ROOTDIR = path.abspath(os.path.dirname(os.path.dirname(__file__)))
sys.path.insert(0, ROOTDIR)
import redpipe # noqa
extensions = [
'alabaster',
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'RedPipe'
copyright = u'2017, John Loehrer'
author = u'John Loehrer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = redpipe.__version__
# The full version, including alpha/beta/rc tags.
release = redpipe.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'logo': 'redpipe-logo.gif',
'github_banner': True,
'github_user': '72squared',
'github_repo': 'redpipe',
'travis_button': True,
'analytics_id': 'UA-98626018-1',
}
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
]
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'RedPipedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'RedPipe.tex', u'%s Documentation' % project,
u'John Loehrer', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, project, u'%s Documentation' % project,
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, project, u'%s Documentation' % project,
author, project, 'making redis pipelines easy in python',
'Miscellaneous'),
]
suppress_warnings = ['image.nonlocal_uri']
|
SergeyMakarenko/fbthrift
|
thrift/lib/py3/__init__.py
|
Python
|
apache-2.0
| 800
| 0
|
#!/usr/bin/env python3
__all__ = [
'get_client', 'Client', 'ThriftServer', 'Struct', 'BadEnum', 'Error',
'ApplicationError', 'TransportError', 'SSLPolicy',
]
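# Each optional binding is imported defensively: if its extension module is not
# available, the corresponding names are removed from __all__ instead of failing.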
try:
from thrift.py3.client import get_client, Client
except ImportError:
__all__.remove('Client')
__all__.remove('get_client')
try:
from thrift.py3.server import ThriftServer, SSLPolicy
except ImportError:
__all__.remove('ThriftServer')
__all__.remove('SSLPolicy')
try:
from thrift.py3.types import Struct, BadEnum
except ImportError:
__all__.remove('Struct')
__all__.remove('BadEnum')
try:
    from thrift.py3.exceptions import Error, ApplicationError, TransportError
except ImportError:
__all__.remove('Error')
__all__.remove('ApplicationError')
__all__.remove('TransportError')
|
fxstein/SentientHome
|
rules/plugin.rules.py
|
Python
|
apache-2.0
| 3,196
| 0.000626
|
#!/usr/local/bin/python3 -u
"""
Author: Oliver Ratzesberger <https://github.com/fxstein>
Copyright: Copyright (C) 2016 Oliver Ratzesberger
License: Apache License, Version 2.0
"""
# Make sure we have access to SentientHome commons
import os
import sys
try:
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/..')
except:
exit(1)
import time
from cement.core import hook
def process_event(app, event_type, event):
app.log.debug('process_event() Event: %s %s' %
(event_type, event), __name__)
try:
if event_type == 'isy' and event['Event.node'] is not None:
# Lookup name for easy rules coding
nodename = app.isy._nodedict[event['Event.node']]['name']
app.log.warn('ISY Node Event: %s %s: %s' %
(event['Event.node'], nodename, event), __name__)
if nodename == 'Master - Lights' and\
event['Event.control'] == 'DON':
app.log.error('Auto Off for: %s %s' %
(event['Event.node'], nodename), __name__)
time.sleep(5)
app.isy[event['Event.node']].off()
except Exception as e:
app.log.error(e)
# if event_type == 'isy' and event['Event.node'] == '24 0 93 1':
# app.log.warn('!!!!!!!!!!FOUNTAIN!!!!!!!!!!!')
# elif etype == 'isy' and event['Event.node'] == '29 14 86 1':
# app.log.debug('!!!!!!!!!!LIVING - WINDOW - OUTLET!!!!!!!!!!!')
# elif etype == 'isy' and state['control'] == 'DON':
# app.log.debug('Node: %s TURNED ON!!!!!!!!!!!!!!!!' %
# event['Event.node'])
# elif etype == 'isy' and state['control'] == 'ST':
# app.log.debug('Node: %s SET TARGET!!!!!!!!!!!!!!!' %
# event['Event.node'])
#
# if etype == 'ubnt.mfi.sensor':
# # Slow test workload for async task
# app.log.debug('mFi Sensor event: %s' % event)
# # log.debug('Pause for 10 sec')
# # yield from asyncio.sleep(10)
# # log.debug('Back from sleep')
#
# # Test mFi Sensor rule
# if etype == 'ubnt.mfi.sensor' and event['label'] == 'Well.Well.Pump':
# if event['amps'] < 21 and event['amps'] > 15:
# # Turn off the well pump for set amount of time
# app.log.info('!!!!!!!! WELL PUMP SAVER ACTION !!!!!!!!!')
#
# # First put pump to sleep
# well_pump = app.isy.get_node("Well - Well Pump")
# if well_pump:
# well_pump.off()
# # yield from asyncio.sleep(2)
# # well_pump.off()
# #
# # # Then schedule wakeup at a later time
# # yield from asyncio.sleep(900)
# # well_pump.on()
# # yield from asyncio.sleep(2)
# # well_pump.on()
def load(app):
hook.register('process_event', process_event)
    app.log.info('Successful Rules Plugin registration', __name__)
#
# Do nothing
# (syntax check)
#
if __name__ == "__main__":
import __main__
print(__main__.__file__)
print("syntax ok")
exit(0)
|
XiaofanZhang/ROPgadget
|
ropgadget/ropparse/arch/parserx86.py
|
Python
|
gpl-2.0
| 17,183
| 0.013967
|
#!/usr/bin/env python2
##
## We define Instrution as two types "Computing instruction" and "Control Transfer instruction"
## for computing instruction
## "NAME" : [ Operand_Number , [ Formula_that_modify_reg ], [ FLAG_reg_modified]]
## for control transfter instruciton
## "NAME" : [ Operand_Number , [ Formula_that_modify_reg ], [ DST_Addr_on_condition]]
##
from capstone import *
from expression import Exp
from semantic import Semantic
from copy import deepcopy
class X86:
FLAG = ["CF", "PF", "AF", "ZF", "SF", "TF", "IF", "DF", "OF"]
regs64 = ["rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp", "r8", "r9", "r10", "r11", "r12",
"r13", "r14", "r15", "cs", "ds", "es", "fs", "gs", "ss"]
regs32 = ["eax", "ebx", "ecx", "edx", "cs", "ds", "es", "fs", "gs", "ss", "esi", "edi", "ebp", "esp", "eip"]
Tregs64 = {
"eax" : ["rax $ 0 : 31", "rax = ( rax $ 32 : 63 ) # eax", 32],
"ax" : ["rax $ 0 : 15", "rax = ( rax $ 16 : 63 ) # ax", 16],
"ah" : ["rax $ 8 : 15", "rax = ( rax $ 16 : 63 ) # ah # ( rax $ 0 : 7 )", 8],
"al" : ["rax $ 0 : 7", "rax = ( rax $ 8 : 63 ) # al", 8],
"ebx" : ["rbx $ 0 : 31", "rbx = ( rbx $ 32 : 63 ) # ebx", 32],
"bx" : ["rbx $ 0 : 15", "rbx = ( rbx $ 16 : 63 ) # bx", 16],
"bh" : ["rbx $ 8 : 15", "rbx = ( rbx $ 16 : 63 ) # bh # ( rbx $ 0 : 7 )", 8],
"bl" : ["rbx $ 0 : 7", "rbx = ( rbx $ 8 : 63 ) # bl", 8],
"ecx" : ["rcx $ 0 : 31", "rcx = ( rcx $ 32 : 63 ) # ecx", 32],
"cx" : ["rcx $ 0 : 15", "rcx = ( rcx $ 16 : 63 ) # cx", 16],
"ch" : ["rcx $ 8 : 15", "rcx = ( rcx $ 16 : 63 ) # ch # ( rcx $ 0 : 7 )", 8],
"cl" : ["rcx $ 0 : 7", "rcx = ( rcx $ 8 : 63 ) # cl", 8],
"edx" : ["rdx $ 0 : 31", "rdx = ( rdx $ 32 : 63 ) # edx", 32],
"dx" : ["rdx $ 0 : 15", "rdx = ( rdx $ 16 : 63 ) # dx", 16],
"dh" : ["rdx $ 8 : 15", "rdx = ( rdx $ 16 : 63 ) # dh # ( rdx $ 0 : 7 )", 8],
"dl" : ["rdx $ 0 : 7", "rdx = ( rdx $ 8 : 63 ) # dl", 8],
}
Tregs32 = {
"ax" : ["eax $ 0 : 15", "eax = ( eax $ 16 : 31 ) # ax", 16],
"ah" : ["eax $ 8 : 15", "eax = ( eax $ 16 : 31 ) # ah # ( eax $ 0 : 7 )", 8],
"al" : ["eax $ 0 : 7", "eax = ( eax $ 8 : 31 ) # al", 8],
"bx" : ["ebx $ 0 : 15", "ebx = ( ebx $ 16 : 31 ) # bx", 16],
"bh" : ["ebx $ 8 : 15", "ebx = ( ebx $ 16 : 31 ) # bh # ( ebx $ 0 : 7 )", 8],
"bl" : ["ebx $ 0 : 7", "ebx = ( ebx $ 8 : 31 ) # bl", 8],
"cx" : ["ecx $ 0 : 15", "ecx = ( ecx $ 16 : 31 ) # cx", 16],
"ch" : ["ecx $ 8 : 15", "ecx = ( ecx $ 16 : 31 ) # ch # ( ecx $ 0 : 7 )", 8],
"cl" : ["ecx $ 0 : 7", "ecx = ( ecx $ 8 : 31 ) # cl", 8],
"dx" : ["edx $ 0 : 15", "edx = ( edx $ 16 : 31 ) # dx", 16],
"dh" : ["edx $ 8 : 15", "edx = ( edx $ 16 : 31 ) # dh # ( edx $ 0 : 7 )", 8],
"dl" : ["edx $ 0 : 7", "edx = ( edx $ 8 : 31 ) # dl", 8],
}
# Instructions that modifty the execution path
Control = ["ret", "iret", "int", "into", "enter", "leave", "call", "jmp", "ja", "jae", "jb", "jbe", "jc", "je","jnc", "jne", "jnp", "jp", "jg", "jge", "jl", "jle", "jno", "jns", "jo", "js"]
insn = {
# data transfer
"mov": [2, ["operand1 = operand2"], []],
"cmove": [2, ["operand1 = ( ZF == 1 ) ? operand2 : operand1"], []],
"cmovne": [2, ["operand1 = ( ZF == 0 ) ? operand2 : operand1"], []],
"cmova": [2, ["operand1 = ( ( ZF == 0 ) & ( CF == 0 ) ) ? operand2 : operand1"], []],
"cmovae": [2, ["operand1 = ( CF == 0 ) ? operand2 : operand1"], []],
"cmovb": [2, ["operand1 = ( CF == 1 ) ? operand2 : operand1"], []],
"cmovbe": [2, ["operand1 = ( ( ZF == 1 ) | ( CF == 1 ) ) ? operand2 : operand1"], []],
"cmovg": [2, ["operand1 = ( ( ZF == 0 ) & ( SF == OF ) ) ? operand2 : operand1"], []],
"cmovge": [2, ["operand1 = ( SF == OF ) ? operand2 : operand1"], []],
"cmovl": [2, ["operand1 = ( SF != OF ) ? operand2 : operand1"], []],
"cmovle": [2, ["operand1 = ( ( ZF == 1 ) & ( SF != OF ) ) ? operand2 : operand1"], []],
"cmovs": [2, ["operand1 = ( SF == 1 ) ? operand2 : operand1"], []],
"cmovp": [2, ["operand1 = ( PF == 1 ) ? operand2 : operand1"], []],
"push": [1, ["* ssp = operand1"], []],
"pop": [1, ["operand1 = * ssp"], []],
#"movsx": [2, ["operand1 = operand2 > 0 ? operand2 : operand2 & 0xffffffffffffffff"], []],
#"movzx": [2, ["operand1 = 0 & operand2"], []],
# flag control instuctions
"stc": [0, [], ["CF = 1"]],
"clc": [0, [], ["CF = 0"]],
"cmc": [0, [], ["CF = ~ CF"]],
"cld": [0, [], ["DF = 0"]],
"std": [0, [], ["DF = 1"]],
"sti": [0, [], ["IF = 1"]],
"cli": [0, [], ["IF = 0"]],
# arithmetic
"xchg": [2, ["FIXME"], []],
"cmp": [2, ["temp = operand1 - operand2"], ["CF", "OF", "SF", "ZF", "AF", "PF"]],
"add": [2, ["operand1 = operand1 + operand2"], ["OF", "SF", "ZF", "AF", "CF", "PF"]],
"adc": [2, ["operand1 = operand1 + operand2 + CF"], ["OF", "SF", "ZF", "AF", "CF", "PF"]],
"sub": [2, ["operand1 = operand1 - operand2"], ["OF", "SF", "ZF", "AF", "CF", "PF"]],
"sbb": [2, ["operand1 = operand1 - operand2 - CF"], ["OF", "SF", "ZF", "AF", "CF", "PF"]],
"inc": [1, ["operand1 = operand1 + 1"], ["OF", "SF", "ZF", "AF", "PF"]],
"dec": [1, ["operand1 = operand1 - 1"], ["OF", "SF", "ZF", "AF", "PF"]],
"neg": [1, ["operand1 = - operand1"], ["CF", "OF", "SF", "ZF", "AF", "PF"]],
# control transfer
"ret": [1, [], ["* ssp"]],
"call": [1, [], ["* operand1"]],
"jmp": [1, [], ["* operand1"]],
"ja": [1, [], ["( ( CF == 0 ) & ( ZF == 0 ) ) ? * operand1 : 0"]],
"jae": [1, [], ["CF == 0 ? * operand1 : 0"]],
"jb": [1, [] , ["CF == 1 ? * operand1 : 0"]],
"jbe": [1, [] , ["( ( CF == 1 ) | ( ZF == 1 ) ) ? * operand1 : 0"]],
"jc": [1, [], ["CF == 1 ? *
|
operand1 : 0"]],
"je": [1, [], ["ZF == 1 ?
|
* operand1 : 0"]],
"jnc": [1, [], ["CF == 0 ? * operand1 : 0"]],
"jne": [1, [], ["ZF == 0 ? * operand1 : 0"]],
"jnp": [1, [], ["PF == 0 ? * operand1 : 0"]],
"jp": [1, [], ["PF == 1 ? * operand1 : 0"]],
"jg": [1, [], ["( ( ZF == 0 ) & ( SF == OF ) ) ? * operand1 : 0"]],
"jge": [1, [], ["SF == OF ? * operand1 : 0"]],
"jl": [1, [], ["SF != OF ? * operand1 : 0"]],
"jle": [1, [], ["( ( ZF == 1 ) | ( SF != OF ) ) ? * operand1 : 0"]],
"jno": [1, [], ["OF == 0 ? * operand1 : 0"]],
"jns": [1, [], ["SF == 0 ? * operand1 : 0"]],
"jo": [1, [], ["OF == 1 ? * operand1 : 0"]],
"js": [1, [], ["SF == 1 ? * operand1 : 0"]],
# logic
"and": [2, ["operand1 = operand1 & operand2"], ["CF = 0", "OF = 0", "SF", "ZF", "PF"]],
"or": [2, ["operand1 = operand1 | operand2"], ["CF = 0", "OF = 0", "SF", "ZF", "PF"]],
"xor": [2, ["operand1 = operand1 ^ operand2"], ["CF = 0","OF = 0", "SF", "ZF", "PF"]],
"not": [1, ["operand1 = ~ operand1"], []],
"test": [2, ["temp = operand1 & operand2"], ["OF = 0", "CF = 0", "SF", "ZF", "PF"]],
# segment
# others
"lea": [2, ["operand1 = & operand2"], []],
"nop": [0, [], []]
}
class ROPParserX86:
def __init__(self, gadgets, mode):
self.gadgets = gadgets
self.addrs = dict()
self.mode = mode
self.aligned = 0
self.memLoc = []
self.writeMem = {}
if mode == CS_MODE_32:
self.regs = X86.regs32 + X86.FLAG
self.Tregs = X86.Tregs32
self.aligned = 4
self.default = 32
self.sp = "esp"
self.ip = "eip"
else:
self.regs = X86.regs64 + X86.FLAG
self.Tregs = X86.Tregs64
self.aligned = 8
self.default = 64
self.sp = "rsp"
self.ip = "rip"
for k, v in X86.insn.items():
for i, s in enumerate(v[1]):
v[1][i] = s.replace("ssp", self.sp)
for i, s in enumerate(v[2]):
v[2][i] = s.replace("ssp", sel
|
MarauderXtreme/sipa
|
sipa/backends/types.py
|
Python
|
mit
| 268
| 0
|
from typing_extensions import Protocol
# noinspection PyPropertyDefinition
class UserLike(Protocol):
@property
def is_active(self) -> bool: ...
@property
def is_authenticated(self) -> bool: ...
@property
    def is_anonymous(self) -> bool: ...
|
jolynch/mit-tab
|
mittab/apps/tab/management/commands/load_test.py
|
Python
|
mit
| 4,345
| 0.001611
|
import re
from threading import Thread
import time
from django.core.management.base import BaseCommand
import requests
from mittab.apps.tab.models import Round, TabSettings
from mittab.apps.tab.management.commands import utils
class Command(BaseCommand):
help = "Load test the tournament, connecting via localhost and hitting the server"
def add_arguments(self, parser):
parser.add_argument(
"--host",
dest="host",
help="The hostname of the server to hit",
nargs="?",
default="localhost:8000")
parser.add_argument(
"--connections",
dest="connections",
help="The number of concurrent connections to open",
nargs="?",
default=10,
type=int)
def handle(self, *args, **options):
cur_round = TabSettings.get("cur_round") - 1
host = options["host"]
csrf_threads = []
rounds = Round.objects.filter(round_number=cur_round, victor=Round.NONE)
for round_obj in rounds:
judge = round_obj.chair
csrf_threads.append(GetCsrfThread(host, judge.ballot_code, round_obj))
num_errors = 0
while csrf_threads:
cur_csrf_threads = []
for _ in range(min(len(csrf_threads), options["connections"])):
cur_csrf_threads.append(csrf_threads.pop())
for thr in cur_csrf_threads:
thr.start()
for thr in cur_csrf_threads:
thr.join()
result_threads = []
for thr in cur_csrf_threads:
                csrf_token, thread_errors = thr.result
                num_errors += thread_errors
if csrf_token is None:
print("no csrf token")
result_thread = SubmitResultThread(
thr.host,
thr.ballot_code,
csrf_token,
thr.round_obj)
result_threads.append(result_thread)
for thr in result_threads:
thr.start()
for thr in result_threads:
thr.join()
for thr in result_threads:
num_errors += thr.num_errors
print("Done with one batch! Sleeping!")
time.sleep(2)
print("Done!")
        print("Total errors: %s" % num_errors)
class SubmitResultThread(Thread):
MAX_ERRORS = 10
def __init__(self, host, ballot_code, csrf_token, round_obj):
        super(SubmitResultThread, self).__init__()
self.host = host
self.ballot_code = ballot_code
self.csrf_token = csrf_token
self.round_obj = round_obj
self.num_errors = 0
self.resp = None
def run(self):
self.resp = self.get_resp()
def get_resp(self):
if self.num_errors >= self.MAX_ERRORS:
return None
result = utils.generate_random_results(self.round_obj, self.ballot_code)
result["csrfmiddlewaretoken"] = self.csrf_token
resp = requests.post("http://%s/e_ballots/%s/" % (self.host, self.ballot_code),
result,
cookies={"csrftoken": self.csrf_token})
if resp.status_code > 299:
self.num_errors += 1
return self.get_resp()
else:
return resp.text
class GetCsrfThread(Thread):
REGEX = "name=\"csrfmiddlewaretoken\" value=\"([^\"]+)\""
MAX_ERRORS = 10
def __init__(self, host, ballot_code, round_obj):
super(GetCsrfThread, self).__init__()
self.num_errors = 0
self.host = host
self.ballot_code = ballot_code
self.round_obj = round_obj
self.result = (None, None)
def run(self):
resp = self.get_resp()
if resp is None:
self.result = (None, self.num_errors)
else:
csrf = re.search(self.REGEX, resp).group(1)
self.result = (csrf, self.num_errors)
def get_resp(self):
if self.num_errors >= self.MAX_ERRORS:
return None
resp = requests.get("http://%s/e_ballots/%s" % (self.host, self.ballot_code))
if resp.status_code > 299:
self.num_errors += 1
return self.get_resp()
else:
return resp.text
|
OSUrobotics/peac_bridge
|
src/peac_bridge/peac_client.py
|
Python
|
bsd-3-clause
| 3,420
| 0.003509
|
#!/usr/bin/env python
import json
import requests
from requests.auth import HTTPBasicAuth
import urlparse
import time
class PEACInfo:
def __init__(self, url, method):
self.url = url
self.method = method
self.headers = {
'accept': 'application/json',
'Content-Type': 'application/json'
}
LOCATION_INFO = PEACInfo('/service/locations.json', 'GET')
DEVICES_INFO = PEACInfo('/service/locations/%(locationId)s/devices.json', 'GET')
CONTROLS_INFO = PEACInfo('/service/devices/%(deviceId)s/controls.json', 'GET')
UPDATE_INFO = PEACInfo('/service/controls/update.json', 'PUT')
class PEAC(object):
    def __init__(self, server, user, password, proxies={}):
self.server = server
self.user = user
self.password = password
self.proxies = proxies
def _make_url(self, peacinfo):
urlparts = list(urlparse.urlparse(self.server + peacinfo.url))
return urlparse.urlunparse(urlparts)
def _PEAC_request(self, peacinfo, payload=None, url_args=dict()):
url = self._make_url(peacinfo)
if payload:
            resp = requests.request(peacinfo.method, url % url_args, data=json.dumps(payload), headers=peacinfo.headers, auth=HTTPBasicAuth(self.user, self.password), proxies=self.proxies)
else:
resp = requests.request(peacinfo.method, url % url_args, headers=peacinfo.headers, auth=HTTPBasicAuth(self.user, self.password), proxies=self.proxies)
return resp
def list_locations(self):
'''
        This request retrieves all locations.
Request Type: GET
Parameters: none
Response: JSON array with Location Objects.
'''
return self._PEAC_request(LOCATION_INFO).json()
def list_devices(self, location_id):
'''
        This request gets the list of devices in location location_id
Request Type: GET
Parameters: locationId, the id retrieved by the previous call to locations.json
Response: JSON Array of Device objects.
'''
return self._PEAC_request(DEVICES_INFO, url_args=dict(locationId=location_id)).json()
def get_device_info(self, device_id):
'''
Retrieves the controls associated with device deviceId.
Request Type: GET
Parameters: deviceId, the id retrieved from the device.json call.
Response: JSON Array of Control objects.
'''
return self._PEAC_request(CONTROLS_INFO, url_args=dict(deviceId=device_id)).json()
def update_control(self, controlId, numval):
'''
Updates the control value. This call is used to 'press' a button.
Method: PUT
Params: JSON Control Update Object
Response: Control object
'''
# import pdb; pdb.set_trace()
return self._PEAC_request(UPDATE_INFO, payload=dict(id=controlId, numVal=numval)).json()
def test_credentials(self):
'''
Tests credentials against PEAC server
'''
return self._PEAC_request(LOCATION_INFO).status_code == 200
def test_server_responses():
import os
peac = PEAC('http://localhost:8000', os.environ['PEAC_USER'], os.environ['PEAC_PASSWORD'])
# print peac.list_locations()
# print peac.list_devices(83)
# print peac.get_device_info(1955)
print peac.update_control(5000,0)
if __name__ == '__main__':
test_server_responses()
|
CitrineInformatics/lolo
|
python/lolopy/learners.py
|
Python
|
apache-2.0
| 23,987
| 0.004586
|
from abc import abstractmethod, ABCMeta
import numpy as np
from lolopy.loloserver import get_java_gateway
from lolopy.utils import send_feature_array, send_1D_array
from sklearn.base import BaseEstimator, RegressorMixin, ClassifierMixin, is_regressor
from sklearn.exceptions import NotFittedError
__all__ = ['RandomForestRegressor', 'RandomForestClassifier', 'ExtraRandomTreesRegressor', 'ExtraRandomTreesClassifier']
class BaseLoloLearner(BaseEstimator, metaclass=ABCMeta):
"""Base object for all leaners that use Lolo.
Contains logic for starting the JVM gateway, and the fit operations.
It is only necessary to implement the `_make_learner` object and create an `__init__` function
to adapt a learner from the Lolo library for use in lolopy.
The logic for making predictions (i.e., `predict` and `predict_proba`) is specific to whether the learner
is a classification or regression model.
    In lolo, learners are not specific to a regression or classification problem and the type of problem is determined
when fitting data is provided to the algorithm.
In contrast, Scikit-learn learners for regression or classification problems are different classes.
We have implemented `BaseLoloRegressor` and `BaseLoloClassifier` abstract classes to make it easier to create
a classification or regression version of a Lolo base class.
The pattern for creating a scikit-learn compatible learner is to first implement the `_make_learner` and `__init__`
operations in a special "Mixin" class that inherits from `BaseLoloLearner`, and then create a regression- or
classification-specific class that inherits from both `BaseClassifier` or `BaseRegressor` and your new "Mixin".
See the RandomForest models as an example of this approach.
"""
def __init__(self):
self.gateway = get_java_gateway()
# Create a placeholder for the model
self.model_ = None
self._num_outputs = None
self._compress_level = 9
self.feature_importances_ = None
def __getstate__(self):
# Get the current state
try:
state = super(BaseLoloLearner, self).__getstate__()
except AttributeError:
state = self.__dict__.copy()
# Delete the gateway data
del state['gateway']
# If there is a model set, replace it with the JVM copy
if self.model_ is not None:
state['model_'] = self.gateway.jvm.io.citrine.lolo.util.LoloPyDataLoader.serializeObject(self.model_,
self._compress_level)
return state
def __setstate__(self, state):
# Unpickle the object
super(BaseLoloLearner, self).__setstate__(state)
# Get a pointer to the gateway
self.gateway = get_java_gateway()
# If needed, load the model into memory
if state['model_'] is not None:
bytes = state.pop('model_')
self.model_ = self.gateway.jvm.io.citrine.lolo.util.LoloPyDataLoader.deserializeObject(bytes)
def fit(self, X, y, weights=None):
# Instantiate the JVM object
learner = self._make_learner()
# Determine the number of outputs
y_shape = np.asarray(y).shape
if len(y_shape) == 1:
self._num_outputs = 1
elif len(y_shape) == 2:
self._num_outputs = y.shape[1]
else:
raise ValueError("Output array must be either 1- or 2-dimensional")
# Convert all of the training data to Java arrays
train_data, weights_java = self._convert_train_data(X, y, weights)
assert train_data.length() == len(X), "Array copy failed"
assert train_data.head()._1().length() == len(X[0]), "Wrong number of features"
assert weights_java.length() == len(X), "Weights copy failed"
# Train the model
result = learner.train(train_data, self.gateway.jvm.scala.Some(weights_java))
# Unlink the training data, which is no longer needed (to save memory)
self.gateway.detach(train_data)
self.gateway.detach(weights_java)
# Get the model out
self.model_ = result.getModel()
# Store the feature importances
feature_importances_java = result.getFeatureImportance().get()
feature_importances_bytes = self.gateway.jvm.io.citrine.lolo.util.LoloPyDataLoader.send1DArray(feature_importances_java)
self.feature_importances_ = np.frombuffer(feature_importances_bytes, 'float')
return self
@abstractmethod
def _make_learner(self):
"""Instantiate the learner used by Lolo to train a model
Returns:
(JavaObject) A lolo "Learner" object, which can be used to train a model"""
pass
def clear_model(self):
"""Utility operation for deleting model from JVM when no longer needed"""
if self.model_ is not None:
self.gateway.detach(self.model_)
self.model_ = None
def _convert_train_data(self, X, y, weights=None):
"""Convert the training data to a form accepted by Lolo
Args:
X (ndarray): Input variables
y (ndarray): Output variables
            weights (ndarray): Weights for each sample
Returns
train_data (JavaObject): Pointer to the training data in Java
"""
# Make some default weights
if weights is None:
weights = np.ones(len(y))
# Convert y and w to float64 or int32 with native ordering
y = np.array(y, dtype=np.float64 if is_regressor(self) else np.int32)
weights = np.array(weights, dtype=np.float64)
# Convert X, y, and w to Java Objects
X_java = send_feature_array(self.gateway, X)
if self._num_outputs == 1:
y_java = send_1D_array(self.gateway, y, is_regressor(self))
else:
y_java = send_feature_array(self.gateway, y)
assert y_java.length() == len(y) == len(X)
w_java = send_1D_array(self.gateway, weights, True)
assert w_java.length() == len(weights)
return self.gateway.jvm.io.citrine.lolo.util.LoloPyDataLoader.zipTrainingData(X_java, y_java), w_java
def _convert_run_data(self, X):
"""Convert the data to be run by the model
Args:
X (ndarray): Input data
Returns:
(JavaObject): Pointer to run data in Java
"""
if not isinstance(X, np.ndarray):
X = np.array(X)
return self.gateway.jvm.io.citrine.lolo.util.LoloPyDataLoader.getFeatureArray(X.tobytes(), X.shape[1], False)
def get_importance_scores(self, X):
"""Get the importance scores for each entry in the training set for each prediction
Args:
X (ndarray): Inputs for each entry to be assessed
"""
pred_result = self._get_prediction_result(X)
y_import_bytes = self.gateway.jvm.io.citrine.lolo.util.LoloPyDataLoader.getImportanceScores(pred_result)
y_import = np.frombuffer(y_import_bytes, 'float').reshape(len(X), -1)
return y_import
def _get_prediction_result(self, X):
"""Get the PredictionResult from the lolo JVM
The PredictionResult class holds methods that will generate the expected predictions, uncertainty intervals, etc
Args:
X (ndarray): Input features for each entry
Returns:
(JavaObject): Prediction result produced by evaluating the model
"""
# Check that the model is fitted
if self.model_ is None:
raise NotFittedError()
# Convert the data to Java
X_java = self._convert_run_data(X)
# Get the PredictionResult
pred_result = self.model_.transform(X_java)
# Unlink the run data, which is no longer needed (to save memory)
self.gateway.detach(X_java)
return pred_result
class BaseLoloRegressor(BaseLoloLearner, RegressorMixin):
"""Abstract class for models that produce regression models.
As written, this allows for
|
shoyer/xarray
|
xarray/coding/cftime_offsets.py
|
Python
|
apache-2.0
| 35,760
| 0.001091
|
"""Time offset classes for use with cftime.datetime objects"""
# The offset classes and mechanisms for generating time ranges defined in
# this module were copied/adapted from those defined in pandas. See in
# particular the objects and methods defined in pandas.tseries.offsets
# and pandas.core.indexes.datetimes.
# For reference, here is a copy of the pandas copyright notice:
# (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team
# All rights reserved.
# Copyright (c) 2008-2011 AQR Capital Management, LLC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the copyright holder nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
from datetime import timedelta
from distutils.version import LooseVersion
from functools import partial
from typing import ClassVar, Optional
import numpy as np
from ..core.pdcompat import count_not_none
from .cftimeindex import CFTimeIndex, _parse_iso8601_with_reso
from .times import format_cftime_datetime
def get_date_type(calendar):
"""Return the cftime date type for a given calendar name."""
try:
import cftime
except ImportError:
raise ImportError("cftime is required for dates with non-standard calendars")
else:
calendars = {
"noleap": cftime.DatetimeNoLeap,
"360_day": cftime.Datetime360Day,
"365_day": cftime.DatetimeNoLeap,
"366_day": cftime.DatetimeAllLeap,
"gregorian": cftime.DatetimeGregorian,
"proleptic_gregorian": cftime.DatetimeProlepticGregorian,
"julian": cftime.DatetimeJulian,
"all_leap": cftime.DatetimeAllLeap,
"standard": cftime.DatetimeGregorian,
}
return calendars[calendar]
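# For example, get_date_type("360_day") returns cftime.Datetime360Day, so
# get_date_type("360_day")(2000, 2, 30) is a valid date: every month has 30 days in that
# calendar. (Illustrative only; requires cftime to be installed, as checked above.)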
class BaseCFTimeOffset:
_freq: ClassVar[Optional[str]] = None
_day_option: ClassVar[Optional[str]] = None
def __init__(self, n=1):
if not isinstance(n, int):
raise TypeError(
"The provided multiple 'n' must be an integer. "
"Instead a value of type {!r} was provided.".format(type(n))
)
self.n = n
def rule_code(self):
return self._freq
def __eq__(self, other):
return self.n == other.n and self.rule_code() == other.rule_code()
def __ne__(self, other):
return not self == other
def __add__(self, other):
return self.__apply__(other)
def __sub__(self, other):
import cftime
if isinstance(other, cftime.datetime):
raise TypeError("Cannot subtract a cftime.datetime " "from a time offset.")
elif type(other) == type(self):
return type(self)(self.n - other.n)
else:
return NotImplemented
def __mul__(self, other):
return type(self)(n=other * self.n)
def __neg__(self):
return self * -1
def __rmul__(self, other):
return self.__mul__(other)
def __radd__(self, other):
return self.__add__(other)
def __rsub__(self, other):
if isinstance(other, BaseCFTimeOffset) and type(self) != type(other):
raise TypeError("Cannot subtract cftime offsets of differing " "types")
return -self + other
def __apply__(self):
return NotImplemented
def onOffset(self, date):
"""Check if the given date is in the set of possible dates created
using a length-one version of this offset class."""
test_date = (self + date) - self
return date == test_date
def rollforward(self, date):
if self.onOffset(date):
return date
else:
return date + type(self)()
def rollback(self, date):
if self.onOffset(date):
return date
else:
return date - type(self)()
def __str__(self):
return "<{}: n={}>".format(type(self).__name__, self.n)
def __repr__(self):
return str(self)
def _get_offset_day(self, other):
# subclass must implement `_day_option`; calling from the base class
# will raise NotImplementedError.
return _get_day_of_month(other, self._day_option)
def _get_day_of_month(other, day_option):
"""Find the day in `other`'s month that satisfies a BaseCFTimeOffset's
onOffset policy, as described by the `day_option` argument.
Parameters
----------
other : cftime.datetime
day_option : 'start', 'end'
'start': returns 1
'end': returns last day of the month
Returns
-------
day_of_month : int
"""
if day_option == "start":
return 1
elif day_option == "end":
days_in_month = _days_in_month(other)
return days_in_month
elif day_option is None:
# Note: unlike `_shift_month`, _get_day_of_month does not
# allow day_option = None
raise NotImplementedError()
else:
raise ValueError(day_option)
def _days_in_month(date):
"""The number of days in the month of the given date"""
if date.month == 12:
reference = type(date)(date.year + 1, 1, 1)
else:
reference = type(date)(date.year, date.month + 1, 1)
return (reference - timedelta(days=1)).day
def _adjust_n_months(other_day, n, reference_day):
"""Adjust the number of times a monthly offset is applied based
on the day of a given date, and the reference day provided.
"""
if n > 0 and other_day < reference_day:
n = n - 1
elif n <= 0 and other_day > reference_day:
n = n + 1
return n
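# Worked example (illustrative only): for a month-end style offset with n=1 applied to a
# date on the 10th, other_day (10) is below reference_day (the last day of the month), so
# n is reduced to 0 and the date only rolls forward to the end of its own month instead of
# skipping into the next one.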
def _adjust_n_years(other, n, month, reference_day):
"""Adjust the number of times an annual offset is applied based on
another date, and the reference day provided"""
if n > 0:
if other.month < month or (other.month == month and other.day < reference_day):
n -= 1
else:
if other.month > month or (other.month == month and other.day > reference_day):
n += 1
return n
def _shift_month(date, months, day_option="start"):
"""Shift the date to a month start or end a given number of months away.
"""
import cftime
delta_year = (date.month + months) // 12
month = (date.month + months) % 12
if month == 0:
month = 12
delta_year = delta_year - 1
year = date.year + delta_year
if day_option == "start":
day = 1
elif day_option == "end":
reference = type(date)(year, month, 1)
day = _days_in_month(reference)
else:
raise ValueError(day_option)
if LooseVersion(cftime.__version__) < LooseVersion("1.0.4"):
# dayofwk=-1 is required to update the dayofwk and dayofyr attributes of
# the returned date object in versions of cftime between 1.0.2 and
# 1.0.
|
stevenmizuno/QGIS
|
tests/src/python/test_qgsmaplayer.py
|
Python
|
gpl-2.0
| 4,223
| 0.000474
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsMapLayer
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '1/02/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
import tempfile
from qgis.core import (QgsReadWriteContext,
QgsVectorLayer,
QgsProject)
from qgis.testing import start_app, unittest
from qgis.PyQt.QtXml import QDomDocument
start_app()
class TestQgsMapLayer(unittest.TestCase):
def testUniqueId(self):
"""
Test that layers created quickly with same name get a unique ID
"""
# make 1000 layers quickly
layers = []
for i in range(1000):
layer = QgsVectorLayer(
'Point?crs=epsg:4326&field=name:string(20)',
'test',
'memory')
layers.append(layer)
# make sure all ids are unique
ids = set()
for l in layers:
self.assertFalse(l.id() in ids)
ids.add(l.id())
def copyLayerViaXmlReadWrite(self, source, dest):
# write to xml
doc = QDomDocument("testdoc")
elem = doc.createElement("maplayer")
self.assertTrue(source.writeLayerXml(elem, doc, QgsReadWriteContext()))
self.assertTrue(dest.readLayerXml(elem, QgsReadWriteContext()), QgsProject.instance())
def testGettersSetters(self):
# test auto refresh getters/setters
layer = QgsVectorLayer("Point?field=fldtxt:string",
"layer", "memory")
        self.assertFalse(layer.hasAutoRefreshEnabled())
self.assertEqual(layer.autoRefreshInterval(), 0)
layer.setAutoRefreshInterval(5)
self.assertFalse(layer.hasAutoRefreshEnabled())
self.assertEqual(layer.autoRefreshInterval(), 5)
layer.setAutoRefreshEnabled(True)
self.assertTrue(layer.hasAutoRefreshEnabled())
self.assertEqual(layer.autoRefreshInterval(), 5)
layer.setAutoRefreshInterval(0) # should disable auto refresh
self.assertFalse(layer.hasAutoRefreshEnabled())
self.assertEqual(layer.autoRefreshInterval(), 0)
def testSaveRestoreAutoRefresh(self):
""" test saving/restoring auto refresh to xml """
layer = QgsVectorLayer("Point?field=fldtxt:string",
"layer", "memory")
layer2 = QgsVectorLayer("Point?field=fldtxt:string",
"layer", "memory")
self.copyLayerViaXmlReadWrite(layer, layer2)
self.assertFalse(layer2.hasAutoRefreshEnabled())
self.assertEqual(layer2.autoRefreshInterval(), 0)
layer.setAutoRefreshInterval(56)
self.copyLayerViaXmlReadWrite(layer, layer2)
self.assertFalse(layer2.hasAutoRefreshEnabled())
self.assertEqual(layer2.autoRefreshInterval(), 56)
layer.setAutoRefreshEnabled(True)
self.copyLayerViaXmlReadWrite(layer, layer2)
self.assertTrue(layer2.hasAutoRefreshEnabled())
self.assertEqual(layer2.autoRefreshInterval(), 56)
def testReadWriteMetadata(self):
layer = QgsVectorLayer("Point?field=fldtxt:string", "layer", "memory")
m = layer.metadata()
# Only abstract, more tests are done in test_qgslayermetadata.py
m.setAbstract('My abstract')
layer.setMetadata(m)
        self.assertEqual(layer.metadata().abstract(), 'My abstract')
destination = tempfile.NamedTemporaryFile(suffix='.qmd').name
message, status = layer.saveNamedMetadata(destination)
self.assertTrue(status, message)
layer2 = QgsVectorLayer("Point?field=fldtxt:string", "layer", "memory")
message, status = layer2.loadNamedMetadata(destination)
self.assertTrue(status)
        self.assertEqual(layer2.metadata().abstract(), 'My abstract')
if __name__ == '__main__':
unittest.main()
|
sfriesel/suds
|
suds/sax/parser.py
|
Python
|
lgpl-3.0
| 4,378
| 0.000228
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The sax module contains a collection of classes that provide a
(D)ocument (O)bject (M)odel representation of an XML document.
The goal is to provide an easy, intuitive interface for managing XML
documents. Although the term DOM is used above, this model is
B{far} better.
XML namespaces in suds are represented using a (2) element tuple
containing the prefix and the URI. Eg: I{('tns', 'http://myns')}
"""
import suds
from suds import *
from suds.sax import *
from suds.sax.attribute import Attribute
from suds.sax.document import Document
from suds.sax.element import Element
from suds.sax.text import Text
from logging import getLogger
import sys
from xml.sax import make_parser, InputSource, ContentHandler
from xml.sax.handler import feature_external_ges
log = getLogger(__name__)
class Handler(ContentHandler):
""" sax hanlder """
def __init__(self):
self.nodes = [Document()]
def startElement(self, name, attrs):
top = self.top()
node = Element(unicode(name))
for a in attrs.getNames():
n = unicode(a)
v = unicode(attrs.getValue(a))
attribute = Attribute(n,v)
if self.mapPrefix(node, attribute):
continue
node.append(attribute)
node.charbuffer = []
top.append(node)
self.push(node)
def mapPrefix(self, node, attribute):
skip = False
if attribute.name == 'xmlns':
if len(attribute.value):
node.expns = unicode(attribute.value)
skip = True
elif attribute.prefix == 'xmlns':
prefix = attribute.name
node.nsprefixes[prefix] = unicode(attribute.value)
skip = True
return skip
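    # For example, on <a xmlns="http://default" xmlns:t="http://myns"> (and assuming
    # Attribute splits a qualified name into prefix and name, which is what the checks
    # above rely on), the first attribute sets node.expns to the default namespace URI and
    # the second records node.nsprefixes['t'] = 'http://myns'; neither is appended as an
    # ordinary attribute because mapPrefix() returns True for both.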
def endElement(self, name):
name = unicode(name)
current = self.top()
if len(current.charbuffer):
current.text = Text(u''.join(current.charbuffer))
del current.charbuffer
if len(current):
current.trim()
if name == current.qname():
self.pop()
else:
raise Exception('malformed document')
def characters(self, content):
text = unicode(content)
node = self.top()
node.charbuffer.append(text)
def push(self, node):
self.nodes.append(node)
return node
def pop(self):
return self.nodes.pop()
def top(self):
return self.nodes[len(self.nodes)-1]
class Parser:
""" SAX Parser """
@classmethod
def saxparser(cls):
p = make_parser()
p.setFeature(feature_external_ges, 0)
h = Handler()
p.setContentHandler(h)
return (p, h)
def parse(self, file=None, string=None):
"""
SAX parse XML text.
@param file: Parse a python I{file-like} object.
@type file: I{file-like} object.
@param string: Parse string XML.
@type string: str
"""
timer = suds.metrics.Timer()
timer.start()
sax, handler = self.saxparser()
if file is not None:
sax.parse(file)
timer.stop()
suds.metrics.log.debug('sax (%s) duration: %s', file, timer)
return handler.nodes[0]
if string is not None:
source = InputSource(None)
source.setByteStream(suds.BytesIO(string))
sax.parse(source)
timer.stop()
suds.metrics.log.debug('%s\nsax duration: %s', string, timer)
return handler.nodes[0]
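# Usage sketch (illustrative only): parsing from a string returns the Document node that
# sits at the bottom of the handler's stack, as parse() above shows.
#
#     document = Parser().parse(string='<a xmlns:t="http://myns"><t:b>hello</t:b></a>')
#
# The namespace on the <t:b> element is carried in the ('t', 'http://myns') tuple form
# described in the module docstring.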
|
alenasf/Pythontest
|
test/test_add_group.py
|
Python
|
apache-2.0
| 813
| 0.00492
|
# -*- coding: utf-8 -*-
from model.group import Group
def test_add_group(app):
old_groups = app.group.get_group_list()
group = Group(name="hjhj", header="jhjh", footer="jhjjhhj")
app.group.create(group)
new_groups = app.group.get_group_list()
assert len(old_groups) + 1 == len(new_groups)
old_groups.append(group)
assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
def test_add_empty_group(app):
old_groups = app.group.get_group_list()
group = Group(name="", header="", footer="")
app.group.create(group)
    new_groups = app.group.get_group_list()
assert len(old_groups) + 1 == len(new_groups)
old_groups.append(group)
assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
|
fengz10/ICN_SCM
|
Zipf.py
|
Python
|
gpl-2.0
| 821
| 0.015834
|
#!/usr/bin/python
import random
import math
import bisect
############################Zipf Generator################################
# The library of numpy.random.zipf or scipy.stats.zipf only work when
# alph > 1
class ZipfGenerator:
def __init__(self, n, alpha):
# Calculate Zeta values from 1 to n:
tmp = [1. / (math.pow(float(i), alpha)) for i in range(1, n+1)]
zeta = reduce(lambda sums, x: sums + [sums[-1] + x], tmp, [0])
# Store the translation map:
self.distMap = [x / zeta[-1] for x in zeta]
def next(self):
# Take a uniform 0-1 pseudo-random value:
u = random.random()
# Translate the Zipf variable:
return bisect.bisect(self.distMap, u) - 1
#########################################################################
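# Usage sketch (illustrative only): drawing ranks from a Zipf distribution with alpha <= 1,
# the case that the numpy/scipy generators cannot handle, as noted above.
#
#     gen = ZipfGenerator(n=1000, alpha=0.8)
#     samples = [gen.next() for _ in range(10)]   # ranks in [0, n-1], skewed toward 0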
|
quietcoolwu/learn-python3-master
|
imooc/10_3.py
|
Python
|
gpl-2.0
| 365
| 0.023166
|
# The key to conditional filtering is how the whole expression is put together: use an
# `if` clause to drop the non-string elements. A list comprehension can then replace the
# loop that builds the list above with a single statement, e.g.:
# >>> [x * x for x in range(1, 11)]
# [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
def toUppers(L):
return [x.upper() for x in L if isinstance(x,str)]
print (toUppers(['Hello', 'world', 101]))
|
CenterForOpenScience/osf-sync
|
osfsync/gui/qt/tray.py
|
Python
|
lgpl-3.0
| 9,674
| 0.001654
|
import logging
import os
import sys
import threading
from queue import Empty, Queue
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtCore import QMutex
from PyQt5.QtCore import QThread
from PyQt5.QtCore import QEvent
from PyQt5.QtWidgets import QTextEdit
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QFileDialog
from PyQt5.QtWidgets import QSystemTrayIcon
from sqlalchemy.orm.exc import NoResultFound
from osfsync.application.background import BackgroundHandler
from osfsync.client.osf import OSFClient
from osfsync.database import Session
from osfsync.database import drop_db
from osfsync.database.models import User
from osfsync.gui.qt.login import LoginScreen
from osfsync.gui.qt.menu import OSFSyncMenu
from osfsync.utils.log import remove_user_from_sentry_logs
from osfsync import settings
from osfsync.tasks.notifications import group_events, Level
from osfsync.utils.validators import validate_containing_folder
logger = logging.getLogger(__name__)
ON_WINDOWS = sys.platform == 'win32'
ON_MAC = sys.platform == 'darwin'
class QResizableMessageBox(QMessageBox):
QWIDGETSIZE_MAX = 16777215
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setMouseTracking(True)
self.setSizeGripEnabled(True)
def event(self, e):
if e.type() in (QEvent.MouseMove, QEvent.MouseButtonPress):
self.setMaximumSize(self.QWIDGETSIZE_MAX, self.QWIDGETSIZE_MAX)
details_box = self.findChild(QTextEdit)
if details_box is not None:
details_box.setFixedSize(details_box.sizeHint())
return QMessageBox.event(self, e)
class OSFSyncQT(QSystemTrayIcon):
def __init__(self, application):
if ON_WINDOWS:
super().__init__(QIcon(':/tray_icon_win.png'), application)
else:
super().__init__(QIcon(':/tray_icon_mac.png'), application)
self._context_menu = OSFSyncMenu(self)
self.setContextMenu(self._context_menu)
self.show()
self.intervention_handler = SyncEventHandler()
self.notification_handler = SyncEventHandler()
# [ (signal, slot) ]
signal_slot_pairs = [
# preferences
# (self.preferences.ui.desktopNotifications.stateChanged, self.preferences.alerts_changed),
# (self.preferences.preferences_closed_signal, self.resume),
(self._context_menu.preferences.accountLogOutButton.clicked, self.logout),
(self.intervention_handler.notify_signal, self.on_intervention),
(self.notification_handler.notify_signal, self.on_notification),
]
for signal, slot in signal_slot_pairs:
signal.connect(slot)
def ensure_folder(self, user):
containing_folder = os.path.dirname(user.folder or '')
while not validate_containing_folder(containing_folder):
logger.warning('Invalid containing folder: "{}"'.format(containing_folder))
res = QFileDialog.getExistingDirectory(caption='Choose where to place OSF folder')
if not res:
# Do not accept an empty string (dialog box dismissed without selection)
# FIXME: This fixes overt errors, but user gets folder picker endlessly until they select a folder
continue
else:
containing_folder = os.path.abspath(res)
with Session() as session:
user.folder = os.path.join(containing_folder, 'OSF')
os.makedirs(user.folder, exist_ok=True)
session.add(user)
session.commit()
    def start(self):
logger.debug('Start in main called.')
self.hide()
user = LoginScreen().get_user()
if user is None:
return False
self.ensure_folder(user)
self.show()
logger.debug('starting background handler from main.start')
BackgroundHandler().set_intervention_cb(self.intervention_handler.enqueue_signal.emit)
BackgroundHandler().set_notification_cb(self.notification_handler.enqueue_signal.emit)
BackgroundHandler().start()
if user.first_boot:
self._context_menu.preferences.on_first_boot()
self._context_menu.open_settings()
return True
def on_intervention(self, intervention):
message = QResizableMessageBox()
message.setWindowTitle('OSF Sync')
message.setIcon(QMessageBox.Question)
message.setText(intervention.title)
message.setInformativeText(intervention.description)
for option in intervention.options:
option_language = str(option).split('.')[1]
message.addButton(" ".join(option_language.split('_')), QMessageBox.YesRole)
idx = message.exec()
intervention.set_result(intervention.options[idx])
self.intervention_handler.done()
def on_notification(self, notification):
"""
Display user-facing event notifications.
:param notification: An individual notification event
:return:
"""
if not self.supportsMessages():
return
# Wait for more notifications, then grab all events and display
t = threading.Timer(settings.ALERT_DURATION, self._consolidate_notifications, args=[notification])
t.start()
# def resume(self):
# logger.debug('resuming')
# if self.background_handler.is_alive():
# raise RuntimeError('Resume called without first calling pause')
# self.background_handler = BackgroundHandler()
# self.background_handler.start()
# def pause(self):
# logger.debug('pausing')
# if self.background_handler and self.background_handler.is_alive():
# self.background_handler.stop()
def _consolidate_notifications(self, first_notification):
"""
Consolidates notifications and groups them together. Releases a burst of all notifications that occur in
a given window of time after the first message is received.
Error messages are always displayed individually.
:param first_notification: The first notification that triggered the consolidation cycle
:return:
"""
# Grab all available events, including the one that kicked off this consolidation cycle
available_notifications = [first_notification]
while True:
try:
event = self.notification_handler.queue.get_nowait()
except Empty:
break
else:
available_notifications.append(event)
# Display notifications
if len(available_notifications) == 1:
# If there's only one message, show it regardless of level
self._show_notifications(available_notifications)
else:
consolidated = group_events(available_notifications)
for level, notification_list in consolidated.items():
# Group info notifications, but display errors and warnings individually
if level > Level.INFO:
self._show_notifications(notification_list)
else:
self.showMessage(
'Updated multiple',
'Updated {} files and folders'.format(len(notification_list)),
QSystemTrayIcon.NoIcon,
msecs=settings.ALERT_DURATION / 1000.
)
self.notification_handler.done()
def _show_notifications(self, notifications_list):
"""Show a message bubble for each notification in the list provided"""
for n in notifications_list:
self.showMessage(
'Synchronizing...',
n.msg,
QSystemTrayIcon.NoIcon,
msecs=settings.ALERT_DURATION / 1000.
)
def quit(self):
BackgroundHandler().stop()
with Session() as session:
try:
user = session.query(User).one()
except NoResul
|
victorshch/axiomatic
|
integration_test_frequecy_axioms.py
|
Python
|
gpl-3.0
| 1,653
| 0.005445
|
# coding=UTF-8
import pandas as pd
import numpy as np
import pickle
from axiomatic.base import AxiomSystem, MinMaxAxiom, MaxAxiom, MinAxiom, ChangeAxiom, IntegralAxiom
from axiomatic.base import RelativeChangeAxiom, FirstDiffAxiom, SecondDiffAxiom, TrainingPipeline
from axiomatic.axiom_training_stage import FrequencyECTrainingStage, FrequencyAxiomTrainingStage
from axiomatic.recognizer_training_stage import DummyRecognizerTrainingStage
from axiomatic.objective_function import ObjectiveFunction
from axiomatic.abnormal_behavior_recognizer import AbnormalBehaviorRecognizer
with open('datasets/debug_dataset.pickle', 'rb') as f:
dataset = pickle.load(f)
axiom_list = [MinMaxAxiom, MaxAxiom, MinAxiom, ChangeAxiom, IntegralAxiom, RelativeChangeAxiom, FirstDiffAxiom, SecondDiffAxiom]
frequency_ec_stage = FrequencyECTrainingStage({'num_part': 5, 'left_window': 2, 'right_window': 2, 'num_axioms': 10, 'axiom_list': axiom_list, 'enable_cache': True})
frequency_axiom_stage = FrequencyAxiomTrainingStage({'num_axioms': 10, 'max_depth': 5, 'num_step_axioms': 10})
dummy_recognizer_stage = DummyRecognizerTrainingStage()
training_pipeline = TrainingPipeline([frequency_ec_stage, frequency_axiom_stage, dummy_recognizer_stage])
artifacts = training_pipeline.train(dataset, dict())
print("Artifacts after training: ", artifacts)
recognizer = AbnormalBehaviorRecognizer(artifacts['axiom_system'], artifacts['abn_models'],
dict(bound=0.1,maxdelta=0.5))
obj_fn = ObjectiveFunction(1, 20)
obj_fn_value = obj_fn.calculate(recognizer, dataset['test'])
print("Recognizer objective function: ", obj_fn_value)
|
aequitas/home-assistant
|
homeassistant/components/remote_rpi_gpio/binary_sensor.py
|
Python
|
apache-2.0
| 3,259
| 0
|
"""Support for binary sensor using RPi GPIO."""
import logging
import voluptuous as vol
import requests
from homeassistant.const import CONF_HOST
from homeassistant.components.binary_sensor import (
BinarySensorDevice, PLATFORM_SCHEMA)
import homeassistant.helpers.config_validation as cv
from . import (CONF_BOUNCETIME, CONF_PULL_MODE, CONF_INVERT_LOGIC,
DEFAULT_BOUNCETIME, DEFAULT_INVERT_LOGIC, DEFAULT_PULL_MODE)
from .. import remote_rpi_gpio
_LOGGER = logging.getLogger(__name__)
CONF_PORTS = 'ports'
_SENSORS_SCHEMA = vol.Schema({
cv.positive_int: cv.string,
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PORTS): _SENSORS_SCHEMA,
vol.Optional(CONF_INVERT_LOGIC,
default=DEFAULT_INVERT_LOGIC): cv.boolean,
vol.Optional(CONF_BOUNCETIME,
default=DEFAULT_BOUNCETIME): cv.positive_int,
vol.Optional(CONF_PULL_MODE,
default=DEFAULT_PULL_MODE): cv.string,
})
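# Example configuration.yaml entry matching the schema above (illustrative only; the host
# and port mapping are placeholders):
#
#     binary_sensor:
#       - platform: remote_rpi_gpio
#         host: 192.168.1.50
#         ports:
#           11: PIR Office
#         invert_logic: false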
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Raspberry PI GPIO devices."""
address = config['host']
invert_logic = config[CONF_INVERT_LOGIC]
pull_mode = config[CONF_PULL_MODE]
ports = config['ports']
bouncetime = config[CONF_BOUNCETIME]/1000
devices = []
for port_num, port_name in ports.items():
try:
button = remote_rpi_gpio.setup_input(address,
port_num,
pull_mode,
bouncetime)
except (ValueError, IndexError, KeyError, IOError):
return
        new_sensor = RemoteRPiGPIOBinarySensor(port_name, button, invert_logic)
devices.append(new_sensor)
add_entities(devices, True)
class RemoteRPiGPIOBinarySensor(BinarySensorDevice):
"""Represent a binary sensor that uses a Remote Raspberry Pi GPIO."""
    def __init__(self, name, button, invert_logic):
"""Initialize the RPi binary sensor."""
self._name = name
self._invert_logic = invert_logic
self._state = False
self._button = button
async def async_added_to_hass(self):
"""Run when entity about to be added to hass."""
def read_gpio():
"""Read state from GPIO."""
self._state = remote_rpi_gpio.read_input(self._button)
self.schedule_update_ha_state()
self._button.when_released = read_gpio
self._button.when_pressed = read_gpio
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return the state of the entity."""
return self._state != self._invert_logic
@property
def device_class(self):
"""Return the class of this sensor, from DEVICE_CLASSES."""
return
def update(self):
"""Update the GPIO state."""
try:
self._state = remote_rpi_gpio.read_input(self._button)
except requests.exceptions.ConnectionError:
return
|
RevansChen/online-judge
|
Codewars/8kyu/freudian-translator/Python/solution1.py
|
Python
|
mit
| 91
| 0.010989
|
# Python - 2.7.6
to_freud = lambda sentence: ' '.join(['sex'] * len(sentence.split(' ')))
|
newvem/pytz
|
pytz/zoneinfo/Asia/Ashkhabad.py
|
Python
|
mit
| 1,535
| 0.160261
|
'''tzinfo timezone information for Asia/Ashkhabad.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Ashkhabad(DstTzInfo):
'''Asia/Ashkhabad timezone definition. See datetime.tzinfo for details'''
zone = 'Asia/Ashkhabad'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1924,5,1,20,6,28),
d(1930,6,20,20,0,0),
d(1981,3,31,19,0,0),
d(1981,9,30,18,0,0),
d(1982,3,31,19,0,0),
d(1982,9,30,18,0,0),
d(1983,3,31,19,0,0),
d(1983,9,30,18,0,0),
d(1984,3,31,19,0,0),
d(1984,9,29,21,0,0),
d(1985,3,30,21,0,0),
d(1985,9,28,21,0,0),
d(1986,3,29,21,0,0),
d(1986,9,27,21,0,0),
d(1987,3,28,21,0,0),
d(1987,9,26,21,0,0),
d(1988,3,26,21,0,0),
d(1988,9,24,21,0,0),
d(1989,3,25,21,0,0),
d(1989,9,23,21,0,0),
d(1990,3,24,21,0,0),
d(1990,9,29,21,0,0),
d(1991,3,30,21,0,0),
d(1991,9,28,22,0,0),
d(1991,10,26,20,0,0),
d(1992,1,18,22,0,0),
]
_transition_info = [
i(14040,0,'LMT'),
i(14400,0,'ASHT'),
i(18000,0,'ASHT'),
i(21600,3600,'ASHST'),
i(18000,0,'ASHT'),
i(21600,3600,'ASHST'),
i(18000,0,'ASHT'),
i(21600,3600,'ASHST'),
i(18000,0,'ASHT'),
i(21600,3600,'ASHST'),
i(18000,0,'ASHT'),
i(21600,3600,'ASHST'),
i(18000,0,'ASHT'),
i(21600,3600,'ASHST'),
i(18000,0,'ASHT'),
i(21600,3600,'ASHST'),
i(18000,0,'ASHT'),
i(21600,3600,'ASHST'),
i(18000,0,'ASHT'),
i(21600,3600,'ASHST'),
i(18000,0,'ASHT'),
i(21600,3600,'ASHST'),
i(18000,0,'ASHT'),
i(18000,0,'ASHST'),
i(14400,0,'ASHT'),
i(14400,0,'TMT'),
i(18000,0,'TMT'),
]
Ashkhabad = Ashkhabad()
|
danigm/sweetter
|
sweetter/contrib/karma/urls.py
|
Python
|
agpl-3.0
| 136
| 0.007353
|
from django.conf.urls.defaults import *
urlpatterns = patterns('contrib.karma.views',
(r'^$', 'index'),
(r'index', 'index'),
)
|
TejasM/wisely
|
wisely_project/get_courses_file.py
|
Python
|
mit
| 1,721
| 0.004648
|
import sys
import os
import traceback
from django import db
sys.path.append('/root/wisely/wisely_project/')
os.environ['DJANGO_SETTINGS_MODULE'] = 'wisely_project.settings.production'
from django.db.models import F, Q
from django.utils import timezone
from users.tasks import get_coursera_courses, get_edx_courses, get_udemy_courses
__author__ = 'tmehta'
from users.models import CourseraProfile, EdxProfile, UdemyProfile
while True:
try:
for connection in db.connections.all():
if len(connection.queries) > 100:
db.reset_queries()
        for user in CourseraProfile.objects.filter(last_updated__lt=F('user__last_login')).filter(~Q(username='')).filter(
incorrect_login=False):
print user.username
print "Start coursera"
get_coursera_courses(user)
user.last_updated = timezone.now()
print "Done Coursera"
user.save()
for user in EdxProfile.objects.filter(last_updated__lt=F('user__last_login')).filter(~Q(email='')).filter(
incorrect_login=False):
print user.email
print "Start edx"
get_edx_courses(user)
print "Done EDx"
user.last_updated = timezone.now()
user.save()
for user in UdemyProfile.objects.filter(last_updated__lt=F('user__last_login')).filter(~Q(email='')).filter(
incorrect_login=False):
print user.email
print "Start udemy"
get_udemy_courses(user)
print "Done Udemy"
user.last_updated = timezone.now()
user.save()
except Exception as e:
print traceback.format_exc()
|
cycloidio/cyclosible
|
cyclosible/playbook/migrations/0004_playbookrunhistory_log_url.py
|
Python
|
gpl-3.0
| 441
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('playbook', '0003_auto_20151028_1735'),
]
operations = [
migrations.AddField(
model_name='playbookrunhistory',
name='log_url',
            field=models.CharField(default=b'', max_length=1024, blank=True),
),
]
|
tryggvib/datapackage
|
datapackage/schema.py
|
Python
|
gpl-3.0
| 11,045
| 0.000181
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .util import Specification
from . import compat
class Field(Specification):
"""
Field object for adding fields to a resource schema.
Currently this is built around the Tabular Data Package.
"""
SPECIFICATION = {'name': compat.str,
'title': compat.str,
'type': compat.str,
'format': compat.str,
'constraints': dict}
REQUIRED = ('name',)
class Constraints(Specification):
"""
Constraints object which can be added to a field in a resource schema
in order to represent the constraints put on that particular field.
"""
SPECIFICATION = {'required': bool,
'minLength': int,
'maxLength': int,
'unique': bool,
'pattern': compat.str,
'minimum': None,
'maximum': None}
class Reference(Specification):
"""
Reference object which can be added to a ForeignKey object to represent
the reference to the other datapackage.
"""
SPECIFICATION = {'datapackage': compat.str,
'resource': compat.str,
'fields': (compat.str, list)}
REQUIRED = ('fields',)
def __setattr__(self, attribute, value):
if attribute == 'fields':
            # We need to make sure all fields are represented by their
# names if it is a list
if type(value) == list:
modified_value = []
for single_value in value:
if type(single_value) == compat.str:
modified_value.append(single_value)
elif isinstance(single_value, Field):
modified_value.append(single_value.name)
else:
raise TypeError(
'Field type ({0}) is not supported'.format(
type(single_value)))
value = modified_value
elif type(value) == compat.str:
# We don't need to do anything with a str
pass
elif isinstance(value, Field):
# Set the name from the field as the value
value = value.name
else:
raise TypeError("Type of field ({0}) is not supported".format(
type(value)))
super(Reference, self).__setattr__(attribute, value)
class ForeignKey(Specification):
"""
ForeignKey object which can be added to a resource schema object to
represent a foreign key in another data package.
"""
SPECIFICATION = {'fields': (compat.str, list),
'reference': Reference}
REQUIRED = ('fields', 'reference')
def __setattr__(self, attribute, value):
# If the attribute is 'reference' we need to check if there is a
# fields attribute and do some checks to see if they are inconsistent
# because they shouldn't be
if attribute == 'reference' and 'fields' in self:
fields = self['fields']
if type(fields) != type(value.fields):
raise TypeError(
'Reference fields must have the same type as fields')
if type(value.fields) == list:
if len(value.fields) != len(fields):
raise ValueError(
'Reference fields and fields are inconsistent')
if attribute == 'fields':
value_type = type(value)
            # We only want to show the names of the fields, so we need
            # to go through a list and get out the names and use them as the
# value
if value_type == list:
modified_value = []
for single_value in value:
if type(single_value) == compat.str:
modified_value.append(single_value)
elif isinstance(single_value, Field):
modified_value.append(single_value.name)
else:
raise TypeError(
'Foreign key type ({0}) is not supported'.format(
type(single_value)))
value = modified_value
elif value_type == compat.str:
# We don't need to do anything if the value is a str
pass
elif isinstance(value, Field):
value = value.name
else:
raise TypeError("Type of field ({0}) is not supported".format(
value_type))
# Same check as before about inconsistencies but just the other
# way around
if 'reference' in self:
reference_fields = self['reference'].fields
if type(reference_fields) != value_type:
raise TypeError(
'Fields must have the same type as Reference fields')
if type(reference_fields) == list:
if len(reference_fields) != len(value):
raise ValueError(
'Reference fields and fields are inconsistent')
super(ForeignKey, self).__setattr__(attribute, value)
class Schema(Specification):
"""
Schema object which holds the representation of the schema for a
Tabular Data Package (using the JSON Table Schema protocol). The
schema can be used just like a dictionary which means it is ready
for json serialization and export as part of a data package
descriptor (when added to a resource).
"""
SPECIFICATION = {'fields': list,
'primaryKey': (compat.str, list),
'foreignKeys': list}
def __init__(self, *args, **kwargs):
# We need to initialize an empty fields array (this is a required
# field but we don't require it, we create it)
self['fields'] = []
# We add the fields using the internal method so we can do
# validation of each field
self.add_fields(kwargs.pop('fields', []))
super(Schema, self).__init__(self, *args, **kwargs)
def __setattr__(self, attribute, value):
if attribute == 'primaryKey' and value is not None:
# Primary Keys must be a reference to existing fields so we
# need to check if the primary key is in the fields array
field_names = [f.name for f in self.get('fields', [])]
if type(value) == list:
modified_value = []
for single_value in value:
if type(single_value) == compat.str:
if single_value in field_names:
modified_value.append(single_value)
else:
raise AttributeError(
"Unknown '{0}' cannot be primaryKey".format(
single_value))
elif isinstance(single_value, Field):
if single_value.name in field_names:
modified_value.append(single_value.name)
else:
raise AttributeError(
"Unknown '{0}' cannot be primaryKey".format(
single_value.name))
else:
raise TypeError(
'primaryKey type ({0}) is not supported'.format(
type(single_value)))
value = modified_value
elif type(value) == compat.str:
if value not in field_names:
raise AttributeError(
"Unknown '{0}' cannot be primaryKey".format(
value))
elif isinstance(value, Field):
|
zstang/learning-python-the-hard-way
|
ex5.py
|
Python
|
mit
| 635
| 0.009449
|
# -*- coding: utf-8 -*-
#
# exercise 5: more variables and printing
#
# string formating
name = 'Zed A. Shaw'
ages = 35 # not a lie
height = 74 # inches
weight = 180 # lbs
eyes = 'Blue'
teeth = 'White'
hair = 'Brown'
print "Let's talk about %s." % name
print "He's %d inched tall." % height
print "He's %d pounds heavy." % weight
print "Actually that's not too heavy."
print "He's got %s eyes and %s hair." % (eyes, hair)
print "His teeth are usually %s depending on the coffee." % teeth
# this line is tricky, try to get it exactly right
print "If I add %d, %d, and %d I get %d." %(
ages, height, weight, ages + height + weight)
|
sileht/python-gnocchiclient
|
gnocchiclient/tests/functional/test_benchmark.py
|
Python
|
apache-2.0
| 4,537
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import uuid
from gnocchiclient.tests.functional import base
class BenchmarkMetricTest(base.ClientTestBase):
def test_benchmark_metric_create_wrong_workers(self):
result = self.gnocchi(
u'benchmark', params=u"metric create -n 0",
fail_ok=True, merge_stderr=True)
self.assertIn("0 must be greater than 0", result)
def test_benchmark_metric_create(self):
apname = str(uuid.uuid4())
# PREPARE AN ARCHIVE POLICY
self.gnocchi("archive-policy", params="create %s "
"--back-window 0 -d granularity:1s,points:86400" % apname)
result = self.gnocchi(
u'benchmark', params=u"metric create -n 10 -a %s" % apname)
result = json.loads(result)
self.assertEqual(10, int(result['create executed']))
self.assertLessEqual(int(result['create failures']), 10)
self.assertLessEqual(int(result['delete executed']),
int(result['create executed']))
result = self.gnocchi(
u'benchmark', params=u"metric create -k -n 10 -a %s" % apname)
result = json.loads(result)
self.assertEqual(10, int(result['create executed']))
self.assertLessEqual(int(result['create failures']), 10)
self.assertNotIn('delete executed', result)
def test_benchmark_metric_get(self):
apname = str(uuid.uuid4())
# PREPARE AN ARCHIVE POLICY
self.gnocchi("archive-policy", params="create %s "
"--back-window 0 -d granularity:1s,points:86400" % apname)
result = self.gnocchi(
u'metric', params=u"create -a %s" % apname)
metric = json.loads(result)
result = self.gnocchi(
u'benchmark', params=u"metric show -n 10 %s" % metric['id'])
result = json.loads(result)
self.assertEqual(10, int(result['show executed']))
self.assertLessEqual(int(result['show failures']), 10)
def test_benchmark_measures_add(self):
apname = str(uuid.uuid4())
# PREPARE AN ARCHIVE POLICY
self.gnocchi("archive-policy", params="create %s "
"--back-window 0 -d granularity:1s,points:86400" % apname)
        result = self.gnocchi(
u'metric', params=u"create -a %s" % apname)
metric = json.loads(result)
result = self.gnocchi(
u'benchmark', params=u"measures add -n 10 -b 4 %s" % metric['id'])
result = json.loads(result)
self.assertEqual(2, int(result['push executed']))
self.assertLessEqual(int(result['push failures']), 2)
result = self.gnocchi(
u'benchmark',
params=u"measures add -s 2010-01-01 -n 10 -b 4 %s"
% metric['id'])
result = json.loads(result)
self.assertEqual(2, int(result['push executed']))
self.assertLessEqual(int(result['push failures']), 2)
result = self.gnocchi(
u'benchmark',
params=u"measures add --wait -s 2010-01-01 -n 10 -b 4 %s"
% metric['id'])
result = json.loads(result)
self.assertEqual(2, int(result['push executed']))
self.assertLessEqual(int(result['push failures']), 2)
self.assertIn("extra wait to process measures", result)
def test_benchmark_measures_show(self):
apname = str(uuid.uuid4())
# PREPARE AN ARCHIVE POLICY
self.gnocchi("archive-policy", params="create %s "
"--back-window 0 -d granularity:1s,points:86400" % apname)
result = self.gnocchi(
u'metric', params=u"create -a %s" % apname)
metric = json.loads(result)
result = self.gnocchi(
u'benchmark',
params=u"measures show -n 2 %s"
% metric['id'])
result = json.loads(result)
self.assertEqual(2, int(result['show executed']))
self.assertLessEqual(int(result['show failures']), 2)
|
quentinbodinier/custom_gnuradio_blocks
|
python/qa_test_interp.py
|
Python
|
gpl-3.0
| 36,971
| 0.003489
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# GNU GENERAL PUBLIC LICENSE
# Version 3, 29 June 2007
#
# Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
# Everyone is permitted to copy and distribute verbatim copies
# of this license document, but changing it is not allowed.
#
# Preamble
#
# The GNU General Public License is a free, copyleft license for
# software and other kinds of works.
#
# The licenses for most software and other practical works are designed
# to take away your freedom to share and change the works. By contrast,
# the GNU General Public License is intended to guarantee your freedom to
# share and change all versions of a program--to make sure it remains free
# software for all its users. We, the Free Software Foundation, use the
# GNU General Public License for most of our software; it applies also to
# any other work released this way by its authors. You can apply it to
# your programs, too.
#
# When we speak of free software, we are referring to freedom, not
# price. Our General Public Licenses are designed to make sure that you
# have the freedom to distribute copies of free software (and charge for
# them if you wish), that you receive source code or can get it if you
# want it, that you can change the software or use pieces of it in new
# free programs, and that you know you can do these things.
#
# To protect your rights, we need to prevent others from denying you
# these rights or asking you to surrender the rights. Therefore, you have
# certain responsibilities if you distribute copies of the software, or if
# you modify it: responsibilities to respect the freedom of others.
#
# For example, if you distribute copies of such a program, whether
# gratis or for a fee, you must pass on to the recipients the same
# freedoms that you received. You must make sure that they, too, receive
# or can get the source code. And you must show them these terms so they
# know their rights.
#
# Developers that use the GNU GPL protect your rights with two steps:
# (1) assert copyright on the software, and (2) offer you this License
# giving you legal permission to copy, distribute and/or modify it.
#
# For the developers' and authors' protection, the GPL clearly explains
# that there is no warranty for this free software. For both users' and
# authors' sake, the GPL requires that modified versions be marked as
# changed, so that their problems will not be attributed erroneously to
# authors of previous versions.
#
# Some devices are designed to deny users access to install or run
# modified versions of the software inside them, although the manufacturer
# can do so. This is fundamentally incompatible with the aim of
# protecting users' freedom to change the software. The systematic
# pattern of such abuse occurs in the area of products for individuals to
# use, which is precisely where it is most unacceptable. Therefore, we
# have designed this version of the GPL to prohibit the practice for those
# products. If such problems arise substantially in other domains, we
# stand ready to extend this provision to those domains in future versions
# of the GPL, as needed to protect the freedom of users.
#
# Finally, every program is threatened constantly by software patents.
# States should not allow patents to restrict development and use of
# software on general-purpose computers, but in those that do, we wish to
# avoid the special danger that patents applied to a free program could
# make it effectively proprietary. To prevent this, the GPL assures that
# patents cannot be used to render the program non-free.
#
# The precise terms and conditions for copying, distribution and
# modification follow.
#
# TERMS AND CONDITIONS
#
# 0. Definitions.
#
# "This License" refers to version 3 of the GNU General Public License.
#
# "Copyright" also means copyright-like laws that apply to other kinds of
# works, such as semiconductor masks.
#
# "The Program" refers to any copyrightable work licensed under this
# License. Each licensee is addressed as "you". "Licensees" and
# "recipients" may be individuals or organizations.
#
# To "modify" a work means to copy from or adapt all or part of the work
# in a fashion requiring copyright permission, other than the making of an
# exact copy. The resulting work is called a "modified version" of the
# earlier work or a work "based on" the earlier work.
#
# A "covered work" means either the unmodified Program or a work based
# on the Program.
#
# To "propagate" a work means to do anything with it that, without
# permission, would make you directly or secondarily liable for
# infringement under applicable copyright law, except executing it on a
# computer or modifying a private copy. Propagation includes copying,
# distribution (with or without modification), making available to the
# public, and in some countries other activities as well.
#
# To "convey" a work means any kind of propagation that enables other
# parties to make or receive copies. Mere interaction with a user through
# a computer network, with no transfer of a copy, is not conveying.
#
# An interactive user interface displays "Appropriate Legal Notices"
# to the extent that it includes a convenient and prominently visible
# feature that (1) displays an appropriate copyright notice, and (2)
# tells the user that there is no warranty for the work (except to the
# extent that warranties are provided), that licensees may convey the
# work under this License, and how to view a copy of this License. If
# the interface presents a list of user commands or options, such as a
# menu, a prominent item in the list meets this criterion.
#
# 1. Source Code.
#
# The "source code" for a work means the preferred form of the work
# for making modifications to it. "Object code" means any non-source
# form of a work.
#
# A "Standard Interface" means an interface that either is an official
# standard defined by a recognized standards body, or, in the case of
# interfaces specified for a particular programming language, one that
# is widely used among developers working in that language.
#
# The "System Libraries" of an executable work include anything, other
# than the work as a whole, that (a) is included in the normal form of
# packaging a Major Component, but which is not part of that Major
# Component, and (b) serves only to enable use of the work with that
# Major Component, or to implement a Standard Interface for which an
# implementation is available to the public in source code form. A
# "Major Component", in this context, means a major essential component
# (kernel, window system, and so on) of the specific operating system
# (if any) on which the executable work runs, or a compiler used to
# produce the work, or an object code interpreter used to run it.
#
# The "Corresponding Source" for a work in object code form means all
# the source code needed to generate, install, and (for an executable
# work) run the object code and to modify the work, including scripts to
# control those activities. However, it does not include the work's
# System Libraries, or general-purpose tools or generally available free
# programs which are used unmodified in performing those activities but
# which are not part of the work. For example, Corresponding Source
# includes interface definition files associated with source files for
# the work, and the source code for shared libraries and dynamically
# linked subprograms that the work is specifically designed to require,
# such as by intimate data communication or control flow between those
# subprograms and other parts of the work.
#
# The Corresponding Source need not include anything that users
# can regenerate automatically from other parts of the Corresponding
# Source.
#
# The Corresponding Source for a work in source code form is that
# same work.
#
# 2. Basic Permissions.
#
# All rights granted under this License are granted for the term of
# copyright on the Program, and are irrev
|
midnightradio/gensim
|
gensim/sklearn_api/hdp.py
|
Python
|
gpl-3.0
| 8,719
| 0.004358
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Scikit learn interface for :class:`~gensim.models.hdpmodel.HdpModel`.
Follows scikit-learn API conventions to facilitate using gensim along with scikit-learn.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import common_dictionary, common_corpus
>>> from gensim.sklearn_api import HdpTransformer
>>>
    >>> # Let's extract the topic distribution of each document
>>> model = HdpTransformer(id2word=common_dictionary)
>>> distr = model.fit_transform(common_corpus)
"""
import numpy as np
from scipy import sparse
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.exceptions import NotFittedError
from gensim import models
from gensim import matutils
class HdpTransformer(TransformerMixin, BaseEstimator):
"""Base HDP module, wraps :class:`~gensim.models.hdpmodel.HdpModel`.
    The inner workings of this class depend heavily on `Wang, Paisley, Blei: "Online Variational
Inference for the Hierarchical Dirichlet Process, JMLR (2011)"
<http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
"""
def __init__(self, id2word, max_chunks=None, max_time=None, chunksize=256, kappa=1.0, tau=64.0, K=15, T=150,
alpha=1, gamma=1, eta=0.01, scale=1.0, var_converge=0.0001, outputdir=None, random_state=None):
"""
Parameters
----------
id2word : :class:`~gensim.corpora.dictionary.Dictionary`, optional
            Mapping between a word's ID and the word itself in the vocabulary.
max_chunks : int, optional
            Upper bound on how many chunks to process. If there are not enough chunks in the corpus, it wraps
            around to the beginning of the corpus for another pass.
max_time : int, optional
Upper bound on time in seconds for which model will be trained.
chunksize : int, optional
Number of documents to be processed by the model in each mini-batch.
kappa : float, optional
Learning rate, see `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical Dirichlet
Process, JMLR (2011)" <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
tau : float, optional
Slow down parameter, see `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical
Dirichlet Process, JMLR (2011)" <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
K : int, optional
Second level truncation level, see `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical
Dirichlet Process, JMLR (2011)" <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
T : int, optional
Top level truncation level, see `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical
Dirichlet Process, JMLR (2011)" <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
alpha : int, optional
Second level concentration, see `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical
Dirichlet Process, JMLR (2011)" <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
gamma : int, optional
First level concentration, see `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical
Dirichlet Process, JMLR (2011)" <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
eta : float, optional
The topic Dirichlet, see `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical
Dirichlet Process, JMLR (2011)" <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
scale : float, optional
Weights information from the mini-chunk of corpus to calculate rhot.
var_converge : float, optional
Lower bound on the right side of convergence. Used when updating variational parameters
for a single document.
outputdir : str, optional
            Path to a directory where topic and options information will be stored.
        random_state : int, optional
Seed used to create a :class:`~np.random.RandomState`. Useful for obtaining reproducible results.
"""
self.gensim_model = None
self.id2word = id2word
self.max_chunks = max_chunks
self.max_time = max_time
self.chunksize = chunksize
self.kappa = kappa
self.tau = tau
self.K = K
self.T = T
self.alpha = alpha
self.gamma = gamma
self.eta = eta
self.scale = scale
self.var_converge = var_converge
self.outputdir = outputdir
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {iterable of list of (int, number), scipy.sparse matrix}
A collection of documents in BOW format used for training the model.
Returns
-------
:class:`~gensim.sklearn_api.hdp.HdpTransformer`
The trained model.
"""
if sparse.issparse(X):
corpus = matutils.Sparse2Corpus(sparse=X, documents_columns=False)
else:
corpus = X
self.gensim_model = models.HdpModel(
corpus=corpus, id2word=self.id2word, max_chunks=self.max_chunks,
max_time=self.max_time, chunksize=self.chunksize, kappa=self.kappa, tau=self.tau,
K=self.K, T=self.T, alpha=self.alpha, gamma=self.gamma, eta=self.eta, scale=self.scale,
var_converge=self.var_converge, outputdir=self.outputdir, random_state=self.random_state
)
return self
def transform(self, docs):
"""Infer a matrix of topic distribution for the given document bow, where a_ij
indicates (topic_i, topic_probability_j).
Parameters
----------
docs : {iterable of list of (int, number), list of (int, number)}
Document or sequence of documents in BOW format.
Returns
-------
numpy.ndarray of shape [`len(docs), num_topics`]
Topic distribution for `docs`.
"""
if self.gensim_model is None:
raise NotFittedError(
"This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
)
        # If a single document (a list of tuples) was passed, wrap it so we always iterate over a list of documents
if isinstance(docs[0], tuple):
docs = [docs]
distribution, max_num_topics = [], 0
for doc in docs:
topicd = self.gensim_model[doc]
distribution.append(topicd)
max_num_topics = max(max_num_topics, max(topic[0] for topic in topicd) + 1)
# returning dense representation for compatibility with sklearn
# but we should go back to sparse representation in the future
distribution = [matutils.sparse2full(t, max_num_topics) for t in distribution]
return np.reshape(np.array(distribution), (len(docs), max_num_topics))
def partial_fit(self, X):
"""Train model over a potentially incomplete set of documents.
Uses the parameters set in the constructor.
This method can be used in two ways:
* On an unfitted model in which case the model is initialized and trained on `X`.
* On an already fitted model in which case the model is **updated** by `X`.
Parameters
----------
X : {iterable of list of (int, number), scipy.sparse matrix}
A collection of documents in BOW format used for training the model.
Returns
-------
:class:`~gensim.sklearn_api.hdp.HdpTransformer`
The trained model.
"""
if sparse.issparse(X):
            X = matutils.Sparse2Corpus(sparse=X, documents_columns=False)
        # Completion sketch (inferred from fit() and the docstring above, not verbatim from the source):
        # initialize the model on the first call, then perform an online update with the new chunk.
        if self.gensim_model is None:
            self.gensim_model = models.HdpModel(
                id2word=self.id2word, max_chunks=self.max_chunks, max_time=self.max_time,
                chunksize=self.chunksize, kappa=self.kappa, tau=self.tau, K=self.K, T=self.T,
                alpha=self.alpha, gamma=self.gamma, eta=self.eta, scale=self.scale,
                var_converge=self.var_converge, outputdir=self.outputdir, random_state=self.random_state
            )
        self.gensim_model.update(corpus=X)
        return self
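A minimal usage sketch for the transformer above, reusing the gensim test corpus named in the module docstring (`common_dictionary`, `common_corpus`); this is an illustrative addition, not part of the original hdp.py.

# Illustrative sketch: incremental fitting with HdpTransformer, then transforming the corpus.
from gensim.test.utils import common_corpus, common_dictionary
from gensim.sklearn_api import HdpTransformer

model = HdpTransformer(id2word=common_dictionary, random_state=42)
model.partial_fit(common_corpus)        # first call initializes the underlying HdpModel
model.partial_fit(common_corpus)        # later calls update the same model with a new chunk
distr = model.transform(common_corpus)  # dense ndarray, shape (len(common_corpus), num_topics)
print(distr.shape)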
betrisey/home-assistant | homeassistant/components/media_player/yamaha.py | Python | mit | 7,019 | 0
"""
Support for Yamaha Receivers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.yamaha/
"""
import logging
import voluptuous as vol
from homeassistant.components.media_player import (
SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET,
SUPPORT_SELECT_SOURCE, SUPPORT_PLAY_MEDIA,
MEDIA_TYPE_MUSIC,
MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.const import (CONF_NAME, CONF_HOST, STATE_OFF, STATE_ON)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['rxv==0.2.0']
_LOGGER = logging.getLogger(__name__)
SUPPORT_YAMAHA = SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE | \
SUPPORT_PLAY_MEDIA
CONF_SOURCE_NAMES = 'source_names'
CONF_SOURCE_IGNORE = 'source_ignore'
CONF_ZONE_IGNORE = 'zone_ignore'
DEFAULT_NAME = 'Yamaha Receiver'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_SOURCE_IGNORE, default=[]):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_ZONE_IGNORE, default=[]):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_SOURCE_NAMES, default={}): {cv.string: cv.string},
})
# pylint: disable=too-many-locals
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Yamaha platform."""
import rxv
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
source_ignore = config.get(CONF_SOURCE_IGNORE)
source_names = config.get(CONF_SOURCE_NAMES)
zone_ignore = config.get(CONF_ZONE_IGNORE)
if discovery_info is not None:
name = discovery_info[0]
model = discovery_info[1]
ctrl_url = discovery_info[2]
desc_url = discovery_info[3]
receivers = rxv.RXV(
ctrl_url,
model_name=model,
friendly_name=name,
unit_desc_url=desc_url).zone_controllers()
_LOGGER.info("Receivers: %s", receivers)
elif host is None:
receivers = []
for recv in rxv.find():
receivers.extend(recv.zone_controllers())
else:
ctrl_url = "http://{}:80/YamahaRemoteControl/ctrl".format(host)
receivers = rxv.RXV(ctrl_url, name).zone_controllers()
for receiver in receivers:
if receiver.zone not in zone_ignore:
add_devices([
YamahaDevice(name, receiver, source_ignore, source_names)])
class YamahaDevice(MediaPlayerDevice):
"""Representation of a Yamaha device."""
# pylint: disable=too-many-public-methods, abstract-method
# pylint: disable=too-many-instance-attributes
def __init__(self, name, receiver, source_ignore, source_names):
"""Initialize the Yamaha Receiver."""
self._receiver = receiver
self._muted = False
self._volume = 0
self._pwstate = STATE_OFF
self._current_source = None
self._source_list = None
self._source_ignore = source_ignore or []
self._source_names = source_names or {}
self._reverse_mapping = None
self.update()
self._name = name
self._zone = receiver.zone
def update(self):
"""Get the latest details from the device."""
if self._receiver.on:
self._pwstate = STATE_ON
else:
self._pwstate = STATE_OFF
self._muted = self._receiver.mute
self._volume = (self._receiver.volume / 100) + 1
if self.source_list is None:
self.build_source_list()
current_source = self._receiver.input
self._current_source = self._source_names.get(
current_source, current_source)
def build_source_list(self):
"""Build the source list."""
self._reverse_mapping = {alias: source for source, alias in
self._source_names.items()}
self._source_list = sorted(
self._source_names.get(source, source) for source in
self._receiver.inputs()
if source not in self._source_ignore)
@property
def name(self):
"""Return the name of the device."""
name = self._name
if self._zone != "Main_Zone":
# Zone will be one of Main_Zone, Zone_2, Zone_3
name += " " + self._zone.replace('_', ' ')
return name
@property
def state(self):
"""Return the state of the device."""
return self._pwstate
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._muted
@property
def source(self):
"""Return the current input source."""
return self._current_source
@property
def source_list(self):
"""List of available input sources."""
return self._source_list
@property
def supported_media_commands(self):
"""Flag of media commands that are supported."""
return SUPPORT_YAMAHA
def turn_off(self):
"""Turn off media player."""
        self._receiver.on = False
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
receiver_vol = 100 - (volume * 100)
negative_receiver_vol = -receiver_vol
self._receiver.volume = negative_receiver_vol
def mute_volume(self, mute):
"""Mute (true) or unmute (false) media player."""
        self._receiver.mute = mute
def turn_on(self):
"""Turn the media player on."""
self._receiver.on = True
self._volume = (self._receiver.volume / 100) + 1
def select_source(self, source):
"""Select input source."""
self._receiver.input = self._reverse_mapping.get(source, source)
def play_media(self, media_type, media_id, **kwargs):
"""Play media from an ID.
This exposes a pass through for various input sources in the
Yamaha to direct play certain kinds of media. media_type is
treated as the input type that we are setting, and media id is
specific to it.
"""
if media_type == "NET RADIO":
self._receiver.net_radio(media_id)
@property
def media_content_type(self):
"""Return the media content type."""
if self.source == "NET RADIO":
return MEDIA_TYPE_MUSIC
@property
def media_title(self):
"""Return the media title.
This will vary by input source, as they provide different
information in metadata.
"""
if self.source == "NET RADIO":
info = self._receiver.play_status()
if info.song:
return "%s: %s" % (info.station, info.song)
else:
return info.station
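A short worked example of the volume conversion used by update() and set_volume_level() above: the receiver exposes volume as a negative value scaled by 100, while Home Assistant uses 0..1. The helper names below are illustrative, not part of the original yamaha.py.

# Illustrative sketch: round trip between Home Assistant's 0..1 volume and the receiver value.
def ha_to_receiver(volume_level):
    # set_volume_level(): 0.7 -> receiver_vol = 100 - 70 = 30 -> receiver volume = -30
    return -(100 - (volume_level * 100))

def receiver_to_ha(receiver_volume):
    # update(): -30 -> (-30 / 100) + 1 = 0.7
    return (receiver_volume / 100) + 1

assert abs(ha_to_receiver(0.7) - (-30.0)) < 1e-9
assert abs(receiver_to_ha(-30.0) - 0.7) < 1e-9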
nishimotz/NVDARemote | addon/globalPlugins/remoteClient/input.py | Python | gpl-2.0 | 3,588 | 0.032609
import ctypes.wintypes as ctypes
import braille
import brailleInput
import globalPluginHandler
import scriptHandler
import inputCore
import api
INPUT_MOUSE = 0
INPUT_KEYBOARD = 1
INPUT_HARDWARE = 2
MAPVK_VK_TO_VSC = 0
KEYEVENTF_EXTENDEDKEY = 0x0001
KEYEVENTF_KEYUP = 0x0002
KEYEVENT_SCANCODE = 0x0008
KEYEVENTF_UNICODE = 0x0004
class MOUSEINPUT(ctypes.Structure):
_fields_ = (
('dx', ctypes.c_long),
        ('dy', ctypes.c_long),
('mouseData', ctypes.DWORD),
('dwFlags', ctypes.DWORD),
('time', ctypes.DWORD),
('dwExtraInfo', ctypes.POINTER(ctypes.c_ulong)),
)
class KEYBDINPUT(ctypes.Structure):
_fields_ = (
('wVk', ctypes.WORD),
('wScan', ctypes.WORD),
('dwFlags', ctypes.DWORD),
('time', ctypes.DWORD),
('dwExtraInfo', ctypes.POINTER(ctypes.c_ulong)),
)
class HARDWAREINPUT(ctypes.Structure):
_fields_ = (
('uMsg', ctypes.DWORD),
('wParamL', ctypes.WORD),
('wParamH', ctypes.WORD),
)
class INPUTUnion(ctypes.Union):
_fields_ = (
('mi', MOUSEINPUT),
('ki', KEYBDINPUT),
('hi', HARDWAREINPUT),
)
class INPUT(ctypes.Structure):
_fields_ = (
('type', ctypes.DWORD),
('union', INPUTUnion))
class BrailleInputGesture(braille.BrailleDisplayGesture, brailleInput.BrailleInputGesture):
def __init__(self, **kwargs):
super(BrailleInputGesture, self).__init__()
for key, value in kwargs.iteritems():
setattr(self, key, value)
self.source="remote{}{}".format(self.source[0].upper(),self.source[1:])
self.scriptPath=getattr(self,"scriptPath",None)
self.script=self.findScript() if self.scriptPath else None
def findScript(self):
if not (isinstance(self.scriptPath,list) and len(self.scriptPath)==3):
return None
module,cls,scriptName=self.scriptPath
focus = api.getFocusObject()
if not focus:
return None
if scriptName.startswith("kb:"):
# Emulate a key press.
return scriptHandler._makeKbEmulateScript(scriptName)
import globalCommands
# Global plugin level.
if cls=='GlobalPlugin':
for plugin in globalPluginHandler.runningPlugins:
if module==plugin.__module__:
func = getattr(plugin, "script_%s" % scriptName, None)
if func:
return func
# App module level.
app = focus.appModule
if app and cls=='AppModule' and module==app.__module__:
func = getattr(app, "script_%s" % scriptName, None)
if func:
return func
# Tree interceptor level.
treeInterceptor = focus.treeInterceptor
if treeInterceptor and treeInterceptor.isReady:
            func = getattr(treeInterceptor, "script_%s" % scriptName, None)
            # We are not keyboard input, so return whatever the tree interceptor provides (may be None).
return func
# NVDAObject level.
func = getattr(focus, "script_%s" % scriptName, None)
if func:
return func
for obj in reversed(api.getFocusAncestors()):
func = getattr(obj, "script_%s" % scriptName, None)
if func and getattr(func, 'canPropagate', False):
return func
# Global commands.
func = getattr(globalCommands.commands, "script_%s" % scriptName, None)
if func:
return func
return None
def send_key(vk=None, scan=None, extended=False, pressed=True):
i = INPUT()
i.union.ki.wVk = vk
if scan:
i.union.ki.wScan = scan
else: #No scancode provided, try to get one
i.union.ki.wScan = ctypes.windll.user32.MapVirtualKeyW(vk, MAPVK_VK_TO_VSC)
if not pressed:
i.union.ki.dwFlags |= KEYEVENTF_KEYUP
if extended:
i.union.ki.dwFlags |= KEYEVENTF_EXTENDEDKEY
i.type = INPUT_KEYBOARD
ctypes.windll.user32.SendInput(1, ctypes.byref(i), ctypes.sizeof(INPUT))
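A brief usage sketch for send_key() above: a full keystroke is a press followed by a release. The virtual-key code 0x41 ('A') is a standard Windows constant used here only for illustration; this snippet is not part of the original input.py.

# Illustrative sketch: emulate tapping the "A" key with send_key().
VK_A = 0x41  # Windows virtual-key code for the A key
send_key(vk=VK_A, pressed=True)   # key down; scan code is resolved via MapVirtualKeyW
send_key(vk=VK_A, pressed=False)  # key up; KEYEVENTF_KEYUP is set because pressed=False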