| repo_name (string, 5–92 chars) | path (string, 4–221 chars) | copies (string, 19 classes) | size (string, 4–6 chars) | content (string, 766–896k chars) | license (string, 15 classes) | hash (int64, ≈ ±9.22e18) | line_mean (float64, 6.51–99.9) | line_max (int64, 32–997) | alpha_frac (float64, 0.25–0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5–13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
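A minimal sketch (added for illustration, not part of the dataset itself): one row of the table above expressed as a typed Python record. The field names and types mirror the schema; how rows are actually loaded is not specified in this document, so this is only an assumed shape, not a real loading API.

```python
from dataclasses import dataclass

@dataclass
class CodeSample:
    # Column names and types follow the schema above.
    repo_name: str        # e.g. "ibagrak/algae"
    path: str             # file path inside the repository
    copies: str           # stored as a string in the table
    size: str             # file size in bytes, stored as a string
    content: str          # full source text of the file
    license: str          # one of 15 license identifiers (mit, gpl-2.0, ...)
    hash: int             # signed 64-bit content hash
    line_mean: float      # mean line length
    line_max: int         # longest line, in characters
    alpha_frac: float     # fraction of alphabetic characters
    autogenerated: bool
    ratio: float
    config_test: bool
    has_no_keywords: bool
    few_assignments: bool
```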
ibagrak/algae | gae/tests/test_i18n.py | 1 | 3855 |
import webapp2
import webtest
import unittest2
import copy
import urllib
import logging
from google.appengine.ext import testbed
from google.appengine.ext import db
from webapp2_extras import json
from webapp2_extras.appengine.auth import models as users
import app
import settings
import handlers
import re
from core import model
class I18NTest(unittest2.TestCase):
# Accept-Language header values for tests
hdr_english_accept = {'Accept-Language': 'en'}
hdr_other_accept = {'Accept-Language': 'da, fr'}
hdr_german_accept = {'Accept-Language': 'de'}
hdr_english_prefer = {'Accept-Language': 'en, de'}
hdr_german_prefer = {'Accept-Language': 'de, en'}
# text used to check whether a response is in English or German
txt_in_english = r'was created by'
txt_in_german = r'ist ein Werk von'
def setUp(self):
# Create a WSGI application.
application = webapp2.WSGIApplication(app.routes, debug = True, config = settings.app_config)
application.error_handlers[404] = handlers.common.handle_404
application.error_handlers[500] = handlers.common.handle_500
# Wrap the app with WebTest's TestApp.
self.testapp = webtest.TestApp(application)
# First, create an instance of the Testbed class.
self.testbed = testbed.Testbed()
# Then activate the testbed, which prepares the service stubs for use.
self.testbed.activate()
# Next, declare which service stubs you want to use.
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
def tearDown(self):
self.testbed.deactivate()
# test with 'only english'
def test_english(self):
response = self.testapp.get('/', headers=self.hdr_english_accept)
self.assertEqual(response.status_int, 200)
self.assertIn(self.txt_in_english, response.body)
self.assertNotIn(self.txt_in_german, response.body)
# test with 'only german'
def test_german(self):
response = self.testapp.get('/', headers=self.hdr_german_accept)
self.assertEqual(response.status_int, 200)
self.assertIn(self.txt_in_german, response.body)
self.assertNotIn(self.txt_in_english, response.body)
# test with 'english preferred'
def test_english_preferred(self):
response = self.testapp.get('/', headers=self.hdr_english_prefer)
self.assertEqual(response.status_int, 200)
self.assertIn(self.txt_in_english, response.body)
self.assertNotIn(self.txt_in_german, response.body)
# test with 'german preferred'
def test_german_preferred(self):
response = self.testapp.get('/', headers=self.hdr_german_prefer)
self.assertEqual(response.status_int, 200)
self.assertIn(self.txt_in_german, response.body)
self.assertNotIn(self.txt_in_english, response.body)
# test with 'other'
def test_other(self):
response = self.testapp.get('/', headers=self.hdr_other_accept)
self.assertEqual(response.status_int, 200)
self.assertIn(self.txt_in_english, response.body)
self.assertNotIn(self.txt_in_german, response.body)
# test with 'english', then request german
def test_german_explicit(self):
response = self.testapp.get('/', headers=self.hdr_english_accept)
response = self.testapp.get('/locale/de_DE', headers=self.hdr_english_accept)
self.assertEqual(response.status_int, 302)
response = self.testapp.get('/', headers=self.hdr_english_accept)
self.assertEqual(response.status_int, 200)
self.assertIn(self.txt_in_german, response.body)
self.assertNotIn(self.txt_in_english, response.body)
# test with 'german', then request english
def test_english_explicit(self):
response = self.testapp.get('/', headers=self.hdr_german_accept)
response = self.testapp.get('/locale/en_US', headers=self.hdr_german_accept)
self.assertEqual(response.status_int, 302)
response = self.testapp.get('/', headers=self.hdr_german_accept)
self.assertEqual(response.status_int, 200)
self.assertIn(self.txt_in_english, response.body)
self.assertNotIn(self.txt_in_german, response.body)
| mit | -3,105,536,637,220,046,300 | 35.367925 | 95 | 0.746304 | false | 3.144372 | true | false | false |
Mr-meet/PythonApplets | spiders_packege/xunlei_girl/PageProcessor.py | 1 | 1671 |
from bs4 import BeautifulSoup
import time
from spiders_packege.unit.ResourcesDowmloader import ResourcesDownloader
from spiders_packege.unit.ResourcesProcessor import ResourcesProcessor
class PageProcessor:
def __init__(self):
pass
def dealAllImg(basePath,list,webDriver):
baseDetailUrl='http://meitu.xunlei.com/detail.html?id='
for oneGirlDict in list:
block_detailid=oneGirlDict['block_detailid']
girlDetailUrl=baseDetailUrl+block_detailid
imgCount=0
if '花絮' not in oneGirlDict['title']:
girlPhotoList=PageProcessor.dealGirlPage(girlDetailUrl,webDriver)
for girlPhoto in girlPhotoList:
imgCount=imgCount+1
path=basePath+'\\【'+oneGirlDict['name']+'】'+'身高:'+oneGirlDict['height']
name=str(imgCount)+'.jpg'
ResourcesProcessor.saveFile(girlPhoto,path,name)
def dealGirlPage(url,webDriver):
imgResoures=list()
webDriver.get(url)
time.sleep(1.5)
webDriver.page_source.encode('utf-8','ignore') # this call fetches the page html
#webDriver.get_screenshot_as_file("1.jpg") # take a screenshot of the page
soup = BeautifulSoup(webDriver.page_source, "html5lib")
imgItems = soup.find_all('img', class_='portrait')
imgLen=len(imgItems)/2
baseImgUrl=imgItems[0]['src'][0:-5]
i=1
while i<int(imgLen):
imgUrl=baseImgUrl+str(i)+'.jpg'
imgObj=ResourcesDownloader.downloadResource(imgUrl)
imgResoures.append(imgObj)
i=i+1
return imgResoures
| mit | 5,768,753,674,798,561,000 | 36.860465 | 91 | 0.62815 | false | 3.215415 | false | false | false |
d1b/python-nmap-xml-output-parser | nmap_xml_to_sqlite.py | 1 | 4251 |
#!/usr/bin/env python
from lxml import etree
import sqlite3
import os
import datetime
from shows_hosts_with_open_port_and_service_desc import parse_opts
__program__ = 'python_convert_nmap_xml_to_sqlite_db'
__author__ = 'dave b. <db@d1b.org>'
__license__ = 'GPL v2'
class nmap_xml_to_sqlite:
def __init__(self, filename, store_p=os.path.expanduser('~/.nmap_pdb/')):
self.filename = filename
self.store_p = store_p
self._db_name = "nmap.db"
self.conn = None
self.cursor = None
def create_store_dir(self):
""" create the store directory if it doesn't exist """
if not os.path.exists(self.store_p):
os.mkdir(self.store_p, 16832)
def connect_to_db(self):
""" connect to the database """
self.conn = sqlite3.connect(self.store_p + self._db_name)
self.cursor = self.conn.cursor()
def create_db(self):
""" create the database tables if they don't exist """
self.cursor.execute("""create table if not exists
hosts(addr text, hostname text, scan_time datetime,
unique(addr, hostname, scan_time))""")
self.cursor.execute("""create table if not exists
open_port (addr text, port integer, product text,
protocol text, scan_time datetime, name text,
servicefp text, version text,
unique(protocol, port, addr, scan_time))""")
self.cursor.execute("""create table if not exists scan
(scan_time datetime, args text, unique (scan_time, args))""")
def insert_scan_into_db(self, time_of_scan, args):
""" insert a scan into the database """
sql_statement = """insert or ignore into scan (scan_time, args) VALUES (?, ?) """
self.cursor.execute(sql_statement, (time_of_scan, args))
def insert_host_into_db(self, addr, hostname, time_of_scan):
""" insert a host into the database """
sql_statement = """insert or ignore into hosts (addr, hostname, scan_time) VALUES (?, ?, ?) """
self.cursor.execute(sql_statement, (addr, hostname, time_of_scan))
def insert_port_into_db(self, addr, protocol, serv_d, time_of_scan):
""" insert a port into the database """
sql_statement = """insert or ignore into open_port (addr, port, product, protocol, scan_time,
name, servicefp, version) VALUES (?, ?, ?, ?, ?, ?, ?, ?)"""
self.cursor.execute(sql_statement, (addr, serv_d["portid"], serv_d["product"], \
protocol, time_of_scan, serv_d["name"], serv_d["servicefp"], serv_d["version"] ))
def insert_all_scan_info_into_db(self):
"""
XXX: make this method cleaner!
insert every host that has open ports in the nmap xml file and
a description for it (the port) into the database
"""
self._doc = etree.parse(self.filename)
time_of_scan, args = "", ""
for x in self._doc.xpath("//nmaprun"):
time_of_scan = datetime.datetime.fromtimestamp(float(x.attrib['start']))
args = x.attrib['args']
self.insert_scan_into_db(time_of_scan, args)
for x in self._doc.xpath("//host"):
hostname = "" #this will be the value of the last hostname node's name element
address = ""
desc = ""
protocol = ""
for host_n in x.xpath("hostnames/hostname/@name"):
hostname = host_n
for addr in x.xpath("address/@addr[@addrtype!='mac']"):
address = addr
break
self.insert_host_into_db(address, hostname, time_of_scan)
for open_p in x.xpath("ports/port[state[@state='open']]"):
protocol = open_p.attrib['protocol']
wrap_service_dict = self._service_wrap_attrib(list(open_p)[1].attrib)
wrap_service_dict["portid"] = open_p.attrib["portid"]
self.insert_port_into_db(address, protocol, wrap_service_dict, time_of_scan)
def _service_wrap_attrib(self, child_attrib):
""" some fields are optional - so enter a blank value for a key if it doesn't exist """
wrapped_dict_result = {}
for key in ["version", "product", "name", "servicefp"]:
if key in child_attrib.keys():
wrapped_dict_result[key] = child_attrib[key]
else:
wrapped_dict_result[key] = ""
return wrapped_dict_result
def close_and_commit_to_db(self):
""" commit to the database and close the cursor """
self.conn.commit()
self.cursor.close()
def main():
filename = parse_opts()
s = nmap_xml_to_sqlite(filename)
s.create_store_dir()
s.connect_to_db()
s.create_db()
s.insert_all_scan_info_into_db()
s.close_and_commit_to_db()
if __name__ == "__main__":
main()
| bsd-2-clause | -8,696,299,061,923,030,000 | 35.025424 | 97 | 0.669019 | false | 3.027778 | false | false | false |
hyphaltip/cndtools | util/genbank2fa.py | 1 | 1140 |
#!/usr/bin/env python
# Copyright (c) 2006
# Colin Dewey (University of Wisconsin-Madison)
# cdewey@biostat.wisc.edu
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys
from Bio import GenBank
from Bio import Fasta
parser = GenBank.RecordParser()
iterator = GenBank.Iterator(sys.stdin, parser)
farec = Fasta.Record()
while 1:
gbrec = iterator.next()
if gbrec is None:
break
farec.sequence = gbrec.sequence
farec.title = gbrec.locus
print farec
| gpl-2.0 | -2,270,592,912,808,029,700 | 26.142857 | 75 | 0.742982 | false | 3.596215 | false | false | false |
cgrebeld/pymel | maya/app/startup/basic.py | 1 | 4322 |
"""
This module is always imported during Maya's startup. It is imported from
both the maya.app.startup.batch and maya.app.startup.gui scripts
"""
import atexit
import os.path
import sys
import traceback
import maya
import maya.app
import maya.app.commands
from maya import cmds, utils
def setupScriptPaths():
"""
Add Maya-specific directories to sys.path
"""
# Extra libraries
#
try:
# Tkinter libraries are included in the zip, add that subfolder
p = [p for p in sys.path if p.endswith('.zip')][0]
sys.path.append( os.path.join(p,'lib-tk') )
except:
pass
# Per-version prefs scripts dir (eg .../maya8.5/prefs/scripts)
#
prefsDir = cmds.internalVar( userPrefDir=True )
sys.path.append( os.path.join( prefsDir, 'scripts' ) )
# Per-version scripts dir (eg .../maya8.5/scripts)
#
scriptDir = cmds.internalVar( userScriptDir=True )
sys.path.append( os.path.dirname(scriptDir) )
# User application dir (eg .../maya/scripts)
#
appDir = cmds.internalVar( userAppDir=True )
sys.path.append( os.path.join( appDir, 'scripts' ) )
def executeSetup(filename):
"""
Look for the given file name in the search path and execute it in the "__main__"
namespace
"""
try:
for path in sys.path:
scriptPath = os.path.join( path, filename )
if os.path.isfile( scriptPath ):
import __main__
execfile( scriptPath, __main__.__dict__ )
except Exception, err:
# err contains the stack of everything leading to execfile,
# while sys.exc_info returns the stack of everything after execfile
try:
# extract the stack trace for the current exception
etype, value, tb = sys.exc_info()
tbStack = traceback.extract_tb(tb)
finally:
del tb # see warning in sys.exc_type docs for why this is deleted here
sys.stderr.write("Failed to execute %s\n" % filename)
sys.stderr.write("Traceback (most recent call last):\n")
# format the traceback, excluding our current level
result = traceback.format_list( tbStack[1:] ) + traceback.format_exception_only(etype, value)
sys.stderr.write(''.join(result))
def executeUserSetup():
executeSetup('userSetup.py')
def executeSiteSetup():
executeSetup('siteSetup.py')
# Set up sys.path to include Maya-specific user script directories.
setupScriptPaths()
# Set up string table instance for application
maya.stringTable = utils.StringTable()
# Set up auto-load stubs for Maya commands implemented in libraries which are not yet loaded
maya.app.commands.processCommandList()
# Set up the maya logger before userSetup.py runs, so that any custom scripts that
# use the logger will have it available
utils.shellLogHandler()
if not os.environ.has_key('MAYA_SKIP_USERSETUP_PY'):
# Run the user's userSetup.py if it exists
executeSiteSetup()
executeUserSetup()
# Register code to be run on exit
atexit.register( maya.app.finalize )
# Copyright (C) 1997-2010 Autodesk, Inc., and/or its licensors.
# All rights reserved.
#
# The coded instructions, statements, computer programs, and/or related
# material (collectively the "Data") in these files contain unpublished
# information proprietary to Autodesk, Inc. ("Autodesk") and/or its licensors,
# which is protected by U.S. and Canadian federal copyright law and by
# international treaties.
#
# The Data is provided for use exclusively by You. You have the right to use,
# modify, and incorporate this Data into other products for purposes authorized
# by the Autodesk software license agreement, without fee.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND. AUTODESK
# DOES NOT MAKE AND HEREBY DISCLAIMS ANY EXPRESS OR IMPLIED WARRANTIES
# INCLUDING, BUT NOT LIMITED TO, THE WARRANTIES OF NON-INFRINGEMENT,
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, OR ARISING FROM A COURSE
# OF DEALING, USAGE, OR TRADE PRACTICE. IN NO EVENT WILL AUTODESK AND/OR ITS
# LICENSORS BE LIABLE FOR ANY LOST REVENUES, DATA, OR PROFITS, OR SPECIAL,
# DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES, EVEN IF AUTODESK AND/OR ITS
# LICENSORS HAS BEEN ADVISED OF THE POSSIBILITY OR PROBABILITY OF SUCH DAMAGES.
| bsd-3-clause | 1,507,193,115,536,711,700 | 36.912281 | 101 | 0.696668 | false | 3.745234 | false | false | false |
loxodes/fairbanks_hackathon_landsat_viewer | landsat_theater.py | 1 | 6073 |
# jon klein, jtklein@alaska.edu
# utility to display landsat-8 images for the decision theater north
# created during fairbanks 2015 hackathon
# mit license
import datetime
import time
import subprocess
import pdb
import re
import ast
import os
import argparse
from PIL import Image, ImageDraw, ImageFont
from geopy.geocoders import Nominatim
LATITUDE = 0
LONGITUDE = 1
LANDSAT_DATA_PATH = "/home/kleinjt/landsat/"
LABEL_COLOR = '#FFFFFF'
LABEL_FONT = 'FreeMono.ttf'
LABEL_BASE = (500, 500)
LABEL_SIZE = 400
PROCESSED_DIR = 'processed'
ANNOTATED_DIR = 'annotated'
TILE_DIR = '/home/kleinjt/repos/fairbanks_hackathon_landsat_viewer/Leaflet.Zoomify/tile_data'
ansi_escape = re.compile(r'\x1b[^m]*m') # regular expression to strip coloring from landsat return
# return (latitude, longitude) tuple from an address or place name
def get_latlong(place):
geolocator = Nominatim()
location = geolocator.geocode(place)
return (location.latitude, location.longitude)
# search for landsat records centered on a location
def landsat_search(location, startdate = None, enddate = None, maxcloud = None, maxreturns = 16, matchpath = True):
latlong_tuple = get_latlong(location)
latitude = str(latlong_tuple[LATITUDE])
longitude = str(latlong_tuple[LONGITUDE])
command = ['landsat', 'search']
command.append('--lat')
command.append(latitude)
command.append('--lon')
command.append(longitude)
if maxcloud:
command.append('--cloud')
command.append(str(maxcloud))
if maxreturns:
command.append('--limit')
command.append(str(maxreturns))
if startdate:
command.append('--start')
startdate = startdate.strftime("%m/%d/%Y")
command.append(startdate)
if enddate:
command.append('--end')
enddate = enddate.strftime("%m/%d/%Y")
command.append(enddate)
print ' '.join(command)
search = subprocess.check_output(command)
search = ansi_escape.sub('', search)
scene_dict = ast.literal_eval('\n'.join(search.split('\n')[1:-4]))
assert scene_dict['status'] == 'SUCCESS'
landsat_results = scene_dict['results']
landsat_result_dates = [time.strptime(lr['date'], "%Y-%m-%d") for lr in landsat_results]
# sort landsat results by date
landsat_records = [landsat_results for landsat_result_dates, landsat_results in sorted(zip(landsat_result_dates, landsat_results))]
# the landsat may fly over a spot using different paths, we might want to limit the search to records that use the same path
if matchpath:
path_matches = []
latest_path = landsat_records[-1]['path']
for record in landsat_records:
if record['path'] == latest_path:
path_matches.append(record)
landsat_records = path_matches
print('finished search')
return landsat_records
def landsat_download(landsat_records, bands = None, process = True, pansharpen = False):
command = ['landsat', 'download']
if process:
command.append('--process')
if pansharpen:
command.append('--pansharpen')
if bands:
command.append('--bands')
command.append(bands)
for record in landsat_records:
print('adding sceneID {} to download list'.format(record['sceneID']))
command.append(record['sceneID'])
print ' '.join(command)
print('starting download and processing, this may take some time...')
download = subprocess.check_output(command)
print('download and processing complete')
# find filename for landsat record image, create directory structure if it doesn't exist
def record_image_filename(record, imgdir, band = '432'):
ext = 'TIF'
if imgdir == ANNOTATED_DIR:
ext = 'PNG'
filename = '{}_bands_{}.{}'.format(record['sceneID'], band, ext)
directory = os.path.join(LANDSAT_DATA_PATH, imgdir, record['sceneID'])
if not os.path.exists(directory):
os.makedirs(directory)
full_filename = os.path.join(directory, filename)
return full_filename
# annotate processed images with date and location, then save them to ANNOTATED_DIR
def annotate_landsat_images(landsat_records, bands = '432', location = '', downsize = False, tile = False):
for record in landsat_records:
print('annotating {}'.format(record['date']))
filename = record_image_filename(record, PROCESSED_DIR)
outfile = record_image_filename(record, ANNOTATED_DIR)
record_file = open(filename, 'rb')
record_image = Image.open(filename)
draw = ImageDraw.Draw(record_image)
font = ImageFont.truetype(LABEL_FONT, 144)
label = 'Landsat {}\n{}, Band {}\n{}'.format(record['sat_type'], record['date'], bands, location)
draw.text(LABEL_BASE, label, fill = LABEL_COLOR, font = font)
# resize image for less memory usage..
if downsize:
newsize = (record_image.width * downsize, record_image.height * downsize)
record_image.resize(newsize)
record_image.save(outfile, 'png')
if tile:
tilename = record['sceneID']
tiledir = os.path.join(TILE_DIR, tilename)
if not os.path.exists(tiledir):
os.makedirs(tiledir)
command = ['tileup', '--in', outfile, '--output-dir', tiledir, '--prefix', tilename, '--verbose', '--auto-zoom', '6']
output = subprocess.check_output(command)
if __name__ == '__main__':
# see https://pyglet.readthedocs.org/en/latest/programming_guide/windowing.html
#display = platform.get_display(display_name)
#window = pyglet.window.Window(display = display)
#screens = display.get_screens()
#img = pyglet.image.load('test.jpg')
location = 'Chiniak, AK'
startdate = datetime.datetime(2014, 1, 1)
records = landsat_search(location, startdate = startdate, maxreturns = 20)
landsat_download(records)
annotate_landsat_images(records, location = location)
pdb.set_trace()
| mit | 6,940,265,505,476,265,000 | 33.117978 | 135 | 0.65503 | false | 3.61919 | false | false | false |
SweetPalma/Perver | perver.py | 1 | 18147 |
#!/usr/bin/python
# coding: utf-8
# Perver - tiny Python 3 server for perverts.
# Check README and LICENSE for details.
from sys import platform as os_platform
from hashlib import sha1 as hash_id
from urllib.parse import unquote
from mimetypes import guess_type
from traceback import format_exc
from functools import wraps
import threading as thread
import concurrent.futures
import logging as log
import asyncio
import base64
import time
import sys
import os
import re
# Version control:
__author__ = 'SweetPalma'
__version__ = '0.25'
# Custom internal exceptions:
class PerverException(Exception):
def __init__(self, message):
self.message = str(message)
# Handling HTTP requests:
class PerverHandler:
# Path substitution pattern:
path_pattern = re.compile(r'(\{.+?\})')
# Making server link:
def __init__(self, server):
self.server = server
# Handling requests:
@asyncio.coroutine
def handle_request(self, reader, writer):
# Preparing basic values:
peername = writer.get_extra_info('peername')
ip, port = peername[0], peername[1]
# Client basic values:
self.ip = ip
self.port = port
self.reader = reader
self.writer = writer
self.time = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
# Client info, used in logging:
client_info = ' '.join([
self.time,
self.ip,
])
# Terminator shortcut:
killer = PerverException
# Handling:
try:
# Reading header until EOF:
header, length = b'', 0
while True:
try:
# Reading:
line = yield from reader.readline()
# Setting request type and maximal request size at start:
if len(header) == 0:
if line.startswith(b'POST'):
request_type = b'POST'
request_max = self.server.post_max
else:
request_type = b'GET'
request_max = self.server.get_max
# Setting break:
if line == b'\r\n' or not line:
break
# Reading content length:
if line.startswith(b'Content-Length'):
length = int(line.split(b':')[1])
# Reading header:
header = header + line
# Some spooky errors during reading:
except:
break
# Reading content:
content = b''
if 0 < length < request_max:
content = yield from reader.readexactly(length)
# Close connection in case of big file:
elif length > request_max:
self.writer.close()
raise killer('REQUEST IS TOO BIG')
# Parsing data:
self.client = yield from self.build_client(header, content)
client = self.client
# In case of disconnection:
if not client:
self.writer.close()
raise killer('CLIENT CLOSED CONNECTION')
# Logging full information:
client_info = client_info + ' ' + ' '.join([
client.type,
client.path,
])
# Checking routing:
route_post = self.check_route(client.path, self.server.route_post)
route_get = self.check_route(client.path, self.server.route_get)
if client.type == 'POST' and route_post:
raise killer((yield from self.respond_script(*route_post)))
if client.type == 'GET' and route_get:
raise killer((yield from self.respond_script(*route_get)))
# Checking static files:
for dir, real in self.server.route_static.items():
if client.path.startswith(dir):
filepath = client.path.replace(dir, real, 1)
raise killer((yield from self.respond_file(filepath[1:])))
# Routing 404 error:
raise killer((yield from self.respond_error(404)))
# Timeout/Cancelled:
except concurrent.futures._base.CancelledError:
yield from self.respond_error(500)
log.info(client_info + ' TIMED OUT')
# Terminator:
except killer as exception:
log.info(client_info + ' ' + exception.message)
# Sending file:
@asyncio.coroutine
def respond_file(self, path):
try:
with open(path, "rb") as file:
size = os.path.getsize(path)
return (yield from self.respond(
status = 200,
content = file.read(),
type = self.get_mime(path),
length = size
))
# No file found:
except IOError:
return (yield from self.respond_error(404))
# Sending error message:
@asyncio.coroutine
def respond_error(self, number, custom=None):
error = {
400: 'Bad Request',
404: 'Not Found',
500: 'Internal Error',
}
error_text = number in error and error[number] or 'Unknown Error'
error_cont = str(number) + ' ' + error_text
return (yield from self.respond(number, error_cont))
# Executing client script and sending it response:
@asyncio.coroutine
def respond_script(self, script, keys={}):
script_result = (yield from script(self.client, **keys)) or b''
return (yield from self.respond(
status = self.client.status,
content = script_result,
header = self.client.header,
type = self.client.mime
))
# Pure data response:
@asyncio.coroutine
def respond(self, status, content=b'', type='text/html', length=None, header={}):
# Forming header:
encoding = self.server.encoding
self.header = 'HTTP/1.1 ' + str(status) + '\r\n'
self.form_header('Accept-Charset', encoding)
self.form_header('Server', 'Perver/' + __version__)
# Setting mime type (and encoding for text):
if type.startswith('text/'):
ctype = type + ';charset=' + encoding
else:
ctype = type
self.form_header('Content-Type', ctype)
# Working with custom headers:
for key, value in header.items():
self.form_header(key, value)
# Encoding unicode content:
if not isinstance(content, bytes):
content = content.encode(encoding)
# Forming content length:
length = length or len(content)
self.form_header('Content-Length', str(length))
# Forming response:
header = self.header.encode(encoding)
response = header + b'\r\n' + content + b'\r\n'
# Go:
self.writer.write(response)
self.writer.write_eof()
# Done:
return status
# Making client ID using cut SHA hash on client IP and User-Agent:
def get_id(self, clnt):
ident = str(clnt.ip) + str(clnt.agent)
ident_encoded = ident.encode(self.server.encoding)
hashed = hash_id(ident_encoded).digest()[:self.server.length_id]
cooked = base64.urlsafe_b64encode(hashed).decode(self.server.encoding)
return cooked[:-2] # Removed two last minuses for better readability.
# Power of regexp!
def check_route(self, path, map):
# Pure path:
if path in map:
return (map[path], {})
# Path with substitutions:
right_path, groups = None, sys.maxsize
for route in map:
# Removing retarded slash in the end of path:
path = path.endswith('/') and path[:-1] or path
# Patterns:
path_pattern = '^' + self.path_pattern.sub('([^/]+)', route) + '$'
matched = re.match(path_pattern, path)
# Testing route:
if matched:
keys = [key[1:-1] for key in self.path_pattern.findall(route)]
values = list(matched.groups())
if len(values) < groups:
groups = len(values)
right_path = (map[route], dict(zip(keys, values)))
# In case of fail:
return right_path
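# Added illustration (not in the original source): for a registered route
# '/user/{id}' and an incoming path '/user/42', the substituted pattern is
# '^/user/([^/]+)$', so check_route returns (handler, {'id': '42'}); when
# several routes match, the one with the fewest substitution groups wins.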
# Appending certain header lines:
def form_header(self, arg, var):
self.header = self.header + arg + ': ' + var + '\r\n'
# Retrieving type:
def get_mime(self, path):
fname, extension = os.path.splitext(path)
if extension == '':
return guess_type(path)[0] or 'text/html'
else:
return guess_type(path)[0] or 'application'
# Parsing GET and COOKIES:
@asyncio.coroutine
def parse(self, path):
# Preparing %key%=%value% regex:
get_word = '[^=;&?]'
pattern = '(%s+)=(%s+)' % (get_word, get_word)
# Unquoting map:
unq = lambda x: map(unquote, x)
# Replacing retarded pluses to spaces in path:
path = path.replace('+', ' ')
# Working:
matched = [unq(x) for x in re.findall(pattern, path)]
return dict(matched)
# Parsing POST multipart:
@asyncio.coroutine
def parse_post(self, content, type, boundary):
# Establishing default encoding:
encoding = self.server.encoding
# Parsing multipart:
if type == 'multipart/form-data':
# Splitting request to fields:
fields = content.split(boundary)
fields_dict = {}
# Turning `em to dictionary:
for field in fields:
# Checking:
field_rows = field.split(b'\r\n\r\n')
if len(field_rows) == 2:
header, value = field_rows
value = value[:-2]
# Decoding key:
key = re.findall(b';[ ]*name="([^;]+)"', header)[0]
key = key.decode(encoding)
# Checking content-type:
ctype = re.search(b'Content-Type: ([^;]+)$', header)
# File upload field:
if ctype:
if value == b'' or value == b'\r\n':
continue
ctype = ctype.group()
fname = re.findall(b';[ ]*filename="([^;]+)"', header)
fname = len(fname) == 1 and fname[0] or b'unknown'
fields_dict[key] = {
'filename': fname.decode(encoding),
'mime': ctype.decode(encoding),
'file': value,
}
# Text field:
else:
fields_dict[key] = value.decode(encoding)
return fields_dict
# Parsing average urlencoded:
else:
if isinstance(content, bytes):
content = content.decode(encoding)
return self.parse(content)
# Parsing client data:
@asyncio.coroutine
def build_client(self, header_raw, content_raw=b''):
# Safe dict values:
def safe_dict(dictionary, value, default):
if value in dictionary:
return dictionary[value]
else:
return default
# Decoding:
try:
# Decoding header:
header_decoded = header_raw.decode(self.server.encoding)
# Three basic values: request type, path and version:
pattern = r'^(GET|POST) ([A-Za-z0-9_.~?&%/\-]+) (HTTP/1.1|HTTP/1.0)'
unpacked = re.findall(pattern, header_decoded)
if len(unpacked) > 0:
type, path, version = re.findall(pattern, header_decoded)[0]
else:
raise PerverException('WRONG CLIENT HEAD')
# Splitting GET and PATH:
if '?' in path:
path, GET = path.split('?')
else:
GET = ''
# Raw header to header dictionary:
pattern = '([^:]+):[ ]*(.+)\r\n'
header = dict(re.findall(pattern, header_decoded))
# Basic client variables:
client = PerverClient()
client.version = version
client.type, client.path = type, unquote(path)
client.path_dir = '/'.join(unquote(path).split('/')[:-1])
# Client header:
client.header_raw, client.content_raw = header_raw, content_raw
client.content_type = safe_dict(header, 'Content-Type', '')
client.content_length = safe_dict(header, 'Content-Length', 0)
client.agent = safe_dict(header, 'User-Agent', 'Unknown')
client.mime = self.get_mime(client.path)
client.form_type = client.content_type.split(';')[0]
# Server client values:
client.ip, client.port, client.time = self.ip, self.port, self.time
client.id = self.get_id(client)
# POST boundary:
boundary = re.findall('boundary=(-*[0-9]*)', client.content_type)
if len(boundary) > 0:
boundary = boundary[0].encode(self.server.encoding)
else:
boundary = b''
# POST/GET/COOKIES:
client.get = yield from self.parse(GET)
client.post = yield from self.parse_post(content_raw, client.form_type, boundary)
client.cookie = yield from self.parse(safe_dict(header, 'Cookie', ''))
# Client ID cookie, can be overridden later:
client.header['Set-Cookie'] = 'id=' + client.id
# Client server-side container:
if not client.id in self.server.client:
self.server.client[client.id] = {}
client.container = self.server.client[client.id]
# Fixing client path dir:
if client.path_dir == '':
client.path_dir = '/'
# Done!
return client
# In case of fail:
except BaseException as exc:
log.warning('Error parsing user request.')
yield from self.respond_error(400)
raise exc
# Script client:
class PerverClient:
# GET/POST arguments:
get = {}
post = {}
# Client headers:
status = 200
header = {}
cookie = {}
mime = 'text/html'
# Redirection:
def redirect(self, page):
""" Redirects client to a certain page using 302 status code. """
self.header['Location'] = page
self.status = 302
return 'Redirecting...'
# Templating:
def template(self, text, **replace):
""" Used in templating - works same as str.format. """
return text.format(**replace)
# Rendering page:
def render(self, filename, **replace):
""" Same as template, but used in files. Returns templated file. """
file = open(filename, 'r')
return self.template(file.read(), **replace)
# Retrieving file:
def file(self, filename):
""" Simply returns file contents, binary. """
self.mime = guess_type(filename)[0]
file = open(filename, 'rb')
return file.read()
# Own header:
def set_header(self, key, value):
""" Sets custom client HTTP header. """
self.header[key] = value
# Cookies:
def set_cookie(self, name, value):
""" Sets custom client cookie, overriding default Perver ID Cookie. """
self.header['Set-Cookie'] = name + '=' + value +';'
# Status:
def set_status(self, status):
""" Sets custom response status, overriding default 200. """
self.status = status
# Mime:
def set_mime(self, mime):
""" Sets custom mime response. """
self.mime = mime
# Making HTML template:
def html(self, body, head='', doctype='html'):
""" HTML-correct template for nice pages. """
doctype = '<!DOCTYPE %s>' % doctype
head = '\r\n'.join(['<head>', head, '</head>'])
body = '\r\n'.join(['<body>', body, '</body>'])
return '\r\n'.join([doctype, head, body])
# Making forms:
def form(self, action, method, *inputs, id='', multipart=False):
""" Used for building forms. """
if multipart:
enctype='multipart/form-data'
else:
enctype='application/x-www-form-urlencoded'
form_desc = (action, method, id, enctype)
html = '<form action="%s" method="%s" id="%s" enctype="%s">' % form_desc
inputs = [list(inp.items()) for inp in inputs]
for input in inputs:
args = ' '.join('%s="%s"' % arg for arg in input)
html = '\r\n'.join([html, '<input %s><br>' % args])
return ''.join([html, '</form>'])
# Multipart form:
def form_multipart(self, *args, **kargs):
""" Works same as previous, but with multipart argument set to True."""
kargs['multipart'] = True
return self.form(*args, **kargs)
# Part of the previous function:
def input(self, name, **kargs):
""" Single form input. """
return dict(name=name, **kargs)
# Input submit:
def input_submit(self, value='Submit', **kargs):
""" Form submit button. """
return dict(type='submit', value=value, **kargs)
# Perver Server itself:
class Perver:
# PARAMETERS:
# Main server values:
encoding = 'utf-8'
backlog = 5
timeout = 30
# Maximal requests length:
get_max = 1024 * 8
post_max = 1024 * 1024 * 100
# Client ID length:
length_id = 10
# I highly recommend not to change this value.
# Routing paths:
route_get = {}
route_post = {}
route_static = {}
# Active clients list:
client = {}
# METHODS:
# Routing GET:
# DECORATOR:
def get(self, path):
""" Binds all GET requests from path to certain function. """
def decorator(func):
@wraps(func)
def wrapper(*args, **kwds):
return asyncio.coroutine(func)(*args, **kwds)
self.route_get[path] = wrapper
return wrapper
return decorator
# Routing POST:
# DECORATOR:
def post(self, path):
""" Binds all POST requests from path to certain function. """
def decorator(func):
@wraps(func)
def wrapper(*args, **kwds):
return asyncio.coroutine(func)(*args, **kwds)
self.route_post[path] = wrapper
return wrapper
return decorator
# Global routing:
# DECORATOR:
def route(self, path):
""" Binds all POST/GET requests from path to certain function. """
def decorator(func):
@wraps(func)
def wrapper(*args, **kwds):
return asyncio.coroutine(func)(*args, **kwds)
self.route_post[path] = wrapper
self.route_get[path] = wrapper
return wrapper
return decorator
# Adding static route:
def static(self, web, local):
""" Uses local path for serving static files for web requests. """
local = local.replace('\\', '/')
if not (local.startswith('/') and os.path.isabs(local)):
local = '/' + local
if not local.endswith('/'):
local = local + '/'
self.route_static[web] = local
# Starting:
def start(self, host='', port=80):
""" Starts the (mostly) infinite loop of server. """
# Configuring output:
self.host, self.port = host, port
log.basicConfig(level=log.INFO, format='%(levelname)s: %(message)s')
# Nice header for Windows:
if os_platform == 'win32':
os.system('title Perver v' + __version__)
# Trying running:
try:
self._loop = asyncio.get_event_loop()
self._server = asyncio.start_server(
self.handler,
host=host,
port=port,
backlog=self.backlog,
reuse_address=True,
)
self._server = self._loop.run_until_complete(self._server)
start_time = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
log.info('Perver has started at ' + start_time + '.')
self._loop.run_forever()
# In case of Skype on 80 port, access denials and other socket errors:
except OSError:
log.error('OS error, probably server is already running at that port \
or user is not sudoer.')
# Stop?
def stop(self):
""" Stops the Perver. """
self._server.close()
self._loop.stop()
# HTTP request handler:
@asyncio.coroutine
def handler(self, reader, writer):
try:
handler = PerverHandler(self)
yield from asyncio.wait_for(
handler.handle_request(reader, writer),
timeout=self.timeout
)
except KeyboardInterrupt:
log.warning('Interrupted by user.')
self.stop()
except SystemExit:
self.stop()
except asyncio.TimeoutError:
pass
except:
log.warning('Exception caught! \r\n' + format_exc())
# Pythonic async database
class PerverDB:
# Initialization:
def __init__(self, filename):
pass
# Not standalone:
if __name__ == '__main__':
print('Perver is not a standalone application. Use it as framework.')
print('Check "github.com/SweetPalma/Perver" for details.')
| mit | 1,527,089,193,175,442,400 | 25.686765 | 87 | 0.639885 | false | 3.170335 | false | false | false |
maku77/contest | codejam/2014_Round1A/A-ChargingChaos.py | 1 | 2170 |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Contest: Google Code Jam - 2014 Round A [2014-04-26]
# Problem: A. Charging Chaos
# URL: https://code.google.com/codejam/contest/2984486/dashboard
# Author: Masatoshi Ohta
# Strategy:
# Exploit the fact that xor-ing two bit strings reveals where they differ.
# Taking the xor of the bit string of init[0], the first entry of the initial
# configuration list init[], with each entry desired[i] of the desired list
# yields a candidate switch pattern.
# So there are N candidates (one per entry of desired[]), but at this point
# we do not yet know whether a given pattern is actually correct.
# Apply (xor) the candidate pattern to the remaining init[1..N-1] as well;
# if every result matches some entry of desired[], then that pattern
# (combination of switches) is a correct one.
# Finally, among the correct switch patterns, pick the one with the fewest 1 bits.
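# Added worked example (not part of the original comments): with
# init[0] = 0b1100 and desired[j] = 0b1010, the candidate pattern is
# 0b1100 ^ 0b1010 = 0b0110, i.e. flip switches 2 and 3. The pattern is kept
# only if p ^ init[i] is found in desired[] for every other init[i];
# its cost is count_bits(0b0110) = 2.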
import sys
def read_int(): return int(sys.stdin.readline())
def read_ints(): return [int(x) for x in sys.stdin.readline().split()]
def read_strs(): return sys.stdin.readline().split()
INF = float('inf')
def count_bits(val):
count = 0
while val > 0:
if val & 1 == 1:
count += 1
val >>= 1
return count
def solve():
N, L = read_ints()
inits = [int(x, 2) for x in read_strs()]
desired = [int(x, 2) for x in read_strs()]
patterns = map(lambda x: x ^ inits[0], desired)
min_change = INF
for p in patterns:
for i in range(1, N):
if not (p ^ inits[i] in desired):
# pattern p is not acceptable by inits[i]
break
else:
# pattern p seems acceptable
c = count_bits(p)
if c < min_change:
min_change = c
if min_change == INF:
return 'NOT POSSIBLE'
else:
return min_change
if __name__ == '__main__':
T = read_int()
for i in range(T):
print('Case #{}: {}'.format(i+1, str(solve())))
| mit | -1,554,268,937,330,754,300 | 28.37931 | 70 | 0.589202 | false | 2.078049 | false | false | false |
MSeifert04/nddata | nddata/nddata/mixins/ndreduce.py | 1 | 16463 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ...utils.copyutils import do_copy
import numpy as np
from ..nduncertainty_stddev import StdDevUncertainty
from ..nduncertainty_var import VarianceUncertainty
from ...utils.inputvalidation import as_unsigned_integer
__all__ = ['NDReduceMixin']
class NDReduceMixin(object):
"""Mixin to provide methods for `~nddata.nddata.NDDataBase` which are \
applied along one dimension (axis) of the data.
These methods take the ``mask`` besides the ``data`` into account and
calculate based on the error of the result.
.. note::
The ``unit`` and ``meta`` of the result will be a copy of the original
`~nddata.nddata.NDDataBase` instance. ``wcs`` and ``flags`` as well but
this might change because they **should** be subject to a reduction
themselves- depending on the type of attribute.
"""
def _reduce_get_others(self):
# Meta and unit should stay the same for the reduce functions.
kwargs = {'meta': do_copy(self.meta),
'unit': self.unit,
'wcs': do_copy(self.wcs),
'flags': do_copy(self.flags)}
# TODO: WCS and Flags may also be subject to changes because of the
# reduction, but currently just copy them.
return kwargs
def reduce_average(self, axis=0, weights=None):
"""Compute the average along an axis with specified weights.
Parameters
----------
axis: positive `int`, optional
The axis (dimension) along which to compute the average. Must not
be ``None``. If you are looking for overall statistics use:
:meth:`~nddata.nddata.mixins.NDStatsMixin.stats`.
Default is ``0``.
weights : `numpy.ndarray`-like or None, optional
The weights for averaging. Must be scalar or have the same shape as
the ``data`` or the same length as ``data.shape[axis]``. If the
weights are ``None`` it will call :meth:`reduce_mean`.
Default is ``None``.
Returns
-------
ndd : `~nddata.nddata.NDDataBase`-like
The result will have the same class as the instance this method was
called on. The results ``data`` contains the average of the
calculation while the ``mask`` is set in case any element had no
values to average and the ``uncertainty`` will be the variance of
the average (already corrected by the number of valid elements).
Examples
--------
Calculate the weighted mean of a 2 x 5 array along the first axis::
>>> import numpy as np
>>> from nddata.nddata import NDData
>>> ndd = NDData([[3, 2, 1, 1, 4], [2, 2, 2, 2, 2]],
... mask=np.array([[0, 1, 0, 1, 0], [0, 1, 0, 0, 0]],
... dtype=bool))
>>> avg = ndd.reduce_average(axis=0, weights=[1, 1.5])
>>> avg
NDData([ 2.4, 0. , 1.6, 2. , 2.8])
>>> avg.mask
array([False, True, False, False, False], dtype=bool)
>>> avg.uncertainty
VarianceUncertainty([ 0.096, 0. , 0.096, 0. , 0.384])
.. note::
The correction for the resulting uncertainty is the total number of
valid values **without** taking any degrees of freedom into
account.
"""
# If no weights are given this is essentially a mean reduce. So return
# the mean reduction result.
if weights is None:
return self.reduce_mean(axis=axis)
# To allow also list-like weights convert them to a numpy array here.
# Since this doesn't copy existing np.arrays this is relatively cheap if
# it's already an array.
weights = np.asarray(weights)
# The axis must be integer and because of later restrictions it also
# needs to be positive.
axis = as_unsigned_integer(axis)
# Get the data and the mask from the instance attributes
data = self.data
mask = self._get_mask_numpylike()
# Setup the masked array based on the data and mask saved in the
# instance. Important profiling information about this np.any is
# described in reduce_mean. This should stay the way it is.
if np.any(mask):
marr = np.ma.array(data, mask=mask, copy=False)
avg_func = np.ma.average
else:
marr = np.array(data, copy=False)
avg_func = np.average
# Abort the call in case the array is 1D, for 1D statistics see the
# NDStatsMixin.
if marr.ndim < 2:
raise ValueError('reduce functions need the data to have more '
'than one dimension.')
# Calculate the reduced data with np.average. The weights will be
# checked in here and an appropriate exception is raised if the shape
# does not match.
red_data = avg_func(marr, axis=axis, weights=weights)
# There is no builtin ufunc to calculate the weighted standard
# deviation so we need to do use the average again. This will
# calculate the variance of the average, but we have a
# VarianceUncertainty and the user can convert it later if he wants
# standard deviations.
# To calculate the difference we need to expand the reduced dimension
# of the reduced data again otherwise broadcasting could fail.
diff = (marr - np.expand_dims(red_data, axis=axis)) ** 2
red_uncert, eff_weights = avg_func(diff, axis=axis, weights=weights,
returned=True)
# To get the variance of the mean we need to divide this reduced
# variance by the number of valid values. This number of valid values
# are contained in the "eff_weights".
# So we don't end up with division by 0 problems set the values where
# we have no valid value to 1. Since the average of the uncertainty
# contains zeros where no valid element was present - the corrected
# variance will be calculated there as 0/1 = 0 which is exactly what
# we would expect. And not the 0/0 = nan we would otherwise have.
no_valid_value = (eff_weights == 0)
eff_weights[no_valid_value] = 1
# To get the variance of the mean we divide by the number of valid
# elements.
red_uncert = VarianceUncertainty(red_uncert / eff_weights)
# TODO: In theory it could be that we need some bias (dof) correction
# here. So either allow a ddof parameter here or clearly state that
# this isn't done here!
# TODO: The number of valid elements would make a good flag array
# maybe include it?
# The "red_data" is a masked array so the resulting class should
# split data and mask by itself.
return self.__class__(red_data, uncertainty=red_uncert,
**self._reduce_get_others())
def reduce_mean(self, axis=0):
"""Compute the mean along an axis.
Parameters
----------
axis: positive `int`, optional
The axis (dimension) along which to compute the mean. Must not
be ``None``. If you are looking for overall statistics use:
:meth:`~nddata.nddata.mixins.NDStatsMixin.stats`.
Default is ``0``.
Returns
-------
ndd : `~nddata.nddata.NDDataBase`-like
The result will have the same class as the instance this method was
called on. The results ``data`` contains the mean of the
calculation while the ``mask`` is set in case any element had no
values to average and the ``uncertainty`` will be the variance of
the mean (already corrected by the number of valid elements).
Examples
--------
Calculate the mean of a 2 x 5 array along the first axis::
>>> import numpy as np
>>> from nddata.nddata import NDData
>>> ndd = NDData([[3, 2, 1, 1, 4], [2, 2, 2, 2, 2]],
... mask=np.array([[0, 1, 0, 1, 0], [0, 1, 0, 0, 0]],
... dtype=bool))
>>> avg = ndd.reduce_mean(axis=0)
>>> avg
NDData([ 2.5, 0. , 1.5, 2. , 3. ])
>>> avg.mask
array([False, True, False, False, False], dtype=bool)
>>> avg.uncertainty
VarianceUncertainty([ 0.125, 0. , 0.125, 0. , 0.5 ])
.. note::
This method is identical to :meth:`reduce_average` with
``weights=None``.
.. note::
The correction for the resulting uncertainty is the total number of
valid values **without** taking any degrees of freedom into
account.
"""
# Much the same as average but without weights and instead of average
# with mean and std
axis = as_unsigned_integer(axis)
data = self.data
mask = self._get_mask_numpylike()
# np.mean and np.var work on masked arrays so can create a normal numpy
# array if no value is masked. This will probably be a lot faster.
# IMPORTANT: Line profiling shows that in case of big arrays the
# _reduce_get_mask() function takes only 0.1% of the total run-time and
# the np.any() 0-3% so this could make a difference if we special cased
# the case when no mask is present but NOT much.
# On the other hand the np.mean on a plain numpy array is approximately
# 6-10 times faster than on masked arrays so that actually makes a huge
# difference. So even if we have a mask it could be wise to check if
# there are any masked values at all.
# Therefore: This should stay as is!
if np.any(mask):
marr = np.ma.array(data, mask=mask, copy=False)
marr_is_masked = True
else:
marr = np.array(data, copy=False)
marr_is_masked = False
# Abort the call in case the array is 1D, for 1D statistics see the
# NDStatsMixin.
if marr.ndim < 2:
raise ValueError('reduce functions need the data to have more '
'than one dimension.')
red_data = np.mean(marr, axis=axis)
# np.var and np.std have the same runtime but since we would need to
# take the square root of the number of valid values calculating the
# variance and then just dividing by the number of valid pixel is much
# faster than calculating the std and then diving by the SQRT of the
# number of valid pixel. In case someone wants the resulting
# uncertainty in standard deviations he can cast it to one!
red_uncertainty = np.var(marr, axis=axis)
# We need to determine the number of valid pixels ourselves; fortunately
# this number is just the sum of unmasked values along the specified
# axis, with the correction for cases where no valid value is present. This
# correction is described in reduce_average.
if marr_is_masked:
n_values = (~marr.mask).sum(axis=axis)
no_valid_value = (n_values == 0)
n_values[no_valid_value] = 1
else:
# In case no values were masked the number of valid values is just
# the length of the array along the given axis.
n_values = marr.shape[axis]
red_uncertainty = VarianceUncertainty(red_uncertainty / n_values)
return self.__class__(red_data, uncertainty=red_uncertainty,
**self._reduce_get_others())
def reduce_median(self, axis=0):
"""Compute the median along an axis.
Parameters
----------
axis: positive `int`, optional
The axis (dimension) along which to compute the median. Must not
be ``None``. If you are looking for overall statistics use:
:meth:`~nddata.nddata.mixins.NDStatsMixin.stats`.
Default is ``0``.
Returns
-------
ndd : `~nddata.nddata.NDDataBase`-like
The result will have the same class as the instance this method was
called on. The results ``data`` contains the median of the
calculation while the ``mask`` is set in case any element had no
values for the computation and the ``uncertainty`` will be the
median absolute standard deviation of the median (already corrected
by the number of valid elements).
Examples
--------
Calculate the median of a 2 x 4 array along the first axis::
>>> import numpy as np
>>> from nddata.nddata import NDData
>>> ndd = NDData([[3, 2, 1, 1], [2, 2, 2, 2]],
... mask=np.array([[0, 1, 0, 1], [0, 1, 0, 0]],
... dtype=bool))
>>> avg = ndd.reduce_median(axis=0)
>>> avg
NDData([ 2.5, 0. , 1.5, 2. ])
>>> avg.mask
array([False, True, False, False], dtype=bool)
>>> avg.uncertainty
StdDevUncertainty([ 0.52417904, 0. , 0.52417904, 0. \
])
.. note::
The correction for the resulting uncertainty is the total number of
valid values **without** taking any degrees of freedom into
account.
"""
# This method is some hybrid from average and mean reduce. Only the
# real differences are commented upon. For further details on the
# rationale see these other methods.
axis = as_unsigned_integer(axis)
data = self.data
mask = self._get_mask_numpylike()
if np.any(mask):
marr = np.ma.array(data, mask=mask, copy=False)
# np.median doesn't work on masked arrays so we need to use
# np.ma.median here
med_func = np.ma.median
marr_is_masked = True
else:
marr = np.array(data, copy=False)
med_func = np.median
marr_is_masked = False
if marr.ndim < 2:
raise ValueError('reduce functions need the data to have more '
'than one dimension.')
red_data = med_func(marr, axis=axis)
# Constant is taken from astropy mad_std
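# (Added note: 1.4826... is 1 / Phi^-1(3/4), the factor that scales a median
# absolute deviation into a standard-deviation estimate for normally
# distributed data.)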
# IMPORTANT: Using the astropy.stats.mad_std would calculate the median
# again, since we already have the median along the axis we can omit
# this expensive recalculation - but then we cannot reuse mad_std. But
# especially for large masked arrays the speed gain is huge.
diff = np.abs(marr - np.expand_dims(red_data, axis=axis))
red_uncertainty = 1.482602218505602 * med_func(diff, axis=axis)
if marr_is_masked:
n_values = (~marr.mask).sum(axis=axis)
no_valid_value = (n_values == 0)
n_values[no_valid_value] = 1
else:
n_values = marr.shape[axis]
# This time we work with standard deviations because that's what
# the median absolute deviation approximates so we need to take the
# square root of the n_values correction factor
n_values = np.sqrt(n_values)
red_uncertainty = StdDevUncertainty(red_uncertainty / n_values)
# FIXME: Strangely the result has an uncertainty different from 0 when
# all values are masked here. This is not the case for average or mean
# but it seems to be a problem with the median. I guess this is because
# the np.expand_dims doesn't preserve the mask and something weird
# happens so that the median of the "diff" doesn't realize it's all
# masked and returns something. Maybe this could be a numpy Bug but for
# now I just make it work by replacing them manually:
if marr_is_masked:
red_uncertainty.data[no_valid_value] = 0
return self.__class__(red_data, uncertainty=red_uncertainty,
**self._reduce_get_others())
| bsd-3-clause | 6,610,416,277,689,173,000 | 42.323684 | 79 | 0.586892 | false | 4.213719 | false | false | false |
diophantus7/plugin.video.romwod | resources/lib/wistia.py | 1 | 1879 |
import re
import requests
import xbmc
import json
try:
from BeautifulSoup import BeautifulSoup
except ImportError:
from bs4 import BeautifulSoup
_JSON_URL = "http://fast.wistia.com/embed/medias/%s.json"
_IFRAME_URL = "http://fast.wistia.net/embed/iframe/%s"
class ResolveError(Exception):
def __init__(self, message):
self.message = message
class WistiaExtractor:
def __init__(self, html_page, format):
self.html_page = html_page
self.video_id = self._extract_video_id()
self._format = format
def _extract_video_id(self):
bs = BeautifulSoup(self.html_page)
video_block = json.loads(bs.find('div', {'data-react-class':'VideoView'})['data-react-props'])
return video_block['video']['external_id']
#return re.search('wistia_async_([0-9a-z]*) ', str(bs)).group(1)
def _download_json(self):
s = requests.Session()
s.headers.update({'referer':_IFRAME_URL % self.video_id})
req = s.get(_JSON_URL % self.video_id)
return req.json()
def get_video_url(self):
json_data = self._download_json()
try:
url = next(d['url'] for d in json_data['media']['unnamed_assets']
if d['display_name'] == self._format and d['ext'] == 'm3u8')
except:
video_data = [d for d in json_data['media']['unnamed_assets']
if d['status'] == 2 and 'opt_vbitrate' in d
and 'display_name' in d and
'p' in d['display_name']]
if not video_data:
raise ResolveError("No video found.")
url = max(video_data,
key=lambda d: int(d['display_name'].strip('p')))['url']
xbmc.log("Fallback to url: %s" % url)
return url
| gpl-3.0 | 3,844,811,310,062,879,000 | 31.396552 | 102 | 0.548164 | false | 3.662768 | false | false | false |
open-io/oio-swift | tests/unit/common/middleware/test_hashedcontainer.py | 1 | 2935 |
# Copyright (C) 2016-2020 OpenIO SAS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import sys
import unittest
from swift.common import swob, utils
from swift.common.swob import Request
from oioswift.common.middleware import hashedcontainer
from oio.cli.common import clientmanager
# Hack PYTHONPATH so "test" is swift's test directory
sys.path.insert(1, os.path.abspath(os.path.join(__file__, '../../../../..'))) # noqa
from test.unit.common.middleware.helpers import FakeSwift # noqa: E402
class TestHashedContainer(unittest.TestCase):
GLOBAL_CONF = {
'sds_namespace': 'OPENIO',
'sds_default_account': 'OPENIO',
'sds_proxy_url': '127.0.0.1:666'
}
def setUp(self):
self.app = FakeSwift()
# prevent a call to oio-proxy
clientmanager.ClientManager.nsinfo = {
'options': {'flat_bitlength': '17'}}
self.hc = hashedcontainer.filter_factory(self.GLOBAL_CONF)(self.app)
def call_app(self, req, app=None):
if app is None:
app = self.app
self.authorized = []
def authorize(req):
self.authorized.append(req)
if 'swift.authorize' not in req.environ:
req.environ['swift.authorize'] = authorize
req.headers.setdefault("User-Agent", "Melted Cheddar")
status = [None]
headers = [None]
def start_response(s, h, ei=None):
status[0] = s
headers[0] = h
body_iter = app(req.environ, start_response)
with utils.closing_if_possible(body_iter):
body = b''.join(body_iter)
return status[0], headers[0], body
def _check_conversion(self, path_in, path_out):
self.app.register('PUT', path_out, swob.HTTPCreated, {})
req = Request.blank(path_in, method='PUT')
resp = self.call_app(req, app=self.hc)
self.assertEqual(resp[0], "201 Created")
self.assertEqual(self.app.calls, [('PUT', path_out)])
def test_default_config(self):
self._check_conversion(
'/prefix/229/358493922_something',
'/v1/OPENIO/6C800/prefix/229/358493922_something')
def test_custom_bits(self):
self.hc = hashedcontainer.filter_factory(
self.GLOBAL_CONF, bits=12)(self.app)
self._check_conversion(
'/prefix/229/358493922_something',
'/v1/OPENIO/6C8/prefix/229/358493922_something')
| apache-2.0 | 5,798,349,336,250,656,000 | 32.352273 | 85 | 0.643271 | false | 3.596814 | true | false | false |
andreas-h/pybtex
|
pybtex/style/sorting/invyear_author_title.py
|
1
|
2418
|
# Copyright (c) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Andrey Golovizin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from pybtex.style.sorting import BaseSortingStyle
from datetime import datetime
import locale
class SortingStyle(BaseSortingStyle):
name = 'invyear_author_title'
def sorting_key(self, entry):
if entry.type in ('book', 'inbook'):
author_key = self.author_editor_key(entry)
else:
author_key = self.persons_key(entry.persons['author'])
        time = int(entry.fields.get('year', '0') or '0') * 100
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
try:
time += datetime.strptime(entry.fields.get('month', '').strip(), "%B").month
        except ValueError:
pass
return (-time, author_key, entry.fields.get('title', ''))
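    # Illustrative key (assumed entry, not from the original module): an entry with
    # year='2010' and month='March' gives time = 2010*100 + 3 = 201003, so the key
    # starts with -201003 and more recent entries sort before older ones.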
def persons_key(self, persons):
return ' '.join(self.person_key(person) for person in persons)
def person_key(self, person):
return ' '.join((
' '.join(person.prelast() + person.last()),
' '.join(person.first() + person.middle()),
' '.join(person.lineage()),
)).lower()
def author_editor_key(self, entry):
if entry.persons.get('author'):
return self.persons_key(entry.persons['author'])
elif entry.persons.get('editor'):
return self.persons_key(entry.persons['editor'])
|
mit
| 2,218,421,661,150,355,000
| 41.421053
| 88
| 0.674938
| false
| 4.050251
| false
| false
| false
|
sdiehl/rpygtk
|
rpygtk/lib/rsession.py
|
1
|
16192
|
# Copyright 2009-2010 Stephen Diehl
#
# This file is part of RPyGTK and distributed under the terms
# of the GPLv3 license. See the file LICENSE in the RPyGTK
# distribution for full details.
import rpy2.robjects as robjects
from rpy2 import rinterface
import time
import numpy
from ui import prefs
import threading
import thread
ri = rinterface
ro = robjects
r = robjects.r
env = robjects.r.globalenv()
#R Convenience Wrappers
summary = robjects.r.summary
plot = robjects.r.plot
ls = robjects.r.ls
rm = robjects.r.rm
png = r.png
svg = r.svg
postscript = r.postscript
pdf = r.pdf
devoff = r['dev.off']
devcur = r['dev.cur']
devlist = r['dev.list']
X11 = r['X11']
typeof = lambda obj: (r['typeof'](obj))[0]
null = r['as.null']()
#TODO: We should convert these so that they actually return booleans, aka
# is_ts = lambda obj: r['is.ts'](obj)[0]
is_ts = r['is.ts']
is_array = r['is.array']
is_null = r['is.null']
#Using 'time' messes with the threads
times = r['time']
sapply = r['sapply']
df = r['data.frame']
library = r['library']
#Initialize the R Interface
rinterface.initr()
gdata_is_available = False
xtable_is_available = False
nlstools_is_available = False
#Check what libraries we have installed
try:
library('gdata')
gdata_is_available = True
except rinterface.RRuntimeError,RError:
print('Could not load the gdata library, importing of Excel spreadsheets is disabled.')
try:
library('xtable')
xtable_is_available = True
except rinterface.RRuntimeError,RError:
print('Could not load the xtable library, exporting LaTeX is disabled.')
try:
library('nlstools')
nlstools_is_available = True
except rinterface.RRuntimeError,RError:
print('Could not load the nlstools library, summary of nls is disabled.')
class rdict(dict):
'''A dictionary type that does not permit None types or empty strings in values'''
def __setitem__(self, key, value):
        if value is not None and value != '':
#Fetch the parent class (dict) and invoke __setitem__ just like normal but with the
#condition that we don't allow empty values
super(rdict, self).__setitem__(key, value)
def arguments_to_string(d):
    '''Take a dictionary of arguments and return a string of comma separated key,value pairs'''
    #If we end up using the low-level interface (which we do too much), then we need a way of
    #passing arguments
argument_str = ''
for key,value in d.iteritems():
if type(value) is str:
value = '"' + str(value) + '"'
#R likes capitalized logicals
if type(value) is bool:
value = str(value).upper()
argument_str += ',' + str(key)+'='+str(value)
return argument_str
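# Minimal illustration (assumed inputs, not part of the original module):
# arguments_to_string({'n': 3, 'plot': True, 'name': 'x'}) yields something like
# ',n=3,plot=TRUE,name="x"' (leading comma included; key order follows dict iteration).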
def translate_types(type, reverse=False):
    '''Translate between R types and Python types and vice versa, reverse=False implies translation to python'''
#r -> python
if not reverse:
if type == 'double':
return float
elif type == 'string':
return str
elif type == 'integer':
return int
elif type == 'character':
return str
elif type == 'logical':
return bool
elif type == int:
return type
elif type == float:
return type
elif type == str:
return type
else:
            raise ValueError('Cannot cast')
'''Translate between R types and Python types'''
#python -> r
if reverse:
if type == int:
return 'integer'
elif type == float:
return 'double'
elif type == str:
return 'character'
elif type == bool:
return 'logical'
elif type == 'double':
return 'double'
elif type == 'character':
return 'character'
elif type == 'integer':
return 'integer'
else:
            raise ValueError('Cannot cast')
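# Illustrative mappings (derived from the branches above):
# translate_types('double') -> float and translate_types('logical') -> bool (R -> Python);
# translate_types(int, reverse=True) -> 'integer' (Python -> R).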
def translate_to_vector(column):
    '''Take a vanilla list or numpy column array and return the "equivalent" r vector form'''
if (type(column) is not numpy.ndarray) and (type(column) is not list):
print('Cannot translate non-numpy or list object to R Vector')
return
if type(column) is list:
return ro.FloatVector(column)
    if column.dtype == numpy.dtype(int):
        return ro.IntVector(column)
    elif column.dtype == numpy.dtype(float):
        return ro.FloatVector(column)
    elif column.dtype == numpy.dtype(bool):
        return ro.BoolVector(column)
    elif column.dtype == numpy.dtype(str):
        return ro.StrVector(column)
else:
print 'Mismatched (or strange) datatype in numpy array'
return
def column_extractor(data,output='rvector'):
'''Take any object (R Dataframe, List of Lists, Numpy Array, Python Array)
and return an iterator on its columns which yields either a numpy array,
RVector or a vanilla list
output='rvector'|'list'|'numpy'
'''
if type(data) == type(r['data.frame']):
for i in range(data.ncol()):
column = numpy.array(data.rx2(i))
yield translate_to_vector(column)
elif type(data) is list:
for i in range(len(data[0])):
column = lambda n: [x[n] for x in data]
yield robjects.FloatVector(column(i))
elif type(data) is numpy.ndarray:
#Check to see if we have a column
if len(data.shape)<2:
yield robjects.FloatVector(data)
else:
for i in range(data.shape[1]):
column = data[:,i]
yield robjects.FloatVector(column)
#This is a 'Borg' design pattern, all items appended get stored
#in the master shared state. We'll use this for threads so that
#we can kill every thread from the main thread
class ThreadHandler(list):
#Shared state
shared = []
def append(self,value):
super(ThreadHandler,self).append(value)
self.shared.append(value)
def remove(self,value):
super(ThreadHandler,self).remove(value)
self.shared.remove(value)
def get_shared(self):
return self.shared
def iter_shared(self):
for item in self.shared:
yield item
class RPlotThread( threading.Thread ):
halt = False
args = {}
data = {}
cmd_stack = []
#use f(**dict) to pass a dictionary of args to func
def __init__(self,data=dict(),args=dict(),type=None,export=None,par_mode=False):
self.args = args
self.data = data
self.type = type
self.export = export
self.par_mode = par_mode
threading.Thread.__init__(self)
def run (self):
#Plot types
plot = r.plot
hist = r.hist
barplot = r.barplot
pie = r.pie
qqnorm = r.qqnorm
qqplot= r.qqplot
#Export types
if self.export:
#Shut down all other plots before we do anything involving exporting
r['graphics.off']()
filename,extension = self.export
if extension not in filename:
                filename += extension
if extension == '.svg':
svg(filename)
elif extension == '.png':
png(filename)
elif extension == '.ps':
postscript(filename)
elif extension == '.pdf':
#There is a rather strange bug where points get rendered as
#letters unless we toggle useDingbats=False
pdf(filename,useDingbats=False)
if self.par_mode:
rows, columns = self.par_mode
r('par(mfrow=c(%s,%s))' % (rows,columns))
else:
#Don't bother opening a new window if there already is an open one
#unless the user has specified that every new plot should open in a
#new window
if thereArePlotWindowsOpen() and prefs.get_pref('single_plot'):
#Clear the previous plots, unless we're in par mode
if not self.par_mode:
r('plot.new()')
else:
rows, columns = self.par_mode
r('par(mfrow=c(%s,%s))' % (rows,columns))
else:
X11()
if self.type=='scatter':
x = self.data['x']
y = self.data['y']
plot(x,y,**self.args)
if self.type=='scatter.smooth':
x = self.data['x']
y = self.data['y']
r['scatter.smooth'](x=x,y=y,**self.args)
if self.type=='matplot':
df = robjects.r['data.frame'](**self.data)
plot(df,**self.args)
if self.type=='histogram':
x = self.data['x']
hist(x,**self.args)
if self.type=='bar':
x = self.data['x']
barplot(x,**self.args)
if self.type=='pie':
x = self.data['x']
pie(x,**self.args)
if self.type=='qqnorm':
x = self.data['x']
qqnorm(x,**self.args)
if self.type=='qqplot':
x = self.data['x']
y = self.data['y']
qqplot(x,y,**self.args)
if self.type=='boxplot':
data = r['data.frame'](**self.data)
r['boxplot'](data,**self.args)
if self.type=='general':
'''data is passed directly to plot'''
plot(self.data,**self.args)
if self.export:
#Run through an secondary commands before we save the image
            #Iterate over a copy so that removing commands while looping is safe
            for c in self.cmd_stack[:]:
                cmd,args,kwargs = c
                apply(cmd,args,kwargs)
                self.cmd_stack.remove(c)
devoff()
return
self.t = threading.Timer(0.1, self.refresh)
self.t.start()
def add_cmd(self,command,*args,**kwargs):
'''Add a command to be executed after the plot is created'''
        #Since this is a separate thread we have to have a stack to handle
        #commands passed after the timer is started
self.cmd_stack.append((command,args,kwargs))
def refresh(self):
while self.halt == False:
            for c in self.cmd_stack[:]:
cmd,args,kwargs = c
apply(cmd,args,kwargs)
self.cmd_stack.remove(c)
rinterface.process_revents()
time.sleep(0.1)
if self.halt == True:
self.t.cancel()
#----------------------------------------
# RPy Wrapper Classes
#----------------------------------------
class robject(object):
'The base class for all robjects'
#Human readable name of object
type = None
#Where the object should be viewed 'frame' or 'output'
outputsTo = None
#Reference to the RPy2 object
object = None
#Label of the object, should be identical to the name of the object
#in the globalEnv of RPy2
label = None
#Icon to show in object sidebar
icon = None
def __init__(self,*args,**kwargs):
apply(self.construct,args,kwargs)
def construct(self):
pass
def refresh(self):
pass
#---------------------------------
# Data Storing Objects
#---------------------------------
class dataframe(robject):
'''We store the data in three ways
columns -- a dictionary {column name, column type}
column_data -- a dictionary {column name, rvector}
rawdata -- a numpy array of the data
object -- the reference to the actual robject in the rsession
'''
# Ok to summarize this non-intuitive code...
# Say we have a dataframe in R, when we bring it in to python
# we store the data in a couple of ways
#
# V1 V2
# 1 0.1 5
# 2 0.2 6
# 3 0.3 7
# 4 0.4 8
# 5 0.5 9
#
# rawdata would hold the numpy array [ [0.1,0.2,0.3,0.4,0.5] , [5,6,7,8,9] ]
# rownames would hold the array [1,2,3,4,5]
# columns would hold column labels and their types {'V1':float , 'V2':int}
# column_data would hold {'V1': [0.1,0.2,0.3,0.4,0.5] , 'V2':[5,6,7,8,9]}
columns = {}
column_data = {}
rawdata = None
rownames = None
object = None
isColumn = False
outputsTo = 'frame'
icon = './ui/icons/dataframe.png'
type = 'Data Frame'
def construct(self,data,columns=None,label=None,rownames=None):
'''Take an array of data and dict of columns and create a dataframe class with
self.object as a reference to the rpy2 object
'''
self.rawdata = data
self.label = label
self.rownames = rownames
self.columns = columns
if len(data.shape)==1:
self.isColumn = True
d = dict()
for i,col in enumerate(column_extractor(data)):
column_name = columns.keys()[i]
#This gets passed to R
d[column_name] = col
#This is stored on the python side
self.column_data[column_name] = col
self.object = r['data.frame'](**d)
def refresh(self):
'''Rebuild the R object from the internal numpy array and return the R object'''
self.construct(data=self.rawdata,columns=self.columns,label=self.label,rownames=self.rownames)
return self.object
def __getitem__(self,key):
'''Returns a string containing the R code to access the column
data.frame $ column
'''
return self.label + '$' + key
class timeseries(robject):
start = None
end = None
frequency = None
times = None
deltat = None
columns = {}
column_data = {}
rawdata = None
rownames = None
object = None
isColumn = False
outputsTo = 'frame'
icon = './ui/icons/timeseries.png'
def construct(self,data,label=None,start=None,end=None,frequency=None,deltat=None):
self.rawdata = data
self.label = label
self.columns = {'V1':float}
data=translate_to_vector(data)
self.column_data = {'V1':data}
args = rdict({
'start':start,
'end':end,
'frequency':frequency,
'deltat':deltat,
})
self.type = "Time Series"
self.object = r['ts'](data,**args)
self.times = times(self.object)
self.rownames = self.times
#This is a 'hidden' variable that doesn't show up in the frame view
#but is still accessible if called directly by plots, stat tests, etc...
self.column_data['(Time)'] = r['as.numeric'](self.times)
def __getitem__(self,key):
if key == '(Time)':
return self.times
class dist(robject):
def construct(self):
pass
class matrix(robject):
def construct(self):
pass
class linear_model(robject):
type = 'Linear Model'
icon = './ui/icons/description.png'
outputsTo = 'output'
def construct(self,fit,label=None):
self.object = fit
self.label = label
#This isn't going to make it into this release
#numpy.array(r['coef'](fit))
#self.coefficents = dataframe(coefs,columns={'Residuals':float},label=label+'$'+'coefficents')
self.coefs = r['coef'](fit)
self.residuals = r['resid'](fit)
self.fitted = r['fitted'](fit)
self.text = str(fit)
class description(robject):
    '''A text description of some statistical function: mean, anova...'''
object = None
outputsTo = 'output'
label = None
text = None
type = 'Description'
icon = './ui/icons/description.png'
    '''XTable can't handle some data types so we need to run table()'''
tabelize = False
def construct(self,object,label=None,tabelize=False):
self.object = object
self.label = label
self.tabelize = tabelize
#Cache the output text so we aren't calling it constantly
self.text = str(object)
def thereArePlotWindowsOpen():
#dev.list is a vector, so is_null returns a vector apparently
window_list_is_empty = is_null(r['dev.list']())[0]
if window_list_is_empty:
return False
else:
return True
|
gpl-3.0
| 519,104,289,013,916,160
| 27.862745
| 112
| 0.573617
| false
| 3.826087
| false
| false
| false
|
cechrist/cardoon
|
cardoon/devices/memductor.py
|
1
|
6848
|
"""
:mod:`memductor` -- Basic (nonlinear) memductor
-----------------------------------------------
.. module:: memductor
.. moduleauthor:: Carlos Christoffersen
"""
import numpy as np
from cardoon.globalVars import const, glVar
import cardoon.circuit as cir
import cppaddev as ad
class Device(cir.Element):
r"""
Memductor
---------
Connection diagram::
+ Vin - Iin
_______________ ---->
0 |_ _ _ _| | 1
o----| |_| |_| |_| | |-------o External view
|_____________|_|
Device equation:
.. math::
q(t) = q(\varphi(t))
\frac{dq}{dt} = \frac{dq}{d\varphi} \frac{d\varphi}{dt}
I_{in} = W(\varphi) V_{in}
:math:`W(\varphi)` is the memductance function.
Netlist example::
memd:m1 1 0 w = '1e3 * (np.cosh(1e6 * phi)-1.)'
Notes:
* the memductance function (``W(phi)``) is given as an
expression in the ``w`` parameter. The independent variable is
the memductor flux: ``phi``. Constants and mathematical
functions can also be used in the definition of ``w``.
* The initial flux can be adjusted with the ``phi0`` parameter
* the memductor loses its memory as the capacitor discharges
through Rleak (Rleak is necessary to ensure a unique DC
solution). The values of C and Rleak can be adjusted to change
the time constant
* The capacitor value has no effect on the memductance, but has
an effect in the internal model: a larger capacitor will
produce lower voltages at vc.
Internal Topology
+++++++++++++++++
The internal implementation uses a gyrator and adds one internal
node: ``vc``. The voltage at ``vc`` is equal to ``(gyr/C) * phi``,
where ``gyr`` is a global variable that can be changed with the
``.options`` keyword::
--> Iin
0 o---------+
|
+ /|\ i = w(phi) * Vin
Vin ( | )
- \V/ phi = (C/gyr) * vc
|
1 o---------+
Term: vc
+ +----------------+--------+---------,
| | | |
/^\ ----- / /^\
vc ( | ) gyr Vin ----- C \ Rleak ( | ) phi0 * gyr / C / Rleak
\|/ | / \|/
| | | |
- +----------------+--------+---------'
|
--- tref
-
"""
# Device category
category = "Basic components"
# devtype is the 'model' name
devType = "memd"
# Number of terminals. If numTerms is set here, the parser knows
# in advance how many external terminals to expect. By default the
# parser makes no assumptions and allows any number of connections
#
numTerms = 2
isNonlinear = True
paramDict = dict(
w = ('Memductance function W(phi)', 'Siemens', str, 'abs(1e-3*phi)'),
phi0 = ('Initial flux', 'Vs', float, 0.),
c = ('Auxiliary capacitance', 'F', float, 10e-6),
        rleak = ('Leakage resistance', 'Ohms', float, 1e9)
)
def __init__(self, instanceName):
# Here the Element constructor must be called. Do not connect
# internal nodes here.
cir.Element.__init__(self, instanceName)
def process_params(self):
# Called once the external terminals have been connected and
# the non-default parameters have been set. Make sanity checks
# here. Internal terminals/devices should also be defined
# here. Raise cir.CircuitError if a fatal error is found.
# remove any existing internal connections
self.clean_internal_terms()
# Test parameters
if not self.rleak:
raise cir.CircuitError(self.instanceName
                                   + ': leakage resistance cannot be zero')
if not self.c:
raise cir.CircuitError(self.instanceName
+ ': capacitance can not be zero')
        # test w expression to make sure it is valid
try:
phi = .5
result = eval(self.w)
except Exception as e:
raise cir.CircuitError(
'{0}: Invalid expression: {1} ({2})'.format(self.instanceName,
self.w, e))
try:
abs(result)
except TypeError:
raise cir.CircuitError(
'{0}: Invalid expression: {1} (result not a number)'.format(
self.instanceName, self.w))
# Connect internal terminal
tvc = self.add_internal_term('vc', 'V')
tref = self.add_reference_term()
# Set up source if phi0 is given
if self.phi0 != 0.:
self.isDCSource = True
self.sourceOutput = (tref, tvc)
self._i0 = self.phi0 * glVar.gyr / self.c / self.rleak
# Setup gyrator
# Access to global variables is through the glVar
self.linearVCCS = [((0,1), (tref, tvc), glVar.gyr),
((tvc, tref), (tvc, tref), 1./self.rleak)]
self.linearVCQS = [((tvc, tref), (tvc, tref), self.c)]
self.controlPorts = [(0,1), (tvc, tref)]
self.csOutPorts = [(0,1)]
self.qsOutPorts = []
def eval_cqs(self, vPort, getOP = False):
"""
Returns memductor current given input voltage. Charge vector
is empty
vPort[0] = memductor voltage
vPort[1] = internal cap voltage
iout[0] = memductor current
If getOP == True, a dictionary with OP variables is returned
"""
phi = self.c * vPort[1] / glVar.gyr
W = eval(self.w)
iout = np.array([W * vPort[0]])
if getOP:
return {'v': vPort[0],
'i': iout[0],
'phi': phi,
'W': W}
else:
return (iout, np.array([]))
# Use automatic differentiation for eval and deriv function
eval_and_deriv = ad.eval_and_deriv
eval = ad.eval
def get_OP(self, vPort):
"""
Calculates operating point information
vPort[0] = memductor voltage
vPort[1] = internal cap voltage
"""
return self.eval_cqs(vPort, getOP=True)
def get_DCsource(self):
return self._i0
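# Illustrative check of the default memductance expression (assumed values, not part
# of the original model): with phi = 0.5, eval('abs(1e-3*phi)') evaluates to 0.0005,
# i.e. W(0.5) = 0.5 mS for the default 'w' parameter string.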
|
gpl-3.0
| 7,188,436,473,438,162,000
| 32.242718
| 79
| 0.475905
| false
| 4.014068
| false
| false
| false
|
Tomographer/tomographer
|
test/pytest_t_tools_densedm.py
|
1
|
1933
|
#!/usr/bin/env python
from __future__ import print_function
import re
import numpy as np
import numpy.testing as npt
import logging
logging.basicConfig(level=logging.DEBUG)
import unittest
# import the module
import tomographer.tools.densedm
import tomographer.tools.densedm.mle
import tomographer
import tomographer.densedm
class SimulateMeasurements(unittest.TestCase):
def test_sim(self):
rho_sim = np.array([[0.9, 0], [0, 0.1]])
Mk = tomographer.tools.densedm.PauliMeasEffectsQubit
n = 1000
d = tomographer.tools.densedm.simulate_measurements(rho_sim, Mk, n)
self.assertEqual(d.Nm[0] + d.Nm[1], n)
self.assertEqual(d.Nm[2] + d.Nm[3], n)
self.assertEqual(d.Nm[4] + d.Nm[5], n)
# Hoeffding's inequality: Prob( |N(+) - p*n| > eps*n ) < 2*exp(-2*eps^2*n)
#
# --> so the probability to deviate by more than 0.1 fraction is bounded by
# 2*exp(-2 * 0.1**2 * n) ~ 4e-9 (for n=1000)
self.assertLessEqual( (d.Nm[0] - 0.5*n) , 0.1*n )
self.assertLessEqual( (d.Nm[2] - 0.5*n) , 0.1*n )
self.assertLessEqual( (d.Nm[4] - 0.9*n) , 0.1*n )
class Mle(unittest.TestCase):
def test_mle(self):
Emn = sum(tomographer.tools.densedm.PauliMeasEffectsQubit, [])
Nm = np.array([250, 250, 250, 250, 500, 0]) # really extreme example
llh = tomographer.densedm.IndepMeasLLH(tomographer.densedm.DMTypes(2))
llh.setMeas(Emn, Nm)
(rho_MLE, d) = tomographer.tools.densedm.mle.find_mle(llh)
# we know the exact solution, rho_MLE = |0><0|
npt.assert_array_almost_equal(rho_MLE,
np.array([[1, 0], [0, 0]]))
# normally, this is not needed as we are being run via pyruntest.py, but it might be
# useful if we want to run individually picked tests
if __name__ == '__main__':
unittest.main()
|
mit
| 5,261,438,763,180,251,000
| 27.426471
| 84
| 0.606829
| false
| 2.830161
| true
| false
| false
|
unball/strategy
|
simple_strategy/potential_fields.py
|
1
|
3902
|
#!/usr/bin/env python
import numpy as np
#Convert vector from cartesian to polar coordinate
#@vector = [x, y]
def cart2polar(vector):
x = vector[0]
y = vector[1]
r = np.sqrt(x*x + y*y)
th = np.arctan2(y, x)
return np.array([r, th])
#Convert vector from polar to cartesian coordinate
#@vector = [r, th]
def polar2cart(vector):
r = vector[0]
th = vector[1]
x = r*np.cos(th)
y = r*np.sin(th)
return np.array([x,y])
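# Illustrative round trip (assumed values): cart2polar([3.0, 4.0]) gives roughly
# [5.0, 0.9273] (r = 5, th = atan2(4, 3)), and polar2cart([5.0, 0.9273]) returns
# approximately [3.0, 4.0] again, up to floating point error.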
class AttractivePotentialField:
"""Radial attractive potential field
@origin Point that starts the field - Cartesian coordinates
@magnitude Radius of field """
def __init__(self, origin, magnitude, min_magnitude=1):
self.origin = origin
self.magnitude = magnitude
self.min_magnitude = min_magnitude
def calculate_force(self, position):
return self.origin
#difference = difference*np.array([self.magnitude, 1])
#if(difference[0] < self.min_magnitude):
# difference[0] = self.min_magnitude
class RepulsivePotentialField:
"""Radial repulsive potential field
@origin Point that starts the field - Cartesian coordinates
    @range_field Distance from origin within which the field acts
    @magnitude_weight Weight with which the field acts"""
def __init__(self, origin, range_field, magnitude_weight):
self.origin = origin
self.range_field = range_field
self.magnitude_weight = magnitude_weight
def calculate_force(self, position):
difference = cart2polar(position - self.origin)
if(difference[0] < self.range_field):
difference[0] = (self.range_field - difference[0])/(self.range_field/self.magnitude_weight)
else:
difference = np.array([0,0])
return polar2cart(difference)
class TangencialPotentialField:
"""Tangencial potential field
@origin Point that starts the field - Cartesian coordinates
"""
def __init__(self, origin, magnitude):
self.origin = origin
self.magnitude = magnitude
def calculate_force(self, position):
difference = cart2polar(self.origin - position)
difference[0] = self.magnitude
difference[1] += np.pi/2.5
print difference[1]
return polar2cart(difference)
class SelectivePotentialField:
"""Selective Potential field
    set a combination of fields that allows kicking the ball inside
    of a conic region
    @origin Point that starts the field - Cartesian coordinates
    @direction Vector that indicates the direction
    @magnitude
    """
def __init__(self, origin, width, range_field, direction, goal,
mag_attractive_field, mag_tangencial_field):
self.origin = origin
self.width = width
self.range_field = range_field
self.direction = direction
self.mag_attractive_field = mag_attractive_field
self.mag_tangencial_field = mag_tangencial_field
self.goal = goal
def calculate_force(self, position):
angle = cart2polar(self.direction)[1]
difference = position - self.origin
force = np.array([0, 0])
weight = 1.0
if((np.fabs(angle - cart2polar(difference)[1]) <= weight*self.width) and (cart2polar(difference)[0] <= 0.4)):
attractive_field = AttractivePotentialField(self.goal, self.mag_attractive_field)
force = attractive_field.calculate_force(position)
print 'ME SEGURA TO INDO'
else:
tangencial_field = TangencialPotentialField(self.origin, self.mag_tangencial_field)
force = tangencial_field.calculate_force(position)
print 'RODA A ROLETA'
return force
class ConstantPotentialField:
def __init__(self, field_force):
self.field_force = field_force
def calculate_force(self):
return self.field_force
|
mit
| -7,559,067,572,238,686,000
| 32.350427
| 117
| 0.644541
| false
| 3.806829
| false
| false
| false
|
cmpitg/blutkit
|
blutkit/gui/keycombination.py
|
1
|
8206
|
#
# Copyright 2013 © Nguyễn Hà Dương (cmpitgATgmailDOTcom)
#
# This file is part of Blutkit.
#
# Blutkit is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Blutkit is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Blutkit. If not, see <http://www.gnu.org/licenses/>.
#
import builtins
from blutkit.gui.keyconstants import *
from blutkit.gui.keyutils import *
class SingleKeyCombination:
"""
This class takes care of a single key/mouse combination and should NOT
be used directly. Every key/mouse combination should be constructed
using KeyCombination class.
"""
def __init__(self, key=None, mods=set(), mouse=[]):
self.key = key
self.mods = mods
self.mouse = mouse
def __key(self):
"""
The __key function serves the purpose of equality test and
hash.
"""
return (self.key,
frozenset(sorted(self.mods)), # Do we need to sort?
frozenset(self.mouse))
def __eq__(x, y):
"""Equality test."""
return x.__key() == y.__key()
def __hash__(self):
"""Quick and easy hash function."""
return hash(self.__key())
def fromString(keycombstr):
"""
Constructor, new SingleKeyCombination from string. The string format
belongs to one of the following examples:
SingleKeyCombination.fromString("Mouse_Left")
SingleKeyCombination.fromString("Mouse_Left-Mouse_Right")
SingleKeyCombination.fromString("Control-Shift-c")
SingleKeyCombination.fromString("Alt-Control-Shift-h")
This class serves as a helper class of KeyCombination.
"""
if keycombstr.strip() == "":
return SingleKeyCombination()
key = None
mods = set()
mouse = []
keycombstr = keycombstr.lower()
# Special case: bind the "-" button
if keycombstr == "-":
return SingleKeyCombination(toQtKey(keycombstr),
mods,
mouse)
for part in keycombstr.split('-'):
# Special case: if "-" is a part of the combination, then
# part is ""
if part == "":
currentKey = "-"
else:
currentKey = part
if isKeymod(currentKey):
mods.add(toKeymod(currentKey))
elif isMouseButton(currentKey):
mouse.append(toMouseButton(currentKey))
else:
key = toQtKey(currentKey)
return SingleKeyCombination(key, mods, mouse)
def keyToString(self):
"""Return key as string."""
        if self.key:
            return toKeyStr(self.key)
else:
return ""
def modsToString(self):
"""Return key mods as string."""
res = [capitalizeStr(toKeyStr(mod)) for mod in self.mods]
return "-".join(sorted(res))
def mouseToString(self):
"""Return mouse chords as string."""
res = [capitalizeStr(MouseButtonToStr[button])
for button in self.mouse]
return "-".join(res)
def toString(self):
"""Return the string representation of a SingleKeyCombination."""
formatStr = self.modsToString()
if self.key:
# Key event
formatStr += "-" + capitalizeStr(toKeyStr(self.key))
else:
# Mouse chord event
formatStr += "-" + capitalizeStr(self.mouseToString())
# Strip the redundant "-" if there's no modifier
if self.mods == set():
formatStr = formatStr[1:]
return formatStr.strip()
def __str__(self):
"""Return the string representation of a SingleKeyCombination."""
return self.toString()
def __repr__(self):
"""Return the repr form of a SingleKeyCombination."""
return self.toString()
class KeyCombination(builtins.tuple):
"""
This class takes care of complete key/mouse combination, including modal
ones. This class has no constructor since it's a subclass of
builtins.tuple. To construct programmatically, use its static methods
fromString, fromSingleKeyCombination, or fromKeys. Best practice:
use kbd and pass a string.
The string argument is case-insensitive.
E.g.
kbd('Control-S')
kbd('Control-C Control-Z')
kbd('Mouse_Left-Mouse_Right')
"""
def __init__(self, *args):
super(KeyCombination, self).__init__(args)
def isModal(keystr):
"""
Determine whether or not the string representing a key combination
is modal.
"""
return len(keystr) >= 2 and keystr.find(" ") != -1
def fromString(keystr):
"""
Construct a new Combination from string. The string format belongs
to one of the following examples:
KeyCombination.fromString("-")
KeyCombination.fromString("Control-S")
KeyCombination.fromString("Control-&")
KeyCombination.fromString("Control-X Control-Q")
KeyCombination.fromString("Mouse_Left-Mouse_Right")
Modal key combination is supported. Every key combination should
be constructed using this class, not SingleKeyCombination.
"""
if KeyCombination.isModal(keystr):
keycombList = map(lambda k: SingleKeyCombination.fromString(k),
keystr.split(" "))
return KeyCombination(keycombList)
else:
return KeyCombination((SingleKeyCombination.fromString(keystr),))
def fromKeys(key, mods, mouse):
return KeyCombination((SingleKeyCombination(key, mods, mouse),))
def fromSingleKeyCombination(keycomb):
return KeyCombination((keycomb,))
def sharePrefix(self, keyCombination):
"""Determine whether 2 key combinations share prefix."""
minLength = min(len(self), len(keyCombination))
for i in range(minLength):
if self[i] != keyCombination[i]:
return False
return True
def toString(self):
"""
Return the string representation of a key combination, which can be
used to reconstruct the key combination by using
KeyCombination.fromString.
"""
return "[" + " ".join(map(lambda x: str(x), self)) + "]"
def __add__(self, keys):
"""
Add a key combination to the current combination.
E.g.
kbd("Control-] Control-Q") + kbd("Control-W")
"""
if type(keys) != KeyCombination:
keys = KeyCombination.fromSingleKeyCombination(keys)
if self == kbd(""):
return keys
return KeyCombination(list(self) + list(keys))
def __str__(self):
"""Aliased to self.toString()"""
return self.toString()
def __repr__(self):
"""
Return the string representation of a key combination. See doc of
the __str__ method for more info.
"""
return self.__str__()
def kbd(keystr):
"""
Construct a KeyCombination from string by calling
KeyCombination.fromString. The string argument is case-insensitive.
E.g.
kbd('Control-S')
kbd('Control-C Control-Z')
kbd('Mouse_Left-Mouse_Right')
"""
return KeyCombination.fromString(keystr)
# print(kbd(','))
# print(kbd('Control-S'))
# print(kbd('Control-C Control-Z'))
# print(kbd('Mouse_Left-Mouse_Right'))
# print(kbd('Control-C Shift-Mouse_Left'))
# print(kbd('Control-C Control-Z').sharePrefix(kbd('Control-C'))) # True
# print(kbd('Control-C Control-Z').sharePrefix(kbd('Control-A'))) # False
# print(kbd('Control-C Control-Z').sharePrefix(kbd(''))) # False
|
gpl-3.0
| -4,797,116,071,737,052,000
| 30.417625
| 77
| 0.602073
| false
| 4.147699
| false
| false
| false
|
mazi-project/back-end
|
lib/sht11.py
|
1
|
1426
|
#!/usr/bin/python
import sys
import warnings
def help_message():
print ' '
print 'sht11'
print ' --help Displays this usage message '
print ' --detect Displays if the sensor is connected on Raspberry Pi'
print ' -h , --humidity Displays the Humidity '
print ' -t , --temperature Displays the Temperature'
def sht11( sensor ):
try:
warnings.filterwarnings("ignore")
from sht1x.Sht1x import Sht1x as SHT1x
dataPin = 5
clkPin = 3
sht1x = SHT1x(dataPin, clkPin, SHT1x.GPIO_BOARD)
if (sensor == "humidity"):
mesurement = sht1x.read_humidity()
elif (sensor == "temperature"):
mesurement = sht1x.read_temperature_C()
return mesurement
  except Exception:
return "false"
def detect():
var = sht11("temperature")
if (type(var) == int or type(var) == float):
print 'sht11'
if __name__ == '__main__':
args = len(sys.argv)
while ( args > 1):
args -= 1
if(sys.argv[args] == "--help"):
help_message()
elif(sys.argv[args] == "--detect"):
detect()
elif(sys.argv[args] == "-t" or sys.argv[args] == "--temperature"):
temperature = sht11("temperature")
print ("temperature %.1f" % temperature)
elif(sys.argv[args] == "-h" or sys.argv[args] == "--humidity"):
humidity = sht11("humidity")
print ("humidity %.1f" % humidity)
|
mit
| -2,510,886,870,512,368,600
| 26.960784
| 97
| 0.570827
| false
| 3.347418
| false
| false
| false
|
dvu4/Data-Wrangling-with-MongoDB
|
Lesson_4_Working_with_MongoDB/23-Using_$in_Operator/find_cars.py
|
1
|
1106
|
#!/usr/bin/env python
""" Your task is to write a query that will return all cars manufactured by "Ford Motor Company"
that are assembled in Germany, United Kingdom, or Japan.
Please modify only 'in_query' function, as only that will be taken into account.
Your code will be run against a MongoDB instance that we have provided.
If you want to run this code locally on your machine,
you have to install MongoDB, download and insert the dataset.
For instructions related to MongoDB setup and datasets please see Course Materials.
"""
def get_db():
from pymongo import MongoClient
client = MongoClient('localhost:27017')
db = client.examples
return db
def in_query():
# Write the query
query = {"manufacturer" : "Ford Motor Company", "assembly" :{"$in" : ["Germany", "United Kingdom", "Japan"]}}
return query
if __name__ == "__main__":
db = get_db()
query = in_query()
autos = db.autos.find(query, {"name":1, "manufacturer":1, "assembly": 1, "_id":0})
print "Found autos:", autos.count()
import pprint
for a in autos:
pprint.pprint(a)
|
agpl-3.0
| 656,383,715,339,374,500
| 30.6
| 113
| 0.679928
| false
| 3.650165
| false
| false
| false
|
TartuNLP/nazgul
|
nmtnazgul.py
|
1
|
6225
|
#!/usr/bin/python3
import sock
import translator
import sys
import html
import json
from time import time
from nltk import sent_tokenize
from constraints import getPolitenessConstraints as getCnstrs
from log import log
# IP and port for the server
MY_IP = '172.17.66.215'
MY_PORT = 12346
supportedStyles = { 'fml', 'inf', 'auto' }
styleToDomain = { 'fml': 'ep', 'inf': 'os', 'auto': 'pc' }
#supportedStyles = { "os", "un", "dg", "jr", "ep", "pc", "em", "nc" }
supportedOutLangs = { 'et', 'lv', 'en', 'ru', 'fi', 'lt', 'de' }
extraSupportedOutLangs = { 'est': 'et', 'lav': 'lv', 'eng': 'en', 'rus': 'ru', 'fin': 'fi', 'lit': 'lt', 'ger': 'de' }
defaultStyle = 'auto'
defaultOutLang = 'en'
USAGE_MSG = """\nUsage: nmtnazgul.py translation_model truecaser_model segmenter_model [output_lang [output_style]]
translation_model: path to a trained Sockeye model folder
truecaser_model: path to a trained TartuNLP truecaser model file
segmenter_model: path to a trained Google SentencePiece model file
Without the output language and any further parameters an NMT server is started; otherwise the script translates STDIN
output_lang: output language (one of the following: {0})
output_style: output style (one of the following: {1}; default: {2})
Further info: http://github.com/tartunlp/nazgul\n\n""".format(", ".join(list(supportedOutLangs)), ", ".join(list(supportedStyles)), defaultStyle)
#############################################################################################
###################################### STDIN and Server #####################################
#############################################################################################
def getConf(rawConf):
style = 'auto'
outlang = 'en'
for field in rawConf.split(','):
if field in supportedStyles:
style = field
if field in supportedOutLangs:
outlang = field
if field in extraSupportedOutLangs:
outlang = extraSupportedOutLangs[field]
return style, outlang
def parseInput(rawText):
global supportedStyles, defaultStyle, supportedOutLangs, defaultOutLang
try:
fullText = rawText['src']
rawStyle, rawOutLang = getConf(rawText['conf'])
livesubs = "|" in fullText
sentences = fullText.split("|") if livesubs else sent_tokenize(fullText)
delim = "|" if livesubs else " "
except KeyError:
sentences = rawText['sentences']
rawStyle = rawText['outStyle']
rawOutLang = rawText['outLang']
delim = False
if rawStyle not in supportedStyles:
#raise ValueError("style bad: " + rawStyle)
rawStyle = defaultStyle
if rawOutLang not in supportedOutLangs:
#raise ValueError("out lang bad: " + rawOutLang)
rawOutLang = defaultOutLang
outputLang = rawOutLang
outputStyle = styleToDomain[rawStyle]
return sentences, outputLang, outputStyle, delim
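# Illustrative request (assumed values, not from the original service): parseInput with
# {'src': 'Hello there. Thank you.', 'conf': 'inf,et'} returns the sentence-tokenized text,
# outputLang 'et', outputStyle styleToDomain['inf'] == 'os' and delim ' ' (no '|' present).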
def decodeRequest(rawMessage):
struct = json.loads(rawMessage.decode('utf-8'))
segments, outputLang, outputStyle, delim = parseInput(struct)
return segments, outputLang, outputStyle, delim
def encodeResponse(translationList, delim):
translationText = delim.join(translationList)
result = json.dumps({'raw_trans': ['-'],
'raw_input': ['-'],
'final_trans': translationText})
return bytes(result, 'utf-8')
def serverTranslationFunc(rawMessage, models):
segments, outputLang, outputStyle, delim = decodeRequest(rawMessage)
translations, _, _, _ = translator.translate(models, segments, outputLang, outputStyle, getCnstrs())
return encodeResponse(translations, delim)
def startTranslationServer(models, ip, port):
log("started server")
    # start listening as a socket server; apply serverTranslationFunc to incoming messages to generate the response
sock.startServer(serverTranslationFunc, (models,), port = port, host = ip)
def translateStdinInBatches(models, outputLang, outputStyle):
"""Read lines from STDIN and treat each as a segment to translate;
translate them and print out tab-separated scores (decoder log-prob)
and the translation outputs"""
#read STDIN as a list of segments
lines = [line.strip() for line in sys.stdin]
#translate segments and get translations and scores
translations, scores, _, _ = translator.translate(models, lines, outputLang, outputStyle, getCnstrs())
#print each score and translation, separated with a tab
for translation, score in zip(translations, scores):
print("{0}\t{1}".format(score, translation))
#############################################################################################
################################## Cmdline and main block ###################################
#############################################################################################
def readCmdlineModels():
"""Read translation, truecaser and segmenter model paths from cmdline;
show usage info if failed"""
#This is a quick hack for reading cmdline args, should use argparse instead
try:
translationModelPath = sys.argv[1]
truecaserModelPath = sys.argv[2]
segmenterModelPath = sys.argv[3]
except IndexError:
sys.stderr.write(USAGE_MSG)
sys.exit(-1)
return translationModelPath, truecaserModelPath, segmenterModelPath
def readLangAndStyle():
"""Read output language and style off cmdline.
Language is optional -- if not given, a server is started.
Style is optional -- if not given, default (auto) is used."""
# EAFP
try:
outputLanguage = sys.argv[4]
try:
outputStyle = sys.argv[5]
except IndexError:
outputStyle = defaultStyle
except IndexError:
outputLanguage = None
outputStyle = None
return outputLanguage, outputStyle
if __name__ == "__main__":
# read translation and preprocessing model paths off cmdline
modelPaths = readCmdlineModels()
# read output language and style off cmdline -- both are optional and will be "None" if not given
olang, ostyle = readLangAndStyle()
# load translation and preprocessing models using paths
models = translator.loadModels(*modelPaths)
# if language is given, STDIN is translated; otherwise a server is started
if olang:
translateStdinInBatches(models, olang, ostyle)
else:
# when argparse is finally used, set MY_IP and MY_PORT to cmdline arguments
startTranslationServer(models, MY_IP, MY_PORT)
|
mit
| -6,915,478,788,811,677,000
| 29.970149
| 145
| 0.667952
| false
| 3.670401
| false
| false
| false
|
Flamacue/pretix
|
src/tests/multidomain/test_urlreverse.py
|
2
|
3969
|
import pytest
from django.conf import settings
from django.test import override_settings
from django.utils.timezone import now
from tests import assert_num_queries
from pretix.base.models import Event, Organizer
from pretix.multidomain.models import KnownDomain
from pretix.multidomain.urlreverse import build_absolute_uri, eventreverse
@pytest.fixture
def env():
o = Organizer.objects.create(name='MRMCD', slug='mrmcd')
event = Event.objects.create(
organizer=o, name='MRMCD2015', slug='2015',
date_from=now()
)
settings.SITE_URL = 'http://example.com'
event.get_cache().clear()
return o, event
@pytest.mark.django_db
def test_event_main_domain_front_page(env):
assert eventreverse(env[1], 'presale:event.index') == '/mrmcd/2015/'
assert eventreverse(env[0], 'presale:organizer.index') == '/mrmcd/'
@pytest.mark.django_db
def test_event_custom_domain_kwargs(env):
KnownDomain.objects.create(domainname='foobar', organizer=env[0])
assert eventreverse(env[1], 'presale:event.checkout', {'step': 'payment'}) == 'http://foobar/2015/checkout/payment/'
@pytest.mark.django_db
def test_event_main_domain_kwargs(env):
assert eventreverse(env[1], 'presale:event.checkout', {'step': 'payment'}) == '/mrmcd/2015/checkout/payment/'
@pytest.mark.django_db
def test_event_custom_domain_front_page(env):
KnownDomain.objects.create(domainname='foobar', organizer=env[0])
assert eventreverse(env[1], 'presale:event.index') == 'http://foobar/2015/'
assert eventreverse(env[0], 'presale:organizer.index') == 'http://foobar/'
@pytest.mark.django_db
def test_event_custom_domain_keep_port(env):
settings.SITE_URL = 'http://example.com:8081'
KnownDomain.objects.create(domainname='foobar', organizer=env[0])
assert eventreverse(env[1], 'presale:event.index') == 'http://foobar:8081/2015/'
@pytest.mark.django_db
def test_event_custom_domain_keep_scheme(env):
settings.SITE_URL = 'https://example.com'
KnownDomain.objects.create(domainname='foobar', organizer=env[0])
assert eventreverse(env[1], 'presale:event.index') == 'https://foobar/2015/'
@pytest.mark.django_db
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
})
def test_event_main_domain_cache(env):
env[0].get_cache().clear()
with assert_num_queries(1):
eventreverse(env[1], 'presale:event.index')
with assert_num_queries(0):
eventreverse(env[1], 'presale:event.index')
@pytest.mark.django_db
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
})
def test_event_custom_domain_cache(env):
KnownDomain.objects.create(domainname='foobar', organizer=env[0])
env[0].get_cache().clear()
with assert_num_queries(1):
eventreverse(env[1], 'presale:event.index')
with assert_num_queries(0):
eventreverse(env[1], 'presale:event.index')
@pytest.mark.django_db
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
})
def test_event_custom_domain_cache_clear(env):
kd = KnownDomain.objects.create(domainname='foobar', organizer=env[0])
env[0].get_cache().clear()
with assert_num_queries(1):
eventreverse(env[1], 'presale:event.index')
kd.delete()
with assert_num_queries(1):
eventreverse(env[1], 'presale:event.index')
@pytest.mark.django_db
def test_event_main_domain_absolute(env):
assert build_absolute_uri(env[1], 'presale:event.index') == 'http://example.com/mrmcd/2015/'
@pytest.mark.django_db
def test_event_custom_domain_absolute(env):
KnownDomain.objects.create(domainname='foobar', organizer=env[0])
assert build_absolute_uri(env[1], 'presale:event.index') == 'http://foobar/2015/'
|
apache-2.0
| 7,996,995,170,991,871,000
| 32.635593
| 120
| 0.693878
| false
| 3.16255
| true
| false
| false
|
xaedes/canopen_301_402
|
src/canopen_301_402/datatypes.py
|
1
|
7126
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import struct
from canopen_301_402.utils import collect_all_leaf_subclasses
from canopen_301_402.utils import parseIntAutoBase
from canopen_301_402.constants import CanOpenBasicDatatypes
class CanDatatype(object):
def __init__(self):
'''
@summary: abstract base class for all can datatypes
@raises: NotImplemented
'''
        raise NotImplementedError
def identifier(self):
'''
@summary: return standard data type identifier
@param self:
@result: uint16 containing data type identifier
@see http://atlas.web.cern.ch/Atlas/GROUPS/DAQTRIG/DCS/LMB/PROFILE/cano-eds.htm
'''
        raise NotImplementedError
def number_of_bits(self):
'''
@summary: returns number of bits for one value encoded with this datatype
@param self:
@result: number of bits
'''
        raise NotImplementedError
def decode(self, data):
'''
@summary: returns value of decoded data
@param self:
@param data: byte array
@result: value
'''
        raise NotImplementedError
def encode(self, value):
'''
@summary: returns encoded value
@param self:
@param value: value to be encoded
@result: data byte array
'''
        raise NotImplementedError
def decode_string(self, string):
'''
@summary: returns value of human readable representation
@param self:
@param string: human readable representation of value as string
@result: value
'''
        raise NotImplementedError
def encode_string(self, value):
'''
@summary: returns human readable representation
@param self:
@param value: value to be encoded
@result: human readable representation of value as string
'''
        raise NotImplementedError
class CanDatatypeStruct(CanDatatype):
def __init__(self, identifier, struct_data_format):
'''
@summary: Can data type base class using python 'struct' module for data coding
@param identifier: specifies can datatype identifier
@param struct_data_format: specifies data format for struct.pack and struct.unpack
example data_format "<i"
'<' little endian
'i' 32 bit signed integer
'''
self._identifier = identifier
self._data_format = struct_data_format
self._number_of_bits = struct.calcsize(self.data_format)*8
def identifier(self):
return self._identifier
def number_of_bits(self):
return self._number_of_bits
@property
def data_format(self):
# '<' : little endian
return '<' + self._data_format
def decode(self, data):
result = struct.unpack_from(self.data_format, data)
# unpack value of length-1 tuples
        if len(result) == 1:
            value, = result
            return value
        return result
def encode(self, value):
return bytearray(struct.pack(self.data_format, value))
def decode_string(self, string):
# default implementation tries to interprete as integer number
return parseIntAutoBase(string)
def encode_string(self, value):
return str(value)
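# Illustrative encoding (assumed values): CanDatatypeStruct(CanOpenBasicDatatypes.int16, "h")
# uses struct format '<h', so encode(300) yields bytearray(b'\x2c\x01') (little endian)
# and decode(bytearray(b'\x2c\x01')) returns 300 again.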
class CanDatatypeFloat32(CanDatatypeStruct):
def __init__(self):
super(CanDatatypeFloat32,self).__init__(CanOpenBasicDatatypes.float32,"f")
def decode_string(self, string):
num_value = float(string)
return num_value
class CanDatatypeBoolean(CanDatatypeStruct):
def __init__(self):
super(CanDatatypeBoolean,self).__init__(CanOpenBasicDatatypes.boolean,"?")
def decode_string(self, string):
# look for true/false keywords
if str.lower(string).strip() == "true":
return True
elif str.lower(string).strip() == "false":
return False
# try to interprete as integer number
num_value = parseIntAutoBase(string)
if num_value is None: # interpretation failed
return None
else:
return num_value != 0 # c interpretation of bool
class CanDatatypePDOMapping(CanDatatype):
    def __init__(self, node, identifier, num_mapped=0, mappings=list()):
'''
@summary: Can data type representing a specific pdo mapping
@param identifier: specifies can datatype identifier
@param num_mapped: number of currently mapped objects
@param mappings: list of currently mapped object identifiers
max_num_mappings will be constant after initialization
num_mapped & max_num_mappings can still be updated (to remap the pdo)
'''
self.node = node
self.canopen = node.canopen
self._identifier = identifier
self._num_mapped = num_mapped
        self.max_num_mappings = 64 # max 64 mappings 301_v04020005_cor3.pdf pg. 93
        self.mappings = [0]*self.max_num_mappings
for k,mapping in enumerate(mappings):
self.mappings[k] = mapping
def identifier(self):
return self._identifier
    def number_of_bits(self):
        return struct.calcsize(self.data_format)*8
@property
def num_mapped(self):
return self._num_mapped
@num_mapped.setter
def num_mapped(self,v):
if 0 <= v <= self.max_num_mappings:
self._num_mapped = v
else:
raise ValueError()
@property
def data_format(self):
result = ""
for obj_id in self.mappings[:self.num_mapped]:
datatype = self.node.obj_dict.objects[obj_id].datatype
if not hasattr(datatype,"_data_format"):
raise RuntimeError()
result += datatype._data_format
return "<" + result
def decode(self, data):
obj_values = struct.unpack_from(self.data_format, data)
return obj_values
def encode(self, obj_values):
        return bytearray(struct.pack(self.data_format, *obj_values))
def decode_string(self, string):
raise RuntimeError()
def encode_string(self, value):
raise RuntimeError()
class CanDatatypes(object):
def __init__(self):
# generate basic datatypes
self.all_datatypes = list()
self.all_datatypes.append(CanDatatypeStruct(CanOpenBasicDatatypes.int8,"b"))
self.all_datatypes.append(CanDatatypeStruct(CanOpenBasicDatatypes.int16,"h"))
self.all_datatypes.append(CanDatatypeStruct(CanOpenBasicDatatypes.int32,"i"))
self.all_datatypes.append(CanDatatypeStruct(CanOpenBasicDatatypes.uint8,"B"))
self.all_datatypes.append(CanDatatypeStruct(CanOpenBasicDatatypes.uint16,"H"))
self.all_datatypes.append(CanDatatypeStruct(CanOpenBasicDatatypes.uint32,"I"))
self.all_datatypes.append(CanDatatypeFloat32())
self.all_datatypes.append(CanDatatypeBoolean())
# add datatypes to dictionary mapping from its identifiers
self.datatypes = dict()
for datatype in self.all_datatypes:
self.datatypes[datatype.identifier().value] = datatype
|
mit
| -3,884,830,007,540,805,600
| 30.396476
| 90
| 0.621807
| false
| 4.254328
| false
| false
| false
|
ccxt/ccxt
|
examples/py/kraken-fetch-my-trades-pagination.py
|
1
|
1540
|
# -*- coding: utf-8 -*-
import os
import sys
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')
import ccxt # noqa: E402
exchange = ccxt.kraken({
'apiKey': 'YOUR_API_KEY',
'secret': 'YOUR_API_SECRET',
'enableRateLimit': True, # required by the Manual https://github.com/ccxt/ccxt/wiki/Manual#rate-limit
})
exchange.load_markets()
# exchange.verbose = True # uncomment for verbose debug output
exchange.rateLimit = 10000 # set a higher value if you get rate-limiting errors
all_trades = []
offset = 0
while True:
trades = exchange.fetch_my_trades(symbol=None, since=None, limit=None, params={'ofs': offset})
print('-----------------------------------------------------------------')
print(exchange.iso8601(exchange.milliseconds()), 'Fetched', len(trades), 'trades')
if len(trades) < 1:
break
else:
first = exchange.safe_value(trades, 0)
last = exchange.safe_value(trades, len(trades) - 1)
print('From:', first['datetime'])
print('To:', last['datetime'])
all_trades = trades + all_trades
offset += len(trades)
print(len(all_trades), 'trades fetched in total')
print('-----------------------------------------------------------------')
print(len(all_trades), 'trades fetched')
first = exchange.safe_value(all_trades, 0)
if first:
last = exchange.safe_value(all_trades, len(all_trades) - 1)
print('First:', first['datetime'])
print('Last:', last['datetime'])
|
mit
| -830,446,503,497,661,400
| 31.765957
| 106
| 0.596104
| false
| 3.39207
| false
| false
| false
|
MadMac/PyTetris
|
src/main/main.py
|
1
|
1650
|
import pygame, sys, os, random
from classes import *
from pygame.locals import *
blocksFile = "blocks.txt"
thisBlock = ""
allBlocks = []
boardWidth = 15
boardHeight = 20
gameOver = False
# Make all the blocks which are in file "blocks.txt"
file = open(blocksFile, "r")
while file:
line = file.readline()
if line.find("END") >= 0:
break
if line.find("/") >= 0:
allBlocks.append(blockStyle(thisBlock))
thisBlock = ""
continue
thisBlock = thisBlock + line
# Make board
gameBoard = board(boardWidth, boardHeight)
# All pygame init
pygame.init()
gameWindow = pygame.display.set_mode((640, 480))
pygame.display.set_caption('PyTetris')
clock = pygame.time.Clock()
playerBlock = block(boardWidth, boardHeight, allBlocks[random.randrange(len(allBlocks))].getStyle(), gameBoard)
pygame.time.Clock()
pygame.time.set_timer(pygame.USEREVENT + 1, 150)
pygame.time.set_timer(pygame.USEREVENT + 2, 1000)
#Game loop
while gameOver == False:
clock.tick(60)
for event in pygame.event.get():
if event.type == pygame.QUIT:
gameOver = True
elif event.type == KEYDOWN and event.key == K_ESCAPE:
gameOver = True
elif event.type == pygame.USEREVENT + 1:
playerBlock.handlePlayerInput()
elif event.type == pygame.USEREVENT + 2:
playerBlock.updatePlayer()
if playerBlock.isDown == True:
playerBlock.changeStyle(allBlocks[random.randrange(len(allBlocks))].getStyle())
gameWindow.fill((0,0,0))
gameBoard.drawBoard()
gameBoard.update()
playerBlock.drawBlock()
pygame.display.flip()
pygame.quit()
|
mit
| -2,331,694,246,887,766,000
| 23.626866
| 111
| 0.664242
| false
| 3.481013
| false
| false
| false
|
openslack/openslack-web
|
openslack/apps/company/views.py
|
1
|
1072
|
# encoding:utf-8
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from apps.company.models import Company, Comment
from django.shortcuts import get_object_or_404
from utils.page import paginator_objects
def index(request):
return render_to_response('index.html', {}, RequestContext(request))
def company_list(request, template, page=1):
"""
    Company category list
"""
page = int(page)
companies = Company.objects.filter(status=True)
print companies
page_range, companies = paginator_objects(page, companies)
dt_view = {
"companies": companies,
"page_range": page_range,
"page": page
}
return render_to_response(template, dt_view, context_instance=RequestContext(request))
# @silk_profile(name='Get Detail')
def company_detail(request, template, pk):
company = get_object_or_404(Company, pk=pk, status=True)
dt_view = {
"company": company,
}
return render_to_response(template, dt_view, context_instance=RequestContext(request))
|
apache-2.0
| -7,157,730,011,399,489,000
| 29.285714
| 90
| 0.701887
| false
| 3.630137
| false
| false
| false
|
localhuman/neo-python
|
examples/json-rpc-api-server.py
|
1
|
2783
|
#!/usr/bin/env python3
"""
This example provides a JSON-RPC API to query blockchain data, implementing `neo.api.JSONRPC.JsonRpcApi`
"""
import argparse
import os
from logzero import logger
from twisted.internet import reactor, task
from neo import __version__
from neo.Core.Blockchain import Blockchain
from neo.Implementations.Blockchains.LevelDB.LevelDBBlockchain import LevelDBBlockchain
from neo.Implementations.Notifications.LevelDB.NotificationDB import NotificationDB
from neo.api.JSONRPC.JsonRpcApi import JsonRpcApi
from neo.Network.NodeLeader import NodeLeader
from neo.Settings import settings, DIR_PROJECT_ROOT
from neo.UserPreferences import preferences
# Logfile settings & setup
LOGFILE_FN = os.path.join(DIR_PROJECT_ROOT, 'json-rpc.log')
LOGFILE_MAX_BYTES = 5e7 # 50 MB
LOGFILE_BACKUP_COUNT = 3 # 3 logfiles history
settings.set_logfile(LOGFILE_FN, LOGFILE_MAX_BYTES, LOGFILE_BACKUP_COUNT)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--mainnet", action="store_true", default=False,
help="Use MainNet instead of the default TestNet")
parser.add_argument("-p", "--privnet", action="store_true", default=False,
help="Use PrivNet instead of the default TestNet")
parser.add_argument("-c", "--config", action="store", help="Use a specific config file")
parser.add_argument('--version', action='version',
version='neo-python v{version}'.format(version=__version__))
args = parser.parse_args()
if args.config and (args.mainnet or args.privnet):
print("Cannot use both --config and --mainnet/--privnet arguments, please use only one.")
exit(1)
if args.mainnet and args.privnet:
print("Cannot use both --mainnet and --privnet arguments")
exit(1)
# Setup depending on command line arguments. By default, the testnet settings are already loaded.
if args.config:
settings.setup(args.config)
elif args.mainnet:
settings.setup_mainnet()
elif args.privnet:
settings.setup_privnet()
# Instantiate the blockchain and subscribe to notifications
blockchain = LevelDBBlockchain(settings.LEVELDB_PATH)
Blockchain.RegisterBlockchain(blockchain)
dbloop = task.LoopingCall(Blockchain.Default().PersistBlocks)
dbloop.start(.1)
settings.set_log_smart_contract_events(False)
ndb = NotificationDB.instance()
ndb.start()
# Run
reactor.suggestThreadPoolSize(15)
NodeLeader.Instance().Start()
host = "0.0.0.0"
port = settings.RPC_PORT
logger.info("Starting json-rpc api server on http://%s:%s" % (host, port))
api_server = JsonRpcApi(port)
api_server.app.run(host, port)
if __name__ == "__main__":
main()
|
mit
| 3,520,455,970,852,346,400
| 34.227848
| 104
| 0.701761
| false
| 3.690981
| true
| false
| false
|
ehua7365/RibbonOperators
|
TEBD/mpstest8.py
|
1
|
5968
|
"""
mpstest8.py
A test of manipulating matrix product states with numpy.
2014-08-25
"""
import numpy as np
import matplotlib.pyplot as plt
from cmath import *
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
def main():
#test3()
getMPS(randomState(2,5),3).shape
def test3():
""" Test MPS conversion functions by computing fidelity between
    generated MPS and original, with new and old bond dimensions
chi0 and chi1 varied.
"""
print("*** Started testing MPS ***")
N = 5
d = 2
# Points to plot on 3d graph
(X,Y,Z) = ([],[],[])
for chi0 in xrange(1,5):
for chi1 in xrange(1,5):
F = 0
            # Run 10 random tests and take the average fidelity
for i in xrange(10):
mps0 = randomMPS(N,chi0,d) # Make random MPS
state0 = getState(mps0) # Convert to state
mps1 = getMPS(state0,chi1) # Convert back to MPS with new bond dimension
state1 = getState(mps1) # Convert back to state
F += fidelityMPS(mps0,mps1) # Compute fidelity and add to sum
# F += fidelity(state0,state1) # Uncomment this to try with vectors
X.append(chi0)
Y.append(chi1)
            Z.append(F/10)
X = np.array(X)
Y = np.array(Y)
Z = np.array(Z)
# Plot the surface
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_trisurf(X, Y, Z, cmap=cm.jet, linewidth=0.2)
ax.set_xlabel('chi0')
ax.set_ylabel('chi1')
ax.set_zlabel('fidelity')
plt.show()
print("*** Finished testing MPS ***")
def fidelityMPS(A,B):
""" Fidelity of two MPS representations
f = <A|B><B|A>/(<A|A><B|B>).
"""
return innerProduct(A,B)*innerProduct(B,A)\
/innerProduct(A,A)/innerProduct(B,B)
def fidelity(a,b):
""" Fidelity of two state vectors
f = <a|b><b|a>/(<a|a><b|b>).
"""
return np.inner(np.conj(a),b)*np.inner(np.conj(b),a)\
/np.inner(np.conj(a),a)/np.inner(np.conj(b),b)
def randomMPS(N,chi,d):
""" Returns a random MPS given parameters N, chi, d."""
A = []
for i in xrange(N):
# Each real part of each value varies between -0.5 and 0.5.
A.append((np.random.rand(chi,d,chi)-.5)+1j*(np.random.rand(chi,d,chi)-.5))
return np.array(A)
def getState(A):
""" State vector of a MPS by contracting MPS."""
N = len(A) # Number of spins
chi = A[0].shape[0] # Bond dimension
d = A[0].shape[1] # d = 2 for qubits
c = A[0]
for i in xrange(1,N):
c = np.tensordot(c,A[i],axes=(-1,0))
c = np.trace(c,axis1=0,axis2=-1)
return np.reshape(c,d**N)
def getMPS(state,chi):
""" MPS of a state."""
d = 2 # Qubits have 2 states each
N = int(np.log2(len(state))) # Number of qubits
c = np.reshape(state,cShape(d,N)) # State amplitudes tensor c.
A = [] # List of N matrices of MPS, each of shape (chi,d,chi)
# Start left end with a vector of size (d,chi)
c = np.reshape(c,(d,d**(N-1))) # Reshape c
(ap,sv,c) = np.linalg.svd(c) # Apply SVD
s = np.zeros((d,chi),dtype=complex) # Construct singular value matrix shape
s[:d,:d] = np.diag(sv[:chi]) # Fill s with singular values
# Trim c or fill rest of c with zeros
newc = np.zeros((chi,d**(N-1)),dtype=complex)
newc[:min(chi,d**(N-1)),:] = c[:chi,:]
c = newc
A.append(np.dot(ap,s)) # Contract and append to A
# Sweep through the middle, creating matrix products each with
# shape (chi,d,chi)
for i in xrange(1,N-2):
c = np.reshape(c,(d*chi,d**(N-i-1)))
(ap,sv,c) = np.linalg.svd(c)
s = np.zeros((d*chi,chi),dtype=complex)
s[:min(chi,len(sv)),:min(chi,len(sv))] = np.diag(sv[:chi])
A.append(np.reshape(np.dot(ap,s),(chi,d,chi)))
newc = np.zeros((chi,d**(N-i-1)),dtype=complex)
newc[:min(chi,len(sv)),:] = c[:chi,:]
c = newc
# Finish right end with the remaining vector
c = np.reshape(c,(d*chi,d))
(ap,sv,c) = np.linalg.svd(c)
s = np.zeros((chi,d),dtype=complex)
s[:d,:d] = np.diag(sv[:chi])
A.append(np.reshape(ap[:chi,:],(chi,d,chi)))
c = np.dot(s,c)
A.append(c)
prod = A[0]
for i in xrange(1,N):
prod = np.tensordot(prod,A[i],axes=(-1,0))
print(prod-np.reshape(state,cShape(d,N)))
# Fix up ends by filling first row of correctly shaped zeros with
# end vectors such that the trace is preserved.
start = np.zeros((chi,d,chi),dtype=complex)
start[0,:,:] = A[0]
A[0] = start
finish = np.zeros((chi,d,chi),dtype=complex)
finish[:,:,0] = A[-1]
A[-1] = finish
# Return MPS as numpy array with shape (N,chi,d,chi)
return np.array(A)
def innerProduct(A,B):
""" Inner product <A|B> using transfer matrices
    where A and B are MPS representations of |A> and |B>.
"""
N = len(A) # Number of qubits
chiA = A.shape[1] # bond dimension of MPS in A
chiB = B.shape[1] # bond dimension of MPS in B
d = A.shape[2] # d = 2 for qubits
# Take adjoint of |A> to get <A|
A = np.conj(A)
# Construct list of transfer matrices by contracting pairs of
# tensors from A and B.
transfer = []
for i in xrange(N):
t = np.tensordot(A[i],B[i],axes=(1,1))
t = np.transpose(t,axes=(0,2,1,3))
t = np.reshape(t,(chiA*chiB,chiA*chiB))
transfer.append(t)
# Contract the transfer matrices.
prod = transfer[0]
for i in xrange(1,len(transfer)):
prod = np.tensordot(prod,transfer[i],axes=(-1,0))
return np.trace(prod)
def randomState(d,N):
state = (np.random.rand(d**N)-.5) + (np.random.rand(d**N)-.5)*1j
state = state/np.linalg.norm(state)
return state
def cShape(d,N):
""" Returns the shape of c tensor representation.
I.e. simply just (d,d,...,d) N times.
"""
return tuple([d for i in xrange(N)])
if __name__ == "__main__":
main()
|
mit
| -5,615,611,594,896,353,000
| 31.972376
| 88
| 0.575737
| false
| 2.874759
| true
| false
| false
|
PolyCortex/pyMuse
|
pymuse/signal.py
|
1
|
1235
|
from dataclasses import dataclass
from threading import Event
from pymuse.utils.stoppablequeue import StoppableQueue
@dataclass
class SignalData():
"""
    Dataclass for a signal data point. The event_marker attribute is optional.
"""
time: float
values: list
event_marker: list = None
class Signal():
"""Represents the accumulated signal that is store in a queue. It tag every sample with a time"""
def __init__(self, length: int, acquisition_frequency: float):
self._shutdown_event = Event()
self._signal_queue: StoppableQueue = StoppableQueue(length, self._shutdown_event)
self._signal_period: float = (1 / acquisition_frequency)
self._data_counter: int = 0
@property
def signal_queue(self) -> StoppableQueue:
return self._signal_queue
def push(self, data_list: list):
time = self._data_counter * self._signal_period
signal_data: SignalData = SignalData(time, data_list)
self._signal_queue.put(signal_data, True, self._signal_period)
self._data_counter += 1
def pop(self, timeout=None) -> SignalData:
return self._signal_queue.get(True, timeout)
def shutdown(self):
self._shutdown_event.set()
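# Minimal usage sketch (illustrative only; the 256 Hz rate and the channel
# values are assumptions, not part of this module):
#
#   signal = Signal(length=1024, acquisition_frequency=256.0)
#   signal.push([1.0, 2.0, 3.0, 4.0])   # one sample across four channels
#   sample = signal.pop(timeout=1.0)    # SignalData(time=0.0, values=[1.0, ...])
#   signal.shutdown()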
|
mit
| 1,287,241,640,009,721,900
| 31.5
| 101
| 0.670445
| false
| 3.871473
| false
| false
| false
|
abcdef123/stem
|
stem/response/events.py
|
1
|
30484
|
import datetime
import re
import StringIO
import stem
import stem.control
import stem.descriptor.router_status_entry
import stem.response
import stem.version
from stem.util import connection, log, str_tools, tor_tools
# Matches keyword=value arguments. This can't be a simple "(.*)=(.*)" pattern
# because some positional arguments, like circuit paths, can have an equal
# sign.
KW_ARG = re.compile("^(.*) ([A-Za-z0-9_]+)=(\S*)$")
QUOTED_KW_ARG = re.compile("^(.*) ([A-Za-z0-9_]+)=\"(.*)\"$")
class Event(stem.response.ControlMessage):
"""
Base for events we receive asynchronously, as described in section 4.1 of the
`control-spec
<https://gitweb.torproject.org/torspec.git/blob/HEAD:/control-spec.txt>`_.
:var str type: event type
:var int arrived_at: unix timestamp for when the message arrived
:var list positional_args: positional arguments of the event
:var dict keyword_args: key/value arguments of the event
"""
_POSITIONAL_ARGS = () # attribute names for recognized positional arguments
_KEYWORD_ARGS = {} # map of 'keyword => attribute' for recognized attributes
_QUOTED = () # positional arguments that are quoted
_SKIP_PARSING = False # skip parsing contents into our positional_args and keyword_args
_VERSION_ADDED = stem.version.Version('0.1.1.1-alpha') # minimum version with control-spec V1 event support
def _parse_message(self, arrived_at):
if not str(self).strip():
raise stem.ProtocolError("Received a blank tor event. Events must at the very least have a type.")
self.type = str(self).split().pop(0)
self.arrived_at = arrived_at
# if we're a recognized event type then translate ourselves into that subclass
if self.type in EVENT_TYPE_TO_CLASS:
self.__class__ = EVENT_TYPE_TO_CLASS[self.type]
self.positional_args = []
self.keyword_args = {}
if not self._SKIP_PARSING:
self._parse_standard_attr()
self._parse()
def _parse_standard_attr(self):
"""
Most events are of the form...
650 *( positional_args ) *( key "=" value )
This parses this standard format, populating our **positional_args** and
**keyword_args** attributes and creating attributes if it's in our event's
**_POSITIONAL_ARGS** and **_KEYWORD_ARGS**.
"""
# Tor events contain some number of positional arguments followed by
# key/value mappings. Parsing keyword arguments from the end until we hit
# something that isn't a key/value mapping. The rest are positional.
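    # For example (illustrative values, not taken from a live tor instance),
    # an event whose content reads "CIRC 4 LAUNCHED PURPOSE=GENERAL" ends up
    # with positional_args of ['4', 'LAUNCHED'] and keyword_args of
    # {'PURPOSE': 'GENERAL'}.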
content = str(self)
while True:
match = QUOTED_KW_ARG.match(content)
if not match:
match = KW_ARG.match(content)
if match:
content, keyword, value = match.groups()
self.keyword_args[keyword] = value
else:
break
# Setting attributes for the fields that we recognize.
self.positional_args = content.split()[1:]
positional = list(self.positional_args)
for attr_name in self._POSITIONAL_ARGS:
attr_value = None
if positional:
if attr_name in self._QUOTED:
attr_values = [positional.pop(0)]
if not attr_values[0].startswith('"'):
raise stem.ProtocolError("The %s value should be quoted, but didn't have a starting quote: %s" % (attr_name, self))
while True:
if not positional:
raise stem.ProtocolError("The %s value should be quoted, but didn't have an ending quote: %s" % (attr_name, self))
attr_values.append(positional.pop(0))
if attr_values[-1].endswith('"'): break
attr_value = " ".join(attr_values)[1:-1]
else:
attr_value = positional.pop(0)
setattr(self, attr_name, attr_value)
for controller_attr_name, attr_name in self._KEYWORD_ARGS.items():
setattr(self, attr_name, self.keyword_args.get(controller_attr_name))
  # method overridden by our subclasses for any special handling that they need
def _parse(self):
pass
def _log_if_unrecognized(self, attr, attr_enum):
"""
Checks if an attribute exists in a given enumeration, logging a message if
it isn't. Attributes can either be for a string or collection of strings
:param str attr: name of the attribute to check
:param stem.util.enum.Enum enum: enumeration to check against
"""
attr_values = getattr(self, attr)
if attr_values:
if isinstance(attr_values, str):
attr_values = [attr_values]
for value in attr_values:
if not value in attr_enum:
log_id = "event.%s.unknown_%s.%s" % (self.type.lower(), attr, value)
unrecognized_msg = "%s event had an unrecognized %s (%s). Maybe a new addition to the control protocol? Full Event: '%s'" % (self.type, attr, value, self)
log.log_once(log_id, log.INFO, unrecognized_msg)
class AddrMapEvent(Event):
"""
Event that indicates a new address mapping.
:var str hostname: address being resolved
  :var str destination: destination of the resolution, this is usually an ip,
but could be a hostname if TrackHostExits is enabled or **NONE** if the
resolution failed
:var datetime expiry: expiration time of the resolution in local time
:var str error: error code if the resolution failed
:var datetime utc_expiry: expiration time of the resolution in UTC
The ADDRMAP event was one of the first Control Protocol V1 events and was
introduced in tor version 0.1.1.1-alpha.
"""
_POSITIONAL_ARGS = ("hostname", "destination", "expiry")
_KEYWORD_ARGS = {
"error": "error",
"EXPIRES": "utc_expiry",
}
_QUOTED = ("expiry")
def _parse(self):
if self.destination == "<error>":
self.destination = None
if self.expiry is not None:
self.expiry = datetime.datetime.strptime(self.expiry, "%Y-%m-%d %H:%M:%S")
if self.utc_expiry is not None:
self.utc_expiry = datetime.datetime.strptime(self.utc_expiry, "%Y-%m-%d %H:%M:%S")
class AuthDirNewDescEvent(Event):
"""
Event specific to directory authorities, indicating that we just received new
descriptors. The descriptor type contained within this event is unspecified
so the descriptor contents are left unparsed.
:var stem.AuthDescriptorAction action: what is being done with the descriptor
:var str message: explanation of why we chose this action
:var str descriptor: content of the descriptor
The AUTHDIR_NEWDESCS event was introduced in tor version 0.1.1.10-alpha.
"""
_SKIP_PARSING = True
_VERSION_ADDED = stem.version.Version('0.1.1.10-alpha')
def _parse(self):
lines = str(self).split('\n')
if len(lines) < 5:
raise stem.ProtocolError("AUTHDIR_NEWDESCS events must contain lines for at least the type, action, message, descriptor, and terminating 'OK'")
elif not lines[-1] == "OK":
raise stem.ProtocolError("AUTHDIR_NEWDESCS doesn't end with an 'OK'")
self.action = lines[1]
self.message = lines[2]
self.descriptor = '\n'.join(lines[3:-1])
class BandwidthEvent(Event):
"""
Event emitted every second with the bytes sent and received by tor.
:var long read: bytes received by tor that second
:var long written: bytes sent by tor that second
The BW event was one of the first Control Protocol V1 events and was
introduced in tor version 0.1.1.1-alpha.
"""
_POSITIONAL_ARGS = ("read", "written")
def _parse(self):
if not self.read:
raise stem.ProtocolError("BW event is missing its read value")
elif not self.written:
raise stem.ProtocolError("BW event is missing its written value")
elif not self.read.isdigit() or not self.written.isdigit():
raise stem.ProtocolError("A BW event's bytes sent and received should be a positive numeric value, received: %s" % self)
self.read = long(self.read)
self.written = long(self.written)
class BuildTimeoutSetEvent(Event):
"""
Event indicating that the timeout value for a circuit has changed. This was
first added in tor version 0.2.2.7.
:var stem.TimeoutSetType set_type: way in which the timeout is changing
:var int total_times: circuit build times tor used to determine the timeout
:var int timeout: circuit timeout value in milliseconds
:var int xm: Pareto parameter Xm in milliseconds
:var float alpha: Pareto parameter alpha
:var float quantile: CDF quantile cutoff point
  :var float timeout_rate: ratio of circuits that have timed out
:var int close_timeout: duration to keep measurement circuits in milliseconds
:var float close_rate: ratio of measurement circuits that are closed
The BUILDTIMEOUT_SET event was introduced in tor version 0.2.2.7-alpha.
"""
_POSITIONAL_ARGS = ("set_type",)
_KEYWORD_ARGS = {
"TOTAL_TIMES": "total_times",
"TIMEOUT_MS": "timeout",
"XM": "xm",
"ALPHA": "alpha",
"CUTOFF_QUANTILE": "quantile",
"TIMEOUT_RATE": "timeout_rate",
"CLOSE_MS": "close_timeout",
"CLOSE_RATE": "close_rate",
}
_VERSION_ADDED = stem.version.Version('0.2.2.7-alpha')
def _parse(self):
# convert our integer and float parameters
for param in ('total_times', 'timeout', 'xm', 'close_timeout'):
param_value = getattr(self, param)
if param_value is not None:
try:
setattr(self, param, int(param_value))
except ValueError:
raise stem.ProtocolError("The %s of a BUILDTIMEOUT_SET should be an integer: %s" % (param, self))
for param in ('alpha', 'quantile', 'timeout_rate', 'close_rate'):
param_value = getattr(self, param)
if param_value is not None:
try:
setattr(self, param, float(param_value))
except ValueError:
raise stem.ProtocolError("The %s of a BUILDTIMEOUT_SET should be a float: %s" % (param, self))
self._log_if_unrecognized('set_type', stem.TimeoutSetType)
class CircuitEvent(Event):
"""
Event that indicates that a circuit has changed.
The fingerprint or nickname values in our 'path' may be **None** if the
VERBOSE_NAMES feature isn't enabled. The option was first introduced in tor
version 0.1.2.2, and on by default after 0.2.2.1.
:var str id: circuit identifier
:var stem.CircStatus status: reported status for the circuit
:var tuple path: relays involved in the circuit, these are
**(fingerprint, nickname)** tuples
:var tuple build_flags: :data:`~stem.CircBuildFlag` attributes
governing how the circuit is built
:var stem.CircPurpose purpose: purpose that the circuit is intended for
:var stem.HiddenServiceState hs_state: status if this is a hidden service circuit
:var str rend_query: circuit's rendezvous-point if this is hidden service related
:var datetime created: time when the circuit was created or cannibalized
:var stem.CircClosureReason reason: reason for the circuit to be closed
:var stem.CircClosureReason remote_reason: remote side's reason for the circuit to be closed
The CIRC event was one of the first Control Protocol V1 events and was
introduced in tor version 0.1.1.1-alpha.
"""
_POSITIONAL_ARGS = ("id", "status", "path")
_KEYWORD_ARGS = {
"BUILD_FLAGS": "build_flags",
"PURPOSE": "purpose",
"HS_STATE": "hs_state",
"REND_QUERY": "rend_query",
"TIME_CREATED": "created",
"REASON": "reason",
"REMOTE_REASON": "remote_reason",
}
def _parse(self):
self.path = tuple(stem.control._parse_circ_path(self.path))
if self.build_flags is not None:
self.build_flags = tuple(self.build_flags.split(','))
if self.created is not None:
try:
self.created = str_tools.parse_iso_timestamp(self.created)
except ValueError, exc:
raise stem.ProtocolError("Unable to parse create date (%s): %s" % (exc, self))
if not tor_tools.is_valid_circuit_id(self.id):
raise stem.ProtocolError("Circuit IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self))
self._log_if_unrecognized('status', stem.CircStatus)
self._log_if_unrecognized('build_flags', stem.CircBuildFlag)
self._log_if_unrecognized('purpose', stem.CircPurpose)
self._log_if_unrecognized('hs_state', stem.HiddenServiceState)
self._log_if_unrecognized('reason', stem.CircClosureReason)
self._log_if_unrecognized('remote_reason', stem.CircClosureReason)
class CircMinorEvent(Event):
"""
Event providing information about minor changes in our circuits. This was
first added in tor version 0.2.3.11.
:var str id: circuit identifier
:var stem.CircEvent event: type of change in the circuit
:var tuple path: relays involved in the circuit, these are
**(fingerprint, nickname)** tuples
:var tuple build_flags: :data:`~stem.CircBuildFlag` attributes
governing how the circuit is built
:var stem.CircPurpose purpose: purpose that the circuit is intended for
:var stem.HiddenServiceState hs_state: status if this is a hidden service circuit
:var str rend_query: circuit's rendezvous-point if this is hidden service related
:var datetime created: time when the circuit was created or cannibalized
:var stem.CircPurpose old_purpose: prior purpose for the circuit
:var stem.HiddenServiceState old_hs_state: prior status as a hidden service circuit
The CIRC_MINOR event was introduced in tor version 0.2.3.11-alpha.
"""
_POSITIONAL_ARGS = ("id", "event", "path")
_KEYWORD_ARGS = {
"BUILD_FLAGS": "build_flags",
"PURPOSE": "purpose",
"HS_STATE": "hs_state",
"REND_QUERY": "rend_query",
"TIME_CREATED": "created",
"OLD_PURPOSE": "old_purpose",
"OLD_HS_STATE": "old_hs_state",
}
_VERSION_ADDED = stem.version.Version('0.2.3.11-alpha')
def _parse(self):
self.path = tuple(stem.control._parse_circ_path(self.path))
if self.build_flags is not None:
self.build_flags = tuple(self.build_flags.split(','))
if self.created is not None:
try:
self.created = str_tools.parse_iso_timestamp(self.created)
except ValueError, exc:
raise stem.ProtocolError("Unable to parse create date (%s): %s" % (exc, self))
if not tor_tools.is_valid_circuit_id(self.id):
raise stem.ProtocolError("Circuit IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self))
self._log_if_unrecognized('event', stem.CircEvent)
self._log_if_unrecognized('build_flags', stem.CircBuildFlag)
self._log_if_unrecognized('purpose', stem.CircPurpose)
self._log_if_unrecognized('hs_state', stem.HiddenServiceState)
self._log_if_unrecognized('old_purpose', stem.CircPurpose)
self._log_if_unrecognized('old_hs_state', stem.HiddenServiceState)
class ClientsSeenEvent(Event):
"""
Periodic event on bridge relays that provides a summary of our users.
:var datetime start_time: time in UTC that we started collecting these stats
:var dict locales: mapping of country codes to a rounded count for the number of users
:var dict ip_versions: mapping of ip protocols to a rounded count for the number of users
The CLIENTS_SEEN event was introduced in tor version 0.2.1.10-alpha.
"""
_KEYWORD_ARGS = {
"TimeStarted": "start_time",
"CountrySummary": "locales",
"IPVersions": "ip_versions",
}
_VERSION_ADDED = stem.version.Version('0.2.1.10-alpha')
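  # Illustrative (hypothetical) values: a CountrySummary of "us=16,de=8" is
  # parsed below into locales == {'us': 16, 'de': 8}, and an IPVersions of
  # "v4=16,v6=40" into ip_versions == {'v4': 16, 'v6': 40}.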
def _parse(self):
if self.start_time is not None:
self.start_time = datetime.datetime.strptime(self.start_time, "%Y-%m-%d %H:%M:%S")
if self.locales is not None:
locale_to_count = {}
for entry in self.locales.split(','):
if not '=' in entry:
raise stem.ProtocolError("The CLIENTS_SEEN's CountrySummary should be a comma separated listing of '<locale>=<count>' mappings: %s" % self)
locale, count = entry.split('=', 1)
if len(locale) != 2:
raise stem.ProtocolError("Locales should be a two character code, got '%s': %s" % (locale, self))
elif not count.isdigit():
raise stem.ProtocolError("Locale count was non-numeric (%s): %s" % (count, self))
elif locale in locale_to_count:
raise stem.ProtocolError("CountrySummary had multiple mappings for '%s': %s" % (locale, self))
locale_to_count[locale] = int(count)
self.locales = locale_to_count
if self.ip_versions is not None:
protocol_to_count = {}
for entry in self.ip_versions.split(','):
if not '=' in entry:
raise stem.ProtocolError("The CLIENTS_SEEN's IPVersions should be a comma separated listing of '<protocol>=<count>' mappings: %s" % self)
protocol, count = entry.split('=', 1)
if not count.isdigit():
raise stem.ProtocolError("IP protocol count was non-numeric (%s): %s" % (count, self))
protocol_to_count[protocol] = int(count)
self.ip_versions = protocol_to_count
class ConfChangedEvent(Event):
"""
Event that indicates that our configuration changed, either in response to a
SETCONF or RELOAD signal.
:var dict config: mapping of configuration options to their new values
(**None** if the option is being unset)
The CONF_CHANGED event was introduced in tor version 0.2.3.3-alpha.
"""
_SKIP_PARSING = True
_VERSION_ADDED = stem.version.Version('0.2.3.3-alpha')
def _parse(self):
self.config = {}
# Skip first and last line since they're the header and footer. For
# instance...
#
# 650-CONF_CHANGED
# 650-ExitNodes=caerSidi
# 650-ExitPolicy
# 650-MaxCircuitDirtiness=20
# 650 OK
for line in str(self).splitlines()[1:-1]:
if '=' in line:
key, value = line.split('=', 1)
else:
key, value = line, None
self.config[key] = value
class DescChangedEvent(Event):
"""
Event that indicates that our descriptor has changed.
The DESCCHANGED event was introduced in tor version 0.1.2.2-alpha.
"""
_VERSION_ADDED = stem.version.Version('0.1.2.2-alpha')
pass
class GuardEvent(Event):
"""
Event that indicates that our guard relays have changed.
:var stem.GuardType guard_type: purpose the guard relay is for
:var str name: nickname or fingerprint of the guard relay
:var stem.GuardStatus status: status of the guard relay
The GUARD event was introduced in tor version 0.1.2.5-alpha.
"""
_VERSION_ADDED = stem.version.Version('0.1.2.5-alpha')
# TODO: We should replace the 'name' field with a fingerprint or nickname
# attribute once we know what it can be...
#
# https://trac.torproject.org/7619
_POSITIONAL_ARGS = ("guard_type", "name", "status")
class LogEvent(Event):
"""
Tor logging event. These are the most visible kind of event since, by
default, tor logs at the NOTICE :data:`~stem.Runlevel` to stdout.
:var stem.Runlevel runlevel: runlevel of the logged message
:var str message: logged message
The logging events were some of the first Control Protocol V1 events
and were introduced in tor version 0.1.1.1-alpha.
"""
_SKIP_PARSING = True
def _parse(self):
self.runlevel = self.type
self._log_if_unrecognized('runlevel', stem.Runlevel)
# message is our content, minus the runlevel and ending "OK" if a
# multi-line message
self.message = str(self)[len(self.runlevel) + 1:].rstrip("\nOK")
class NetworkStatusEvent(Event):
"""
Event for when our copy of the consensus has changed. This was introduced in
tor version 0.1.2.3.
:var list desc: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` for the changed descriptors
The NS event was introduced in tor version 0.1.2.3-alpha.
"""
_SKIP_PARSING = True
_VERSION_ADDED = stem.version.Version('0.1.2.3-alpha')
def _parse(self):
content = str(self).lstrip("NS\n")
self.desc = list(stem.descriptor.router_status_entry.parse_file(
StringIO.StringIO(content),
True,
entry_class = stem.descriptor.router_status_entry.RouterStatusEntryV3,
))
class NewConsensusEvent(Event):
"""
Event for when we have a new consensus. This is similar to
:class:`~stem.response.events.NetworkStatusEvent`, except that it contains
the whole consensus so anything not listed is implicitly no longer
recommended.
:var list desc: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` for the changed descriptors
The NEWCONSENSUS event was introduced in tor version 0.2.1.13-alpha.
"""
_SKIP_PARSING = True
_VERSION_ADDED = stem.version.Version('0.2.1.13-alpha')
def _parse(self):
content = str(self).lstrip("NEWCONSENSUS\n")
self.desc = list(stem.descriptor.router_status_entry.parse_file(
StringIO.StringIO(content),
True,
entry_class = stem.descriptor.router_status_entry.RouterStatusEntryV3,
))
class NewDescEvent(Event):
"""
Event that indicates that a new descriptor is available.
The fingerprint or nickname values in our 'relays' may be **None** if the
VERBOSE_NAMES feature isn't enabled. The option was first introduced in tor
version 0.1.2.2, and on by default after 0.2.2.1.
:var tuple relays: **(fingerprint, nickname)** tuples for the relays with
new descriptors
The NEWDESC event was one of the first Control Protocol V1 events and was
introduced in tor version 0.1.1.1-alpha.
"""
def _parse(self):
self.relays = tuple([stem.control._parse_circ_entry(entry) for entry in str(self).split()[1:]])
class ORConnEvent(Event):
"""
Event that indicates a change in a relay connection. The 'endpoint' could be
any of several things including a...
* fingerprint
* nickname
* 'fingerprint=nickname' pair
* address:port
The derived 'endpoint_*' attributes are generally more useful.
:var str endpoint: relay that the event concerns
  :var str endpoint_fingerprint: endpoint's fingerprint if it was provided
:var str endpoint_nickname: endpoint's nickname if it was provided
:var str endpoint_address: endpoint's address if it was provided
:var int endpoint_port: endpoint's port if it was provided
:var stem.ORStatus status: state of the connection
:var stem.ORClosureReason reason: reason for the connection to be closed
:var int circ_count: number of established and pending circuits
The ORCONN event was one of the first Control Protocol V1 events and was
introduced in tor version 0.1.1.1-alpha.
"""
_POSITIONAL_ARGS = ("endpoint", "status")
_KEYWORD_ARGS = {
"REASON": "reason",
"NCIRCS": "circ_count",
}
def _parse(self):
self.endpoint_fingerprint = None
self.endpoint_nickname = None
self.endpoint_address = None
self.endpoint_port = None
try:
self.endpoint_fingerprint, self.endpoint_nickname = \
stem.control._parse_circ_entry(self.endpoint)
except stem.ProtocolError:
if not ':' in self.endpoint:
raise stem.ProtocolError("ORCONN endpoint is neither a relay nor 'address:port': %s" % self)
address, port = self.endpoint.split(':', 1)
if not connection.is_valid_port(port):
raise stem.ProtocolError("ORCONN's endpoint location's port is invalid: %s" % self)
self.endpoint_address = address
self.endpoint_port = int(port)
if self.circ_count is not None:
if not self.circ_count.isdigit():
raise stem.ProtocolError("ORCONN event got a non-numeric circuit count (%s): %s" % (self.circ_count, self))
self.circ_count = int(self.circ_count)
self._log_if_unrecognized('status', stem.ORStatus)
self._log_if_unrecognized('reason', stem.ORClosureReason)
class SignalEvent(Event):
"""
Event that indicates that tor has received and acted upon a signal being sent
to the process. As of tor version 0.2.4.6 the only signals conveyed by this
event are...
* RELOAD
* DUMP
* DEBUG
* NEWNYM
* CLEARDNSCACHE
:var stem.Signal signal: signal that tor received
The SIGNAL event was introduced in tor version 0.2.3.1-alpha.
"""
_POSITIONAL_ARGS = ("signal",)
_VERSION_ADDED = stem.version.Version('0.2.3.1-alpha')
def _parse(self):
    # log if we received an unrecognized signal
expected_signals = (
stem.Signal.RELOAD,
stem.Signal.DUMP,
stem.Signal.DEBUG,
stem.Signal.NEWNYM,
stem.Signal.CLEARDNSCACHE,
)
self._log_if_unrecognized('signal', expected_signals)
class StatusEvent(Event):
"""
Notification of a change in tor's state. These are generally triggered for
the same sort of things as log messages of the NOTICE level or higher.
However, unlike :class:`~stem.response.events.LogEvent` these contain well
formed data.
:var stem.StatusType status_type: category of the status event
:var stem.Runlevel runlevel: runlevel of the logged message
:var str message: logged message
The STATUS_GENERAL, STATUS_CLIENT, STATUS_SERVER events were introduced
in tor version 0.1.2.3-alpha.
"""
_POSITIONAL_ARGS = ("runlevel", "action")
_VERSION_ADDED = stem.version.Version('0.1.2.3-alpha')
def _parse(self):
if self.type == 'STATUS_GENERAL':
self.status_type = stem.StatusType.GENERAL
elif self.type == 'STATUS_CLIENT':
self.status_type = stem.StatusType.CLIENT
elif self.type == 'STATUS_SERVER':
self.status_type = stem.StatusType.SERVER
else:
raise ValueError("BUG: Unrecognized status type (%s), likely an EVENT_TYPE_TO_CLASS addition without revising how 'status_type' is assigned." % self.type)
self._log_if_unrecognized('runlevel', stem.Runlevel)
class StreamEvent(Event):
"""
Event that indicates that a stream has changed.
:var str id: stream identifier
:var stem.StreamStatus status: reported status for the stream
:var str circ_id: circuit that the stream is attached to
:var str target: destination of the stream
:var str target_address: destination address (ip or hostname)
:var int target_port: destination port
:var stem.StreamClosureReason reason: reason for the stream to be closed
:var stem.StreamClosureReason remote_reason: remote side's reason for the stream to be closed
:var stem.StreamSource source: origin of the REMAP request
:var str source_addr: requester of the connection
:var str source_address: requester address (ip or hostname)
:var int source_port: requester port
:var stem.StreamPurpose purpose: purpose for the stream
The STREAM event was one of the first Control Protocol V1 events and was
introduced in tor version 0.1.1.1-alpha.
"""
_POSITIONAL_ARGS = ("id", "status", "circ_id", "target")
_KEYWORD_ARGS = {
"REASON": "reason",
"REMOTE_REASON": "remote_reason",
"SOURCE": "source",
"SOURCE_ADDR": "source_addr",
"PURPOSE": "purpose",
}
def _parse(self):
if self.target is None:
raise stem.ProtocolError("STREAM event didn't have a target: %s" % self)
else:
if not ':' in self.target:
raise stem.ProtocolError("Target location must be of the form 'address:port': %s" % self)
address, port = self.target.split(':', 1)
if not connection.is_valid_port(port, allow_zero = True):
raise stem.ProtocolError("Target location's port is invalid: %s" % self)
self.target_address = address
self.target_port = int(port)
if self.source_addr is None:
self.source_address = None
self.source_port = None
else:
if not ':' in self.source_addr:
raise stem.ProtocolError("Source location must be of the form 'address:port': %s" % self)
address, port = self.source_addr.split(':', 1)
if not connection.is_valid_port(port, allow_zero = True):
raise stem.ProtocolError("Source location's port is invalid: %s" % self)
self.source_address = address
self.source_port = int(port)
# spec specifies a circ_id of zero if the stream is unattached
if self.circ_id == "0":
self.circ_id = None
self._log_if_unrecognized('reason', stem.StreamClosureReason)
self._log_if_unrecognized('remote_reason', stem.StreamClosureReason)
self._log_if_unrecognized('purpose', stem.StreamPurpose)
class StreamBwEvent(Event):
"""
Event (emitted approximately every second) with the bytes sent and received
by the application since the last such event on this stream.
:var str id: stream identifier
:var long written: bytes sent by the application
:var long read: bytes received by the application
The STREAM_BW event was introduced in tor version 0.1.2.8-beta.
"""
_POSITIONAL_ARGS = ("id", "written", "read")
_VERSION_ADDED = stem.version.Version('0.1.2.8-beta')
def _parse(self):
if not tor_tools.is_valid_stream_id(self.id):
raise stem.ProtocolError("Stream IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self))
elif not self.written:
raise stem.ProtocolError("STREAM_BW event is missing its written value")
elif not self.read:
raise stem.ProtocolError("STREAM_BW event is missing its read value")
elif not self.read.isdigit() or not self.written.isdigit():
raise stem.ProtocolError("A STREAM_BW event's bytes sent and received should be a positive numeric value, received: %s" % self)
self.read = long(self.read)
self.written = long(self.written)
EVENT_TYPE_TO_CLASS = {
"ADDRMAP": AddrMapEvent,
"AUTHDIR_NEWDESCS": AuthDirNewDescEvent,
"BUILDTIMEOUT_SET": BuildTimeoutSetEvent,
"BW": BandwidthEvent,
"CIRC": CircuitEvent,
"CIRC_MINOR": CircMinorEvent,
"CLIENTS_SEEN": ClientsSeenEvent,
"CONF_CHANGED": ConfChangedEvent,
"DEBUG": LogEvent,
"DESCCHANGED": DescChangedEvent,
"ERR": LogEvent,
"GUARD": GuardEvent,
"INFO": LogEvent,
"NEWCONSENSUS": NewConsensusEvent,
"NEWDESC": NewDescEvent,
"NOTICE": LogEvent,
"NS": NetworkStatusEvent,
"ORCONN": ORConnEvent,
"SIGNAL": SignalEvent,
"STATUS_CLIENT": StatusEvent,
"STATUS_GENERAL": StatusEvent,
"STATUS_SERVER": StatusEvent,
"STREAM": StreamEvent,
"STREAM_BW": StreamBwEvent,
"WARN": LogEvent,
# accounting for a bug in tor 0.2.0.22
"STATUS_SEVER": StatusEvent,
}
|
lgpl-3.0
| -4,938,717,986,826,051,000
| 34.653801
| 164
| 0.673468
| false
| 3.723009
| false
| false
| false
|
henriquebastos/fixofx
|
bin/ofxfix.py
|
1
|
9764
|
#!/usr/bin/env python
# Copyright 2005-2010 Wesabe, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# fixofx.py - canonicalize all recognized upload formats to OFX 2.0
#
import os
import os.path
import sys
from fixofx.ofx import Response, FileTyper
from fixofx.ofxtools.ofc_converter import OfcConverter
from fixofx.ofxtools.qif_converter import QifConverter
def fixpath(filename):
mypath = os.path.dirname(sys._getframe(1).f_code.co_filename)
return os.path.normpath(os.path.join(mypath, filename))
from optparse import OptionParser
from pyparsing import ParseException
__doc__ = \
"""Canonicalizes files from several supported data upload formats (currently
OFX 1.02, OFX 1.5, OFX 1.6, OFX 2.0, OFC, and QIF) to OFX 2.0 (which is a
standard XML 1.0 file). Since it is easiest for the database loader to use a
single, XML-based format, and since users might prefer an XML document to OFX
1.02 or other formats for export, this script essentially removes the need for
any other code to know about all of the variations in data formats. By
default, the converter will read a single file of any supported format from
standard input and write the converted OFX 2.0 file to standard output. A
command line option also allows reading a single file, and other options allow
you to insert data into the output file not available in the source file (for
instance, QIF does not contain the account number, so an option allows you to
specify that for insertion into the OFX output)."""
# Import Psyco if available, for speed.
try:
import psyco
psyco.full()
except ImportError:
pass
def convert(filecontent, filetype, verbose=False, fid="UNKNOWN", org="UNKNOWN",
bankid="UNKNOWN", accttype="UNKNOWN", acctid="UNKNOWN",
balance="UNKNOWN", curdef=None, lang="ENG", dayfirst=False,
debug=False):
text = os.linesep.join(s for s in filecontent.splitlines() if s)
# This finishes a verbosity message started by the caller, where the
# caller explains the source command-line option and this explains the
# source format.
if verbose:
sys.stderr.write("Converting from %s format.\n" % filetype)
if options.debug and (filetype in ["OFC", "QIF"] or filetype.startswith("OFX")):
sys.stderr.write("Starting work on raw text:\n")
sys.stderr.write(rawtext + "\n\n")
if filetype.startswith("OFX/2"):
if verbose: sys.stderr.write("No conversion needed; returning unmodified.\n")
# The file is already OFX 2 -- return it unaltered, ignoring
# any of the parameters passed to this method.
return text
elif filetype.startswith("OFX"):
if verbose: sys.stderr.write("Converting to OFX/2.0...\n")
# This will throw a ParseException if it is unable to recognize
# the source format.
response = Response(text, debug=debug)
return response.as_xml(original_format=filetype)
elif filetype == "OFC":
if verbose: sys.stderr.write("Beginning OFC conversion...\n")
converter = OfcConverter(text, fid=fid, org=org, curdef=curdef,
lang=lang, debug=debug)
# This will throw a ParseException if it is unable to recognize
# the source format.
if verbose:
sys.stderr.write("Converting to OFX/1.02...\n\n%s\n\n" %
converter.to_ofx102())
sys.stderr.write("Converting to OFX/2.0...\n")
return converter.to_xml()
elif filetype == "QIF":
if verbose: sys.stderr.write("Beginning QIF conversion...\n")
converter = QifConverter(text, fid=fid, org=org,
bankid=bankid, accttype=accttype,
acctid=acctid, balance=balance,
curdef=curdef, lang=lang, dayfirst=dayfirst,
debug=debug)
# This will throw a ParseException if it is unable to recognize
# the source format.
if verbose:
sys.stderr.write("Converting to OFX/1.02...\n\n%s\n\n" %
converter.to_ofx102())
sys.stderr.write("Converting to OFX/2.0...\n")
return converter.to_xml()
else:
raise TypeError("Unable to convert source format '%s'." % filetype)
parser = OptionParser(description=__doc__)
parser.add_option("-d", "--debug", action="store_true", dest="debug",
default=False, help="spit out gobs of debugging output during parse")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
default=False, help="be more talkative, social, outgoing")
parser.add_option("-t", "--type", action="store_true", dest="type",
default=False, help="print input file type and exit")
parser.add_option("-f", "--file", dest="filename", default=None,
help="source file to convert (writes to STDOUT)")
parser.add_option("--fid", dest="fid", default="UNKNOWN",
help="(OFC/QIF only) FID to use in output")
parser.add_option("--org", dest="org", default="UNKNOWN",
help="(OFC/QIF only) ORG to use in output")
parser.add_option("--curdef", dest="curdef", default=None,
help="(OFC/QIF only) Currency identifier to use in output")
parser.add_option("--lang", dest="lang", default="ENG",
help="(OFC/QIF only) Language identifier to use in output")
parser.add_option("--bankid", dest="bankid", default="UNKNOWN",
help="(QIF only) Routing number to use in output")
parser.add_option("--accttype", dest="accttype", default="UNKNOWN",
help="(QIF only) Account type to use in output")
parser.add_option("--acctid", dest="acctid", default="UNKNOWN",
help="(QIF only) Account number to use in output")
parser.add_option("--balance", dest="balance", default="UNKNOWN",
help="(QIF only) Account balance to use in output")
parser.add_option("--dayfirst", action="store_true", dest="dayfirst", default=False,
help="(QIF only) Parse dates day first (UK format)")
parser.add_option("-s", "--string", dest="string", default=None,
help="string to convert")
(options, args) = parser.parse_args()
#
# Check the python environment for minimum sanity levels.
#
if options.verbose and not hasattr(open, 'newlines'):
# Universal newlines are generally needed to deal with various QIF downloads.
sys.stderr.write('Warning: universal newline support NOT available.\n')
if options.verbose: print("Options: %s" % options)
#
# Load up the raw text to be converted.
#
rawtext = None
if options.filename:
if os.path.isfile(options.filename):
if options.verbose:
sys.stderr.write("Reading from '%s'\n." % options.filename)
try:
srcfile = open(options.filename, 'rU')
rawtext = srcfile.read()
srcfile.close()
except Exception as detail:
print("Exception during file read:\n%s" % detail)
print("Exiting.")
sys.stderr.write("fixofx failed with error code 1\n")
sys.exit(1)
else:
print("'%s' does not appear to be a file. Try --help." % options.filename)
sys.stderr.write("fixofx failed with error code 2\n")
sys.exit(2)
elif options.string:
if options.verbose:
sys.stderr.write("Reading from string\n")
rawtext = options.string.replace('\r','')
else:
if options.verbose:
sys.stderr.write("Reading from standard input.\n")
stdin_universal = os.fdopen(os.dup(sys.stdin.fileno()), "rU")
rawtext = stdin_universal.read()
if rawtext == "" or rawtext is None:
print("No input. Pipe a file to convert to the script,\n" + \
"or call with -f. Call with --help for more info.")
sys.stderr.write("fixofx failed with error code 3\n")
sys.exit(3)
#
# Convert the raw text to OFX 2.0.
#
try:
# Determine the type of file contained in 'text', using a quick guess
# rather than parsing the file to make sure. (Parsing will fail
# below if the guess is wrong on OFX/1 and QIF.)
filetype = FileTyper(rawtext).trust()
if options.type:
print("Input file type is %s." % filetype)
sys.exit(0)
elif options.debug:
sys.stderr.write("Input file type is %s.\n" % filetype)
converted = convert(rawtext, filetype, verbose=options.verbose,
fid=options.fid, org=options.org, bankid=options.bankid,
accttype=options.accttype, acctid=options.acctid,
balance=options.balance, curdef=options.curdef,
lang=options.lang, dayfirst=options.dayfirst,
debug=options.debug)
print(converted)
sys.exit(0)
except ParseException as detail:
print("Parse exception during '%s' conversion:\n%s" % (filetype, detail))
print("Exiting.")
sys.stderr.write("fixofx failed with error code 4\n")
sys.exit(4)
except TypeError as detail:
print(detail)
print("Exiting.")
sys.stderr.write("fixofx failed with error code 5\n")
sys.exit(5)
|
apache-2.0
| -8,566,761,918,685,850,000
| 38.530364
| 87
| 0.643589
| false
| 3.836542
| false
| false
| false
|
samdroid-apps/browse
|
pdfviewer.py
|
1
|
21028
|
# Copyright (C) 2012, One Laptop Per Child
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import logging
import tempfile
from gettext import gettext as _
from gi.repository import GObject
from gi.repository import Gtk
from gi.repository import GLib
from gi.repository import WebKit
from sugar3.graphics.toolbarbox import ToolbarBox
from sugar3.graphics.toolbutton import ToolButton
from sugar3.graphics.icon import Icon
from sugar3.graphics.progressicon import ProgressIcon
from sugar3.graphics import style
from sugar3.datastore import datastore
from sugar3.activity import activity
from sugar3.bundle.activitybundle import ActivityBundle
class EvinceViewer(Gtk.Overlay):
"""PDF viewer with a toolbar overlay for basic navigation and an
option to save to Journal.
"""
__gsignals__ = {
'save-to-journal': (GObject.SignalFlags.RUN_FIRST,
None,
([])),
'open-link': (GObject.SignalFlags.RUN_FIRST,
None,
([str])),
}
def __init__(self, uri):
GObject.GObject.__init__(self)
self._uri = uri
# delay Evince import until is needed to improve activity startup time
from gi.repository import EvinceDocument
from gi.repository import EvinceView
# Create Evince objects to handle the PDF in the URI:
EvinceDocument.init()
self._doc = EvinceDocument.Document.factory_get_document(uri)
self._view = EvinceView.View()
self._model = EvinceView.DocumentModel()
self._model.set_document(self._doc)
self._view.set_model(self._model)
self._EVINCE_MODE_FREE = EvinceView.SizingMode.FREE
self._view.connect('external-link', self.__handle_link_cb)
self._model.connect('page-changed', self.__page_changed_cb)
self._back_page_button = None
self._forward_page_button = None
self._toolbar_box = self._create_toolbar()
self._update_nav_buttons()
self._toolbar_box.set_halign(Gtk.Align.FILL)
self._toolbar_box.set_valign(Gtk.Align.END)
self.add_overlay(self._toolbar_box)
self._toolbar_box.show()
scrolled_window = Gtk.ScrolledWindow()
self.add(scrolled_window)
scrolled_window.show()
scrolled_window.add(self._view)
self._view.show()
def _create_toolbar(self):
toolbar_box = ToolbarBox()
zoom_out_button = ToolButton('zoom-out')
zoom_out_button.set_tooltip(_('Zoom out'))
zoom_out_button.connect('clicked', self.__zoom_out_cb)
toolbar_box.toolbar.insert(zoom_out_button, -1)
zoom_out_button.show()
zoom_in_button = ToolButton('zoom-in')
zoom_in_button.set_tooltip(_('Zoom in'))
zoom_in_button.connect('clicked', self.__zoom_in_cb)
toolbar_box.toolbar.insert(zoom_in_button, -1)
zoom_in_button.show()
zoom_original_button = ToolButton('zoom-original')
zoom_original_button.set_tooltip(_('Actual size'))
zoom_original_button.connect('clicked', self.__zoom_original_cb)
toolbar_box.toolbar.insert(zoom_original_button, -1)
zoom_original_button.show()
separator = Gtk.SeparatorToolItem()
separator.props.draw = True
toolbar_box.toolbar.insert(separator, -1)
separator.show()
self._back_page_button = ToolButton('go-previous-paired')
self._back_page_button.set_tooltip(_('Previous page'))
self._back_page_button.props.sensitive = False
self._back_page_button.connect('clicked', self.__go_back_page_cb)
toolbar_box.toolbar.insert(self._back_page_button, -1)
self._back_page_button.show()
self._forward_page_button = ToolButton('go-next-paired')
self._forward_page_button.set_tooltip(_('Next page'))
self._forward_page_button.props.sensitive = False
self._forward_page_button.connect('clicked', self.__go_forward_page_cb)
toolbar_box.toolbar.insert(self._forward_page_button, -1)
self._forward_page_button.show()
separator = Gtk.SeparatorToolItem()
separator.props.draw = True
toolbar_box.toolbar.insert(separator, -1)
separator.show()
self._save_to_journal_button = ToolButton('save-to-journal')
self._save_to_journal_button.set_tooltip(_('Save PDF to Journal'))
self._save_to_journal_button.connect('clicked',
self.__save_to_journal_button_cb)
toolbar_box.toolbar.insert(self._save_to_journal_button, -1)
self._save_to_journal_button.show()
return toolbar_box
def disable_journal_button(self):
self._save_to_journal_button.props.sensitive = False
def __handle_link_cb(self, widget, url):
self.emit('open-link', url.get_uri())
def __page_changed_cb(self, model, page_from, page_to):
self._update_nav_buttons()
def __zoom_out_cb(self, widget):
self.zoom_out()
def __zoom_in_cb(self, widget):
self.zoom_in()
def __zoom_original_cb(self, widget):
self.zoom_original()
def __go_back_page_cb(self, widget):
self._view.previous_page()
def __go_forward_page_cb(self, widget):
self._view.next_page()
def __save_to_journal_button_cb(self, widget):
self.emit('save-to-journal')
self._save_to_journal_button.props.sensitive = False
def _update_nav_buttons(self):
current_page = self._model.props.page
self._back_page_button.props.sensitive = current_page > 0
self._forward_page_button.props.sensitive = \
current_page < self._doc.get_n_pages() - 1
def zoom_original(self):
self._model.props.sizing_mode = self._EVINCE_MODE_FREE
self._model.props.scale = 1.0
def zoom_in(self):
self._model.props.sizing_mode = self._EVINCE_MODE_FREE
self._view.zoom_in()
def zoom_out(self):
self._model.props.sizing_mode = self._EVINCE_MODE_FREE
self._view.zoom_out()
def get_pdf_title(self):
return self._doc.get_title()
class DummyBrowser(GObject.GObject):
"""Has the same interface as browser.Browser ."""
__gsignals__ = {
'new-tab': (GObject.SignalFlags.RUN_FIRST, None, ([str])),
'tab-close': (GObject.SignalFlags.RUN_FIRST, None, ([object])),
'selection-changed': (GObject.SignalFlags.RUN_FIRST, None, ([])),
'security-status-changed': (GObject.SignalFlags.RUN_FIRST, None, ([])),
}
__gproperties__ = {
"title": (object, "title", "Title", GObject.PARAM_READWRITE),
"uri": (object, "uri", "URI", GObject.PARAM_READWRITE),
"progress": (object, "progress", "Progress", GObject.PARAM_READWRITE),
"load-status": (object, "load status", "a WebKit LoadStatus",
GObject.PARAM_READWRITE),
}
def __init__(self, tab):
GObject.GObject.__init__(self)
self._tab = tab
self._title = ""
self._uri = ""
self._progress = 0.0
self._load_status = WebKit.LoadStatus.PROVISIONAL
self.security_status = None
def do_get_property(self, prop):
if prop.name == 'title':
return self._title
elif prop.name == 'uri':
return self._uri
elif prop.name == 'progress':
return self._progress
elif prop.name == 'load-status':
return self._load_status
else:
raise AttributeError, 'Unknown property %s' % prop.name
def do_set_property(self, prop, value):
if prop.name == 'title':
self._title = value
elif prop.name == 'uri':
self._uri = value
elif prop.name == 'progress':
self._progress = value
elif prop.name == 'load-status':
self._load_status = value
else:
raise AttributeError, 'Unknown property %s' % prop.name
def get_title(self):
return self._title
def get_uri(self):
return self._uri
def get_progress(self):
return self._progress
def get_load_status(self):
return self._load_status
def emit_new_tab(self, uri):
self.emit('new-tab', uri)
def emit_close_tab(self):
self.emit('tab-close', self._tab)
def get_history(self):
return [{'url': self.props.uri, 'title': self.props.title}]
def can_undo(self):
return False
def can_redo(self):
return False
def can_go_back(self):
return False
def can_go_forward(self):
return False
def can_copy_clipboard(self):
return False
def can_paste_clipboard(self):
return False
def set_history_index(self, index):
pass
def get_history_index(self):
return 0
def set_zoom_level(self, zoom_level):
pass
def get_zoom_level(self):
return 0
def stop_loading(self):
self._tab.close_tab()
def reload(self):
pass
def load_uri(self, uri):
pass
def grab_focus(self):
pass
class PDFProgressMessageBox(Gtk.EventBox):
def __init__(self, message, button_callback):
Gtk.EventBox.__init__(self)
self.modify_bg(Gtk.StateType.NORMAL,
style.COLOR_WHITE.get_gdk_color())
alignment = Gtk.Alignment.new(0.5, 0.5, 0.1, 0.1)
self.add(alignment)
alignment.show()
box = Gtk.VBox()
alignment.add(box)
box.show()
icon = ProgressIcon(icon_name='book',
pixel_size=style.LARGE_ICON_SIZE,
stroke_color=style.COLOR_BUTTON_GREY.get_svg(),
fill_color=style.COLOR_SELECTION_GREY.get_svg())
self.progress_icon = icon
box.pack_start(icon, expand=True, fill=False, padding=0)
icon.show()
label = Gtk.Label()
color = style.COLOR_BUTTON_GREY.get_html()
label.set_markup('<span weight="bold" color="%s">%s</span>' % ( \
color, GLib.markup_escape_text(message)))
box.pack_start(label, expand=True, fill=False, padding=0)
label.show()
button_box = Gtk.HButtonBox()
button_box.set_layout(Gtk.ButtonBoxStyle.CENTER)
box.pack_start(button_box, False, True, 0)
button_box.show()
button = Gtk.Button(label=_('Cancel'))
button.connect('clicked', button_callback)
button.props.image = Icon(icon_name='dialog-cancel',
pixel_size=style.SMALL_ICON_SIZE)
button_box.pack_start(button, expand=True, fill=False, padding=0)
button.show()
class PDFErrorMessageBox(Gtk.EventBox):
def __init__(self, title, message, button_callback):
Gtk.EventBox.__init__(self)
self.modify_bg(Gtk.StateType.NORMAL,
style.COLOR_WHITE.get_gdk_color())
alignment = Gtk.Alignment.new(0.5, 0.5, 0.1, 0.1)
self.add(alignment)
alignment.show()
box = Gtk.VBox()
alignment.add(box)
box.show()
# Get the icon of this activity through the bundle path.
bundle_path = activity.get_bundle_path()
activity_bundle = ActivityBundle(bundle_path)
icon = Icon(pixel_size=style.LARGE_ICON_SIZE,
file=activity_bundle.get_icon(),
stroke_color=style.COLOR_BUTTON_GREY.get_svg(),
fill_color=style.COLOR_TRANSPARENT.get_svg())
box.pack_start(icon, expand=True, fill=False, padding=0)
icon.show()
color = style.COLOR_BUTTON_GREY.get_html()
label = Gtk.Label()
label.set_markup('<span weight="bold" color="%s">%s</span>' % ( \
color, GLib.markup_escape_text(title)))
box.pack_start(label, expand=True, fill=False, padding=0)
label.show()
label = Gtk.Label()
label.set_markup('<span color="%s">%s</span>' % ( \
color, GLib.markup_escape_text(message)))
box.pack_start(label, expand=True, fill=False, padding=0)
label.show()
button_box = Gtk.HButtonBox()
button_box.set_layout(Gtk.ButtonBoxStyle.CENTER)
box.pack_start(button_box, False, True, 0)
button_box.show()
button = Gtk.Button(label=_('Try again'))
button.connect('clicked', button_callback)
button.props.image = Icon(icon_name='entry-refresh',
pixel_size=style.SMALL_ICON_SIZE,
stroke_color=style.COLOR_WHITE.get_svg(),
fill_color=style.COLOR_TRANSPARENT.get_svg())
button_box.pack_start(button, expand=True, fill=False, padding=0)
button.show()
class PDFTabPage(Gtk.HBox):
"""Shows a basic PDF viewer, download the file first if the PDF is
in a remote location.
When the file is remote, display a message while downloading.
"""
def __init__(self):
GObject.GObject.__init__(self)
self._browser = DummyBrowser(self)
self._message_box = None
self._evince_viewer = None
self._pdf_uri = None
self._requested_uri = None
def setup(self, requested_uri, title=None):
self._requested_uri = requested_uri
# The title may be given from the Journal:
if title is not None:
self._browser.props.title = title
self._browser.props.uri = requested_uri
self._browser.props.load_status = WebKit.LoadStatus.PROVISIONAL
# show PDF directly if the file is local (from the system tree
# or from the journal)
if requested_uri.startswith('file://'):
self._pdf_uri = requested_uri
self._browser.props.load_status = WebKit.LoadStatus.FINISHED
self._show_pdf()
elif requested_uri.startswith('journal://'):
self._pdf_uri = self._get_path_from_journal(requested_uri)
self._browser.props.load_status = WebKit.LoadStatus.FINISHED
self._show_pdf(from_journal=True)
# download first if file is remote
elif requested_uri.startswith('http://'):
self._download_from_http(requested_uri)
def _get_browser(self):
return self._browser
browser = GObject.property(type=object, getter=_get_browser)
def _show_pdf(self, from_journal=False):
self._evince_viewer = EvinceViewer(self._pdf_uri)
self._evince_viewer.connect('save-to-journal',
self.__save_to_journal_cb)
self._evince_viewer.connect('open-link',
self.__open_link_cb)
# disable save to journal if the PDF is already loaded from
# the journal:
if from_journal:
self._evince_viewer.disable_journal_button()
self._evince_viewer.show()
self.pack_start(self._evince_viewer, True, True, 0)
# If the PDF has a title, set it as the browse page title,
# otherwise use the last part of the URI. Only when the title
# was not set already from the Journal.
if from_journal:
self._browser.props.title = self._browser.props.title
return
pdf_title = self._evince_viewer.get_pdf_title()
if pdf_title is not None:
self._browser.props.title = pdf_title
else:
self._browser.props.title = os.path.basename(self._requested_uri)
def _get_path_from_journal(self, journal_uri):
"""Get the system tree URI of the file for the Journal object."""
journal_id = self.__journal_id_from_uri(journal_uri)
jobject = datastore.get(journal_id)
return 'file://' + jobject.file_path
def _download_from_http(self, remote_uri):
"""Download the PDF from a remote location to a temporal file."""
# Display a message
self._message_box = PDFProgressMessageBox(
message=_("Downloading document..."),
button_callback=self.close_tab)
self.pack_start(self._message_box, True, True, 0)
self._message_box.show()
# Figure out download URI
temp_path = os.path.join(activity.get_activity_root(), 'instance')
if not os.path.exists(temp_path):
os.makedirs(temp_path)
fd, dest_path = tempfile.mkstemp(dir=temp_path)
self._pdf_uri = 'file://' + dest_path
network_request = WebKit.NetworkRequest.new(remote_uri)
self._download = WebKit.Download.new(network_request)
self._download.set_destination_uri('file://' + dest_path)
# FIXME: workaround for SL #4385
# self._download.connect('notify::progress', self.__download_progress_cb)
self._download.connect('notify::current-size',
self.__current_size_changed_cb)
self._download.connect('notify::status', self.__download_status_cb)
self._download.connect('error', self.__download_error_cb)
self._download.start()
def __current_size_changed_cb(self, download, something):
current_size = download.get_current_size()
total_size = download.get_total_size()
progress = current_size / float(total_size)
self._browser.props.progress = progress
self._message_box.progress_icon.update(progress)
def __download_progress_cb(self, download, data):
progress = download.get_progress()
self._browser.props.progress = progress
self._message_box.progress_icon.update(progress)
def __download_status_cb(self, download, data):
status = download.get_status()
if status == WebKit.DownloadStatus.STARTED:
self._browser.props.load_status = WebKit.LoadStatus.PROVISIONAL
elif status == WebKit.DownloadStatus.FINISHED:
self._browser.props.load_status = WebKit.LoadStatus.FINISHED
self.remove(self._message_box)
self._message_box = None
self._show_pdf()
elif status == WebKit.DownloadStatus.CANCELLED:
logging.debug('Download PDF canceled')
def __download_error_cb(self, download, err_code, err_detail, reason):
logging.debug('Download error! code %s, detail %s: %s' % \
(err_code, err_detail, reason))
title = _('This document could not be loaded')
self._browser.props.title = title
if self._message_box is not None:
self.remove(self._message_box)
self._message_box = PDFErrorMessageBox(
title=title,
message=_('Please make sure you are connected to the Internet.'),
button_callback=self.reload)
self.pack_start(self._message_box, True, True, 0)
self._message_box.show()
def reload(self, button=None):
self.remove(self._message_box)
self._message_box = None
self.setup(self._requested_uri)
def close_tab(self, button=None):
self._browser.emit_close_tab()
def cancel_download(self):
self._download.cancel()
def __journal_id_to_uri(self, journal_id):
"""Return an URI for a Journal object ID."""
return "journal://" + journal_id + ".pdf"
def __journal_id_from_uri(self, journal_uri):
"""Return a Journal object ID from an URI."""
return journal_uri[len("journal://"):-len(".pdf")]
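    # Illustrative note (not part of the original code): the two helpers above
    # are inverses, mapping e.g. a Journal object ID 'abc' to 'journal://abc.pdf'
    # and back again.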
def __save_to_journal_cb(self, widget):
"""Save the PDF in the Journal.
Put the PDF title as the title, or if the PDF doesn't have
one, use the filename instead. Put the requested uri as the
description.
"""
jobject = datastore.create()
jobject.metadata['title'] = self._browser.props.title
jobject.metadata['description'] = _('From: %s') % self._requested_uri
jobject.metadata['mime_type'] = "application/pdf"
jobject.file_path = self._pdf_uri[len("file://"):]
datastore.write(jobject)
# display the new URI:
self._browser.props.uri = self.__journal_id_to_uri(jobject.object_id)
def __open_link_cb(self, widget, uri):
"""Open the external link of a PDF in a new tab."""
self._browser.emit_new_tab(uri)
|
gpl-2.0
| 9,211,314,109,004,162,000
| 33.872305
| 81
| 0.606097
| false
| 3.810801
| false
| false
| false
|
gamechanger/dusty
|
tests/unit/systems/known_hosts/init_test.py
|
1
|
2430
|
import os
import tempfile
from mock import patch
import dusty.constants
from dusty.systems.known_hosts import ensure_known_hosts
from ....testcases import DustyTestCase
@patch('dusty.systems.known_hosts._get_known_hosts_path')
@patch('dusty.systems.known_hosts.check_output')
class TestKnownHostsSystem(DustyTestCase):
def setUp(self):
super(TestKnownHostsSystem, self).setUp()
self.temp_hosts_path = tempfile.mkstemp()[1]
def tearDown(self):
super(TestKnownHostsSystem, self).tearDown()
os.remove(self.temp_hosts_path)
def test_preserves_existing_content(self, fake_check_output, fake_get_known_hosts):
fake_get_known_hosts.return_value = self.temp_hosts_path
fake_check_output.return_value = 'dusty.host:SOMESHA'
initial_content = 'prev.known.host.1:SOMESHA\nprev.known.host.2:SOMESHA'
with open(self.temp_hosts_path, 'w') as f:
f.write(initial_content)
expected_result_content = 'prev.known.host.1:SOMESHA\nprev.known.host.2:SOMESHA\ndusty.host:SOMESHA'
ensure_known_hosts(['dusty.host'])
with open(self.temp_hosts_path, 'r') as f:
self.assertEqual(f.read(), expected_result_content)
def test_not_modified(self, fake_check_output, fake_get_known_hosts):
fake_get_known_hosts.return_value = self.temp_hosts_path
fake_check_output.return_value = 'prev.known.host.1:SOMESHA'
initial_content = 'prev.known.host.1:SOMESHA\nprev.known.host.2:SOMESHA'
with open(self.temp_hosts_path, 'w') as f:
f.write(initial_content)
ensure_known_hosts(['prev.known.host.1'])
with open(self.temp_hosts_path, 'r') as f:
self.assertEqual(f.read(), initial_content)
def test_redundant_additions(self, fake_check_output, fake_get_known_hosts):
fake_get_known_hosts.return_value = self.temp_hosts_path
fake_check_output.return_value = 'dusty.host:SOMESHA'
initial_content = 'prev.known.host.1:SOMESHA\nprev.known.host.2:SOMESHA'
with open(self.temp_hosts_path, 'w') as f:
f.write(initial_content)
expected_result_content = 'prev.known.host.1:SOMESHA\nprev.known.host.2:SOMESHA\ndusty.host:SOMESHA'
ensure_known_hosts(['dusty.host', 'dusty.host', 'dusty.host'])
with open(self.temp_hosts_path, 'r') as f:
self.assertEqual(f.read(), expected_result_content)
|
mit
| -5,612,656,755,419,461,000
| 41.631579
| 108
| 0.677366
| false
| 3.111396
| true
| false
| false
|
mucow24/statusboard
|
darksky.py
|
1
|
1582
|
import time
import urllib2
import json
import sys
Default_Lat = 40.697017
Default_Lon = -73.995267
Request_Url = "https://api.forecast.io/forecast"
def getWeather(key, lat = Default_Lat, lon = Default_Lon):
request = "%s/%s/%s,%s" % (Request_Url, key, lat, lon)
u = urllib2.urlopen(request)
return json.loads(u.read())
def makeRainPlot(data):
# Find max precip:
Inch_to_MM = 25.4
max_rain_mm = 5
for e in data['minutely']['data']:
if e['precipIntensity'] * Inch_to_MM > max_rain_mm:
max_rain_mm = e['precipIntensity'] * Inch_to_MM
ret = {}
ret['graph'] = {}
graph = ret['graph']
graph['title'] = "Dark Sky Next Hour"
graph['type'] = "bar"
graph['yAxis'] = { 'minValue' : 0, 'maxValue' : max_rain_mm }
graph['datasequences'] = []
graph['refreshEveryNSeconds'] = 15
dataseq = graph['datasequences']
dataseq.append({})
seq = dataseq[0]
seq['title'] = "Rain (mm/hr)"
seq['color'] = 'aqua'
seq['datapoints'] = []
ctr = 0
for e in data['minutely']['data']:
ctr = ctr + 1
if ctr % 2 == 0:
continue
time_str = time.strftime("%H:%M", time.localtime(e['time']))
precip = e['precipIntensity'] * Inch_to_MM
seq['datapoints'].append({'title' : time_str, 'value' : precip})
return ret
def main(argv):
  refresh_interval = int(argv[0])
  output_file = argv[1]
  # getWeather() requires an API key; read it as the third argument.
  api_key = argv[2]
  while True:
    d = getWeather(api_key)
    p = makeRainPlot(d)
    f = open(output_file, 'w')
    f.write(json.dumps(p, indent = 2, separators = (',', ': ')))
    f.close()
    time.sleep(refresh_interval)
if __name__ == "__main__":
  # Skip the script name so argv[0] is the refresh interval.
  main(sys.argv[1:])
|
gpl-2.0
| 5,671,711,468,693,888,000
| 25.366667
| 68
| 0.600506
| false
| 2.819964
| false
| false
| false
|
PaddlePaddle/models
|
PaddleAudio/examples/panns/parse_result.py
|
1
|
2990
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import ast
import os
from typing import Dict, List
import numpy as np
from paddleaudio.utils import logger
# yapf: disable
parser = argparse.ArgumentParser(__doc__)
parser.add_argument('--tagging_file', type=str, required=True, help='')
parser.add_argument('--top_k', type=int, default=10, help='Get top k predicted results of audioset labels.')
parser.add_argument('--smooth', type=ast.literal_eval, default=True, help='Set "True" to apply posterior smoothing.')
parser.add_argument('--smooth_size', type=int, default=5, help='Window size of posterior smoothing.')
parser.add_argument('--label_file', type=str, default='./assets/audioset_labels.txt', help='File of audioset labels.')
parser.add_argument('--output_dir', type=str, default='./output_dir', help='Directory to save tagging labels.')
args = parser.parse_args()
# yapf: enable
def smooth(results: np.ndarray, win_size: int):
"""
Execute posterior smoothing in-place.
"""
for i in range(len(results) - 1, -1, -1):
if i < win_size - 1:
left = 0
else:
left = i + 1 - win_size
results[i] = np.sum(results[left:i + 1], axis=0) / (i - left + 1)
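# --- Illustrative example (not part of the original script) ---
# Shows the in-place effect of smooth(): with win_size=2 each row becomes the
# running mean of itself and the previous row. This helper is never called by
# the script, so its behaviour is unchanged.
def _smooth_example():
    demo = np.array([[1.0], [0.0], [0.0], [0.0]])
    smooth(demo, win_size=2)
    # demo is now [[1.0], [0.5], [0.0], [0.0]]
    return demo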
def generate_topk_label(k: int, label_map: Dict, result: np.ndarray):
"""
Return top k result.
"""
result = np.asarray(result)
topk_idx = (-result).argsort()[:k]
ret = ''
for idx in topk_idx:
label, score = label_map[idx], result[idx]
ret += f'{label}: {score}\n'
return ret
if __name__ == "__main__":
label_map = {}
with open(args.label_file, 'r') as f:
for i, l in enumerate(f.readlines()):
label_map[i] = l.strip()
results = np.load(args.tagging_file, allow_pickle=True)
times, scores = results['time'], results['scores']
if args.smooth:
logger.info('Posterior smoothing...')
smooth(scores, win_size=args.smooth_size)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
output_file = os.path.join(
args.output_dir,
os.path.basename(args.tagging_file).split('.')[0] + '.txt')
with open(output_file, 'w') as f:
for time, score in zip(times, scores):
f.write(f'{time}\n')
f.write(generate_topk_label(args.top_k, label_map, score) + '\n')
logger.info(f'Saved tagging labels to {output_file}')
|
apache-2.0
| 3,274,352,965,856,468,500
| 34.595238
| 118
| 0.652508
| false
| 3.432836
| false
| false
| false
|
zsiki/realcentroid
|
realcentroid_dialog.py
|
1
|
4135
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
RealCentroidDialog
A QGIS plugin
Create internal point for a polygon layer
Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/
-------------------
begin : 2018-12-01
git sha : $Format:%H$
copyright : (C) 2018 by Zotan Siki
email : siki1958@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import os
from PyQt5 import uic
from PyQt5 import QtWidgets
from PyQt5 import QtCore
from qgis.core import QgsMapLayerProxyModel, QgsSettings
from qgis.gui import QgsEncodingFileDialog
FORM_CLASS, _ = uic.loadUiType(os.path.join(
os.path.dirname(__file__), 'realcentroid_dialog_base.ui'))
class RealCentroidDialog(QtWidgets.QDialog, FORM_CLASS):
""" dialog class for realcentroid QGIS3 plugin """
def __init__(self, parent=None):
"""Constructor."""
super(RealCentroidDialog, self).__init__(parent)
self.setupUi(self)
self.encoding = None
self.layerBox.currentIndexChanged.connect(self.sel)
self.browseButton.clicked.connect(self.browse)
self.cancelBtn.clicked.connect(self.reject)
self.okBtn.clicked.connect(self.ok)
def showEvent(self, event):
""" initialize dialog widgets """
# filter polygonlayers
self.layerBox.setFilters(QgsMapLayerProxyModel.PolygonLayer)
# clear previous pointlayer
self.pointEdit.clear()
self.sel()
def sel(self):
""" check/uncheck selectBox if selected layer changed """
l = self.layerBox.currentLayer()
try:
sf = l.selectedFeatures()
except:
sf = None
if sf: # is not None and len(sf):
self.selectedBox.setEnabled(True)
self.selectedBox.setCheckState(QtCore.Qt.Checked)
else:
self.selectedBox.setEnabled(False)
self.selectedBox.setCheckState(QtCore.Qt.Unchecked)
def browse(self):
""" open save layer dialog """
settings = QgsSettings()
dirName = settings.value("/UI/lastShapefileDir")
encode = settings.value("/UI/encoding")
fileDialog = QgsEncodingFileDialog(self, "Output shape file", dirName,
"Shape file (*.shp)", encode)
fileDialog.setDefaultSuffix("shp")
fileDialog.setFileMode(QtWidgets.QFileDialog.AnyFile)
fileDialog.setAcceptMode(QtWidgets.QFileDialog.AcceptSave)
#fileDialog.setConfirmOverwrite(True)
if not fileDialog.exec_() == QtWidgets.QDialog.Accepted:
return
files = fileDialog.selectedFiles()
self.pointEdit.setText(files[0])
self.encoding = fileDialog.encoding()
def ok(self):
""" check widgets """
if len(self.layerBox.currentText()) == 0:
QtWidgets.QMessageBox.warning(self, "Realcentroid", \
QtWidgets.QApplication.translate("RealCentroid", \
"No polygon layer selected", None))
return
if len(self.pointEdit.text()) == 0:
QtWidgets.QMessageBox.warning(self, "Realcentroid", \
QtWidgets.QApplication.translate("RealCentroid", \
"No point layer given", None))
return
self.accept()
|
gpl-2.0
| -666,068,943,287,623,000
| 39.539216
| 78
| 0.536397
| false
| 4.709567
| false
| false
| false
|
morucci/repoxplorer
|
repoxplorer/index/__init__.py
|
1
|
4623
|
# Copyright 2016, Fabien Boucher
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
import time
import pytz
import datetime
from pecan import conf
from Crypto.Hash import SHA
from elasticsearch import client
from jsonschema import validate as schema_validate
from repoxplorer.index.yamlbackend import YAMLBackend
def date2epoch(date):
d = datetime.datetime.strptime(date, "%Y-%m-%d")
d = d.replace(tzinfo=pytz.utc)
epoch = (d - datetime.datetime(1970, 1, 1,
tzinfo=pytz.utc)).total_seconds()
return int(epoch)
def get_elasticsearch_version(es):
version = es.info()['version']['number']
return int(version.split('.')[0])
def add_params(es):
if get_elasticsearch_version(es) >= 7:
return {'include_type_name': 'true'}
else:
return {}
# From https://stackoverflow.com/a/27974027/1966658
def clean_empty(d):
if not isinstance(d, (dict, list)):
return d
if isinstance(d, list):
return [v for v in (clean_empty(v) for v in d) if v]
return {k: v for k, v in ((k, clean_empty(v)) for k, v in d.items()) if (
v or v == False)} # noqa: E712
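# Illustrative example (not part of the original module): clean_empty()
# recursively drops empty dicts/lists/None but keeps values that are
# explicitly False, thanks to the `v == False` clause above.
def _clean_empty_example():
    data = {'a': {}, 'b': [], 'c': None, 'd': False, 'e': {'f': 1, 'g': {}}}
    return clean_empty(data)  # -> {'d': False, 'e': {'f': 1}}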
class Connector(object):
def __init__(self, host=None, port=None, index=None, index_suffix=None):
self.host = (host or
getattr(conf, 'elasticsearch_host', None) or
'localhost')
self.port = (port or
getattr(conf, 'elasticsearch_port', None) or
9200)
self.index = (index or
getattr(conf, 'elasticsearch_index', None) or
'repoxplorer')
if index_suffix:
self.index += "-%s" % index_suffix
if (getattr(conf, 'elasticsearch_user', None) and
getattr(conf, 'elasticsearch_password', None)):
self.http_auth = "%s:%s" % (
getattr(conf, 'elasticsearch_user', None),
getattr(conf, 'elasticsearch_password', None))
# NOTE(dpawlik) Opendistro is using self signed certs,
# so verify_certs is set to False.
self.es = client.Elasticsearch(
[{"host": self.host,
"port": self.port,
"http_auth": self.http_auth,
"use_ssl": True,
"verify_certs": False,
"ssl_show_warn": True}], timeout=60)
else:
self.es = client.Elasticsearch(
[{"host": self.host, "port": self.port}],
timeout=60)
self.ic = client.IndicesClient(self.es)
if not self.ic.exists(index=self.index):
self.ic.create(index=self.index)
# Give some time to have the index fully created
time.sleep(1)
class YAMLDefinition(object):
def __init__(self, db_path=None, db_default_file=None,
db_cache_path=None):
db_cache_path = db_cache_path or conf.get('db_cache_path') or db_path
self.yback = YAMLBackend(
db_path or conf.get('db_path'),
db_default_file=db_default_file or conf.get('db_default_file'),
db_cache_path=db_cache_path)
self.yback.load_db()
self.hashes_str = SHA.new(
"".join(self.yback.hashes).encode(errors='ignore')).hexdigest()
self.default_data, self.data = self.yback.get_data()
self._merge()
def _check_basic(self, key, schema, identifier):
""" Verify schema and no data duplicated
"""
issues = []
ids = set()
for d in self.data:
data = d.get(key, {})
try:
schema_validate({key: data},
yaml.load(schema))
except Exception as e:
issues.append(e.message)
duplicated = set(data.keys()) & ids
if duplicated:
issues.append("%s IDs [%s,] are duplicated" % (
identifier, ",".join(duplicated)))
ids.update(set(data.keys()))
return ids, issues
|
apache-2.0
| 8,594,167,975,764,877,000
| 34.561538
| 77
| 0.567597
| false
| 3.904561
| false
| false
| false
|
maczniak/emberjs
|
website_rebase.py
|
1
|
3151
|
#!/usr/bin/python
#
# The original Ember.js website lives under http://emberjs.com/, and the
# translated website lives under http://maczniak.github.io/emberjs/, so all
# absolute URLs were broken. I could not find a generic HTML rebase tool, so I
# wrote this simple script in Python. It is a specific solution to this problem.
# see also: http://a3nm.net/blog/htmlrebase.html
#-- configuration start --
BUILD_ROOT = 'build/'
PREFIX = 'emberjs/' # must end with a trailing slash and must not start with one
#-- configuration end --
import os
import os.path
import re
# <link href="/stylesheets/fonts/fontello-ie7.css" media="screen" rel="stylesheet" type="text/css" />
html_link_str = '<link.*?href="/'
html_link_pattern = re.compile(html_link_str)
# _gaq.push(['_setAccount', 'UA-27675533-1']);
# from layout.erb
html_ga_str = 'UA-27675533-1'
html_ga_pattern = re.compile(html_ga_str)
# <script type="text/javascript" src="/javascripts/common-old-ie.js"></script>
html_script_str = '<script.*?src="/(?=[^/])'
html_script_pattern = re.compile(html_script_str)
# <a id="logo" href="/">
# <a href="/guides">
html_a_str = '<a .*?href="/'
html_a_pattern = re.compile(html_a_str)
# <img src="/images/about/mhelabs.png">
# exclude src="//ssl.gstatic.com/images/icons/gplus-32.png"
html_img_str = '<img.*?src="/(?=[^/])'
html_img_pattern = re.compile(html_img_str)
# var i=r.map(function(e){return $.ajax("/javascripts/app/examples/"+n+"/
# from javascripts/app/about/inline-examples.js
# <div class="example-app example-loading" data-name="loading" data-files="app.js templates/application.hbs">
js_ajax_str = '[$][.]ajax[(]"/'
js_ajax_pattern = re.compile(js_ajax_str)
# background-image:url("/images/background-shades.svg")
css_url_str = 'url[(]"/'
css_url_pattern = re.compile(css_url_str)
# url("../../fonts -> url("../fonts
css_font_str = 'url[(]"../../'
css_font_pattern = re.compile(css_font_str)
def read(filename):
f = open(filename, 'r')
content = f.read()
f.close()
return content
def write(filename, content):
f = open(filename, 'w')
content = f.write(content)
f.close()
def handle_html(filename):
content = read(filename)
content = html_link_pattern.sub('\g<0>' + PREFIX, content)
content = html_ga_pattern.sub('UA-45832618-1', content)
content = html_script_pattern.sub('\g<0>' + PREFIX, content)
content = html_a_pattern.sub('\g<0>' + PREFIX, content)
content = html_img_pattern.sub('\g<0>' + PREFIX, content)
write(filename, content)
def handle_js(filename):
content = read(filename)
content = js_ajax_pattern.sub('\g<0>' + PREFIX, content)
write(filename, content)
def handle_css(filename):
content = read(filename)
content = css_url_pattern.sub('\g<0>' + PREFIX, content)
content = css_font_pattern.sub('url("../', content)
write(filename, content)
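# --- Illustrative example (not part of the original script) ---
# Demonstrates the kind of rewrite handle_html() performs, using the patterns
# and PREFIX defined above. Not called anywhere.
def _rebase_example():
  sample = '<a id="logo" href="/"><img src="/images/about/mhelabs.png">'
  sample = html_a_pattern.sub('\g<0>' + PREFIX, sample)
  sample = html_img_pattern.sub('\g<0>' + PREFIX, sample)
  # -> '<a id="logo" href="/emberjs/"><img src="/emberjs/images/about/mhelabs.png">'
  return sample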
def extension(filename):
idx = filename.rfind('.')
if idx == -1:
return ''
else:
return filename[idx:]
for root, dirs, files in os.walk(BUILD_ROOT):
for file in files:
ext = extension(file)
if ext == '.html':
handle_html(os.path.join(root, file))
elif ext == '.js':
handle_js(os.path.join(root, file))
elif ext == '.css':
handle_css(os.path.join(root, file))
|
mit
| -3,186,537,805,941,783,600
| 29.892157
| 109
| 0.675341
| false
| 2.781112
| false
| false
| false
|
google/glazier
|
glazier/lib/actions/domain.py
|
1
|
1479
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Actions for interacting with the company domain."""
from glazier.lib import domain_join
from glazier.lib.actions.base import ActionError
from glazier.lib.actions.base import BaseAction
from glazier.lib.actions.base import ValidationError
class DomainJoin(BaseAction):
"""Create an imaging timer."""
def Run(self):
method = str(self._args[0])
domain = str(self._args[1])
ou = None
if len(self._args) > 2:
ou = str(self._args[2])
joiner = domain_join.DomainJoin(method, domain, ou)
try:
joiner.JoinDomain()
except domain_join.DomainJoinError as e:
raise ActionError('Unable to complete domain join. %s' % str(e))
def Validate(self):
self._ListOfStringsValidator(self._args, length=2, max_length=3)
if self._args[0] not in domain_join.AUTH_OPTS:
raise ValidationError('Invalid join method: %s' % self._args[0])
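# Illustrative note (not part of the original module): per Run() above, the
# action's args take the form [method, domain] or [method, domain, ou], where
# method must be one of domain_join.AUTH_OPTS; the exact option strings are
# defined in the domain_join module and are not assumed here.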
|
apache-2.0
| 2,751,999,170,932,907,000
| 35.073171
| 74
| 0.721433
| false
| 3.660891
| false
| false
| false
|
markgw/jazzparser
|
lib/nltk/parse/earleychart.py
|
1
|
18301
|
# -*- coding: utf-8 -*-
# Natural Language Toolkit: An Incremental Earley Chart Parser
#
# Copyright (C) 2001-2010 NLTK Project
# Author: Peter Ljunglöf <peter.ljunglof@heatherleaf.se>
# Rob Speer <rspeer@mit.edu>
# Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <sb@csse.unimelb.edu.au>
# Jean Mark Gawron <gawron@mail.sdsu.edu>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
#
# $Id: chart.py 8144 2009-06-01 22:27:39Z edloper $
"""
Data classes and parser implementations for I{incremental} chart
parsers, which use dynamic programming to efficiently parse a text.
A X{chart parser} derives parse trees for a text by iteratively adding
\"edges\" to a \"chart\". Each X{edge} represents a hypothesis about the tree
structure for a subsequence of the text. The X{chart} is a
\"blackboard\" for composing and combining these hypotheses.
A parser is X{incremental}, if it guarantees that for all i, j where i < j,
all edges ending at i are built before any edges ending at j.
This is appealing for, say, speech recognizer hypothesis filtering.
The main parser class is L{EarleyChartParser}, which is a top-down
algorithm, originally formulated by Jay Earley (1970).
"""
from nltk.grammar import *
from api import *
from chart import *
from featurechart import *
#////////////////////////////////////////////////////////////
# Incremental Chart
#////////////////////////////////////////////////////////////
class IncrementalChart(Chart):
def initialize(self):
# A sequence of edge lists contained in this chart.
self._edgelists = tuple([] for x in self._positions())
# The set of child pointer lists associated with each edge.
self._edge_to_cpls = {}
# Indexes mapping attribute values to lists of edges
# (used by select()).
self._indexes = {}
def edges(self):
return list(self.iteredges())
def iteredges(self):
return (edge for edgelist in self._edgelists for edge in edgelist)
def select(self, end, **restrictions):
edgelist = self._edgelists[end]
# If there are no restrictions, then return all edges.
if restrictions=={}: return iter(edgelist)
# Find the index corresponding to the given restrictions.
restr_keys = restrictions.keys()
restr_keys.sort()
restr_keys = tuple(restr_keys)
# If it doesn't exist, then create it.
if restr_keys not in self._indexes:
self._add_index(restr_keys)
vals = tuple(restrictions[key] for key in restr_keys)
return iter(self._indexes[restr_keys][end].get(vals, []))
def _add_index(self, restr_keys):
# Make sure it's a valid index.
for key in restr_keys:
if not hasattr(EdgeI, key):
raise ValueError, 'Bad restriction: %s' % key
# Create the index.
index = self._indexes[restr_keys] = tuple({} for x in self._positions())
# Add all existing edges to the index.
for end, edgelist in enumerate(self._edgelists):
this_index = index[end]
for edge in edgelist:
vals = tuple(getattr(edge, key)() for key in restr_keys)
this_index.setdefault(vals, []).append(edge)
def _register_with_indexes(self, edge):
end = edge.end()
for (restr_keys, index) in self._indexes.items():
vals = tuple(getattr(edge, key)() for key in restr_keys)
index[end].setdefault(vals, []).append(edge)
def _append_edge(self, edge):
self._edgelists[edge.end()].append(edge)
def _positions(self):
return xrange(self.num_leaves() + 1)
class FeatureIncrementalChart(IncrementalChart, FeatureChart):
def select(self, end, **restrictions):
edgelist = self._edgelists[end]
# If there are no restrictions, then return all edges.
if restrictions=={}: return iter(edgelist)
# Find the index corresponding to the given restrictions.
restr_keys = restrictions.keys()
restr_keys.sort()
restr_keys = tuple(restr_keys)
# If it doesn't exist, then create it.
if restr_keys not in self._indexes:
self._add_index(restr_keys)
vals = tuple(self._get_type_if_possible(restrictions[key])
for key in restr_keys)
return iter(self._indexes[restr_keys][end].get(vals, []))
def _add_index(self, restr_keys):
# Make sure it's a valid index.
for key in restr_keys:
if not hasattr(EdgeI, key):
raise ValueError, 'Bad restriction: %s' % key
# Create the index.
index = self._indexes[restr_keys] = tuple({} for x in self._positions())
# Add all existing edges to the index.
for end, edgelist in enumerate(self._edgelists):
this_index = index[end]
for edge in edgelist:
vals = tuple(self._get_type_if_possible(getattr(edge, key)())
for key in restr_keys)
this_index.setdefault(vals, []).append(edge)
def _register_with_indexes(self, edge):
end = edge.end()
for (restr_keys, index) in self._indexes.items():
vals = tuple(self._get_type_if_possible(getattr(edge, key)())
for key in restr_keys)
index[end].setdefault(vals, []).append(edge)
#////////////////////////////////////////////////////////////
# Incremental CFG Rules
#////////////////////////////////////////////////////////////
class CompleteFundamentalRule(SingleEdgeFundamentalRule):
def _apply_incomplete(self, chart, grammar, left_edge):
end = left_edge.end()
# When the chart is incremental, we only have to look for
# empty complete edges here.
for right_edge in chart.select(start=end, end=end,
is_complete=True,
lhs=left_edge.next()):
new_edge = left_edge.move_dot_forward(right_edge.end())
if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
yield new_edge
class CompleterRule(CompleteFundamentalRule):
_fundamental_rule = CompleteFundamentalRule()
def apply_iter(self, chart, grammar, edge):
if not isinstance(edge, LeafEdge):
for new_edge in self._fundamental_rule.apply_iter(chart, grammar, edge):
yield new_edge
class ScannerRule(CompleteFundamentalRule):
_fundamental_rule = CompleteFundamentalRule()
def apply_iter(self, chart, grammar, edge):
if isinstance(edge, LeafEdge):
for new_edge in self._fundamental_rule.apply_iter(chart, grammar, edge):
yield new_edge
class PredictorRule(CachedTopDownPredictRule):
pass
class FilteredCompleteFundamentalRule(FilteredSingleEdgeFundamentalRule):
def apply_iter(self, chart, grammar, edge):
# Since the Filtered rule only works for grammars without empty productions,
# we only have to bother with complete edges here.
if edge.is_complete():
for new_edge in self._apply_complete(chart, grammar, edge):
yield new_edge
#////////////////////////////////////////////////////////////
# Incremental FCFG Rules
#////////////////////////////////////////////////////////////
class FeatureCompleteFundamentalRule(FeatureSingleEdgeFundamentalRule):
def _apply_incomplete(self, chart, grammar, left_edge):
fr = self._fundamental_rule
end = left_edge.end()
# When the chart is incremental, we only have to look for
# empty complete edges here.
for right_edge in chart.select(start=end, end=end,
is_complete=True,
lhs=left_edge.next()):
for new_edge in fr.apply_iter(chart, grammar, left_edge, right_edge):
yield new_edge
class FeatureCompleterRule(CompleterRule):
_fundamental_rule = FeatureCompleteFundamentalRule()
class FeatureScannerRule(ScannerRule):
_fundamental_rule = FeatureCompleteFundamentalRule()
class FeaturePredictorRule(FeatureTopDownPredictRule):
pass
#////////////////////////////////////////////////////////////
# Incremental CFG Chart Parsers
#////////////////////////////////////////////////////////////
EARLEY_STRATEGY = [LeafInitRule(),
TopDownInitRule(),
CompleterRule(),
ScannerRule(),
PredictorRule()]
TD_INCREMENTAL_STRATEGY = [LeafInitRule(),
TopDownInitRule(),
CachedTopDownPredictRule(),
CompleteFundamentalRule()]
BU_INCREMENTAL_STRATEGY = [LeafInitRule(),
EmptyPredictRule(),
BottomUpPredictRule(),
CompleteFundamentalRule()]
BU_LC_INCREMENTAL_STRATEGY = [LeafInitRule(),
EmptyPredictRule(),
BottomUpPredictCombineRule(),
CompleteFundamentalRule()]
LC_INCREMENTAL_STRATEGY = [LeafInitRule(),
FilteredBottomUpPredictCombineRule(),
FilteredCompleteFundamentalRule()]
class IncrementalChartParser(ChartParser):
"""
An I{incremental} chart parser implementing Jay Earley's
parsing algorithm:
- For each index I{end} in [0, 1, ..., N]:
- For each I{edge} s.t. I{edge}.end = I{end}:
- If I{edge} is incomplete, and I{edge}.next is not a part
of speech:
- Apply PredictorRule to I{edge}
- If I{edge} is incomplete, and I{edge}.next is a part of
speech:
- Apply ScannerRule to I{edge}
- If I{edge} is complete:
- Apply CompleterRule to I{edge}
- Return any complete parses in the chart
"""
def __init__(self, grammar, strategy=BU_LC_INCREMENTAL_STRATEGY,
trace=0, trace_chart_width=50,
chart_class=IncrementalChart):
"""
Create a new Earley chart parser, that uses C{grammar} to
parse texts.
@type grammar: C{ContextFreeGrammar}
@param grammar: The grammar used to parse texts.
@type trace: C{int}
@param trace: The level of tracing that should be used when
parsing a text. C{0} will generate no tracing output;
and higher numbers will produce more verbose tracing
output.
@type trace_chart_width: C{int}
@param trace_chart_width: The default total width reserved for
the chart in trace output. The remainder of each line will
be used to display edges.
@param chart_class: The class that should be used to create
the charts used by this parser.
"""
self._grammar = grammar
self._trace = trace
self._trace_chart_width = trace_chart_width
self._chart_class = chart_class
self._axioms = []
self._inference_rules = []
for rule in strategy:
if rule.NUM_EDGES == 0:
self._axioms.append(rule)
elif rule.NUM_EDGES == 1:
self._inference_rules.append(rule)
else:
raise ValueError("Incremental inference rules must have "
"NUM_EDGES == 0 or 1")
def chart_parse(self, tokens, trace=None):
if trace is None: trace = self._trace
trace_new_edges = self._trace_new_edges
tokens = list(tokens)
self._grammar.check_coverage(tokens)
chart = self._chart_class(tokens)
grammar = self._grammar
# Width, for printing trace edges.
trace_edge_width = self._trace_chart_width / (chart.num_leaves() + 1)
if trace: print chart.pp_leaves(trace_edge_width)
for axiom in self._axioms:
new_edges = axiom.apply(chart, grammar)
trace_new_edges(chart, axiom, new_edges, trace, trace_edge_width)
inference_rules = self._inference_rules
for end in range(chart.num_leaves()+1):
if trace > 1: print "\n* Processing queue:", end, "\n"
agenda = list(chart.select(end=end))
while agenda:
edge = agenda.pop()
for rule in inference_rules:
new_edges = rule.apply_iter(chart, grammar, edge)
if trace:
new_edges = list(new_edges)
trace_new_edges(chart, rule, new_edges, trace, trace_edge_width)
for new_edge in new_edges:
if new_edge.end()==end:
agenda.append(new_edge)
return chart
class EarleyChartParser(IncrementalChartParser):
def __init__(self, grammar, **parser_args):
IncrementalChartParser.__init__(self, grammar, EARLEY_STRATEGY, **parser_args)
pass
class IncrementalTopDownChartParser(IncrementalChartParser):
def __init__(self, grammar, **parser_args):
IncrementalChartParser.__init__(self, grammar, TD_INCREMENTAL_STRATEGY, **parser_args)
class IncrementalBottomUpChartParser(IncrementalChartParser):
def __init__(self, grammar, **parser_args):
IncrementalChartParser.__init__(self, grammar, BU_INCREMENTAL_STRATEGY, **parser_args)
class IncrementalBottomUpLeftCornerChartParser(IncrementalChartParser):
def __init__(self, grammar, **parser_args):
IncrementalChartParser.__init__(self, grammar, BU_LC_INCREMENTAL_STRATEGY, **parser_args)
class IncrementalLeftCornerChartParser(IncrementalChartParser):
def __init__(self, grammar, **parser_args):
if not grammar.is_nonempty():
raise ValueError("IncrementalLeftCornerParser only works for grammars "
"without empty productions.")
IncrementalChartParser.__init__(self, grammar, LC_INCREMENTAL_STRATEGY, **parser_args)
#////////////////////////////////////////////////////////////
# Incremental FCFG Chart Parsers
#////////////////////////////////////////////////////////////
EARLEY_FEATURE_STRATEGY = [LeafInitRule(),
FeatureTopDownInitRule(),
FeatureCompleterRule(),
FeatureScannerRule(),
FeaturePredictorRule()]
TD_INCREMENTAL_FEATURE_STRATEGY = [LeafInitRule(),
FeatureTopDownInitRule(),
FeatureTopDownPredictRule(),
FeatureCompleteFundamentalRule()]
BU_INCREMENTAL_FEATURE_STRATEGY = [LeafInitRule(),
FeatureEmptyPredictRule(),
FeatureBottomUpPredictRule(),
FeatureCompleteFundamentalRule()]
BU_LC_INCREMENTAL_FEATURE_STRATEGY = [LeafInitRule(),
FeatureEmptyPredictRule(),
FeatureBottomUpPredictCombineRule(),
FeatureCompleteFundamentalRule()]
class FeatureIncrementalChartParser(IncrementalChartParser, FeatureChartParser):
def __init__(self, grammar,
strategy=BU_LC_INCREMENTAL_FEATURE_STRATEGY,
trace_chart_width=20,
chart_class=FeatureIncrementalChart,
**parser_args):
IncrementalChartParser.__init__(self, grammar,
strategy=strategy,
trace_chart_width=trace_chart_width,
chart_class=chart_class,
**parser_args)
class FeatureEarleyChartParser(FeatureIncrementalChartParser):
def __init__(self, grammar, **parser_args):
FeatureIncrementalChartParser.__init__(self, grammar, EARLEY_FEATURE_STRATEGY, **parser_args)
class FeatureIncrementalTopDownChartParser(FeatureIncrementalChartParser):
def __init__(self, grammar, **parser_args):
FeatureIncrementalChartParser.__init__(self, grammar, TD_INCREMENTAL_FEATURE_STRATEGY, **parser_args)
class FeatureIncrementalBottomUpChartParser(FeatureIncrementalChartParser):
def __init__(self, grammar, **parser_args):
FeatureIncrementalChartParser.__init__(self, grammar, BU_INCREMENTAL_FEATURE_STRATEGY, **parser_args)
class FeatureIncrementalBottomUpLeftCornerChartParser(FeatureIncrementalChartParser):
def __init__(self, grammar, **parser_args):
FeatureIncrementalChartParser.__init__(self, grammar, BU_LC_INCREMENTAL_FEATURE_STRATEGY, **parser_args)
#////////////////////////////////////////////////////////////
# Demonstration
#////////////////////////////////////////////////////////////
def demo(should_print_times=True, should_print_grammar=False,
should_print_trees=True, trace=2,
sent='I saw John with a dog with my cookie', numparses=5):
"""
A demonstration of the Earley parsers.
"""
import sys, time
# The grammar for ChartParser and SteppingChartParser:
grammar = nltk.parse.chart.demo_grammar()
if should_print_grammar:
print "* Grammar"
print grammar
# Tokenize the sample sentence.
print "* Sentence:"
print sent
tokens = sent.split()
print tokens
print
# Do the parsing.
earley = EarleyChartParser(grammar, trace=trace)
t = time.clock()
chart = earley.chart_parse(tokens)
parses = chart.parses(grammar.start())
t = time.clock()-t
# Print results.
if numparses:
assert len(parses)==numparses, 'Not all parses found'
if should_print_trees:
for tree in parses: print tree
else:
print "Nr trees:", len(parses)
if should_print_times:
print "Time:", t
if __name__ == '__main__': demo()
|
gpl-3.0
| 8,186,104,699,162,732,000
| 39.939597
| 112
| 0.575847
| false
| 4.234151
| false
| false
| false
|
sassoftware/rpath-product-definition
|
doc/example.py
|
1
|
5275
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Example code for interacting with rPath product definition xml files.
"""
from rpath_proddef import api1 as proddef
import sys
# This is an example of how this module would be used to generate the XML
# for the proddef source trove.
#
# This should produce an xml file equivalent to example.xml
baseFlavor = """
~MySQL-python.threadsafe, ~X, ~!alternatives, !bootstrap,
~builddocs, ~buildtests, !cross, ~desktop, ~!dietlibc, ~!dom0, ~!domU,
~emacs, ~gcj, ~gnome, ~grub.static, ~gtk, ~ipv6, ~kde, ~!kernel.debug,
~kernel.debugdata, ~!kernel.numa, ~kernel.smp, ~krb, ~ldap, ~nptl,
~!openssh.smartcard, ~!openssh.static_libcrypto, pam, ~pcre, ~perl,
~!pie, ~!postfix.mysql, ~python, ~qt, ~readline, ~!sasl, ~!selinux,
~sqlite.threadsafe, ssl, ~tcl, tcpwrappers, ~tk, ~uClibc, !vmware,
~!xen, ~!xfce, ~!xorg-x11.xprint
"""
productDescription = """
This here is my awesome appliance.
Is it not nifty?
Worship the appliance.
"""
productVersionDescription = """
Version 1.0 features "stability" and "usefulness", which is a
vast improvement over our pre-release code.
"""
prodDef = proddef.ProductDefinition()
prodDef.setProductName("My Awesome Appliance")
prodDef.setProductShortname("awesome")
prodDef.setProductDescription(productDescription)
prodDef.setProductVersion("1.0")
prodDef.setProductVersionDescription(productVersionDescription)
prodDef.setConaryRepositoryHostname("product.example.com")
prodDef.setConaryNamespace("exm")
prodDef.setImageGroup("group-awesome-dist")
prodDef.setBaseFlavor(baseFlavor)
# Don't use addPromoteMap unless you know what you're doing; see
# https://issues.rpath.com/browse/RPCL-17 for more information on
# how to use them. These maps cause packages in devel groups to
# be flattened into the main label on promote to QA and promotes
# from example to be flattened into an alternate label.
prodDef.addStage(name='devel', labelSuffix='-devel',
promoteMaps = [('contrib', 'contrib.rpath.org@rpl:2'),
('other', 'example.rpath.org@rpl:2')])
prodDef.addStage(name='qa', labelSuffix='-qa',
promoteMaps = [('contrib', '/product.example.com@exm:group-awesome-dist-1-qa'),
('other', '/product.example.com@exm:other-1-qa') ])
prodDef.addStage(name='release', labelSuffix='',
promoteMaps = [('other', '/product.example.com@exm:other-1')])
prodDef.addSearchPath(troveName='group-rap-standard',
label='rap.rpath.com@rpath:linux-1')
prodDef.addSearchPath(troveName='group-postgres',
label='products.rpath.com@rpath:postgres-8.2')
prodDef.addFactorySource(troveName='group-factories',
label='products.rpath.com@rpath:factories-1')
prodDef.addBuildDefinition(name='x86 Installable ISO Build',
baseFlavor='is: x86',
imageType=prodDef.imageType('installableIsoImage'),
stages = ['devel', 'qa', 'release'])
prodDef.addBuildDefinition(name='x86-64 Installable ISO Build',
baseFlavor='is: x86 x86_64',
imageType=prodDef.imageType('installableIsoImage'),
stages = ['devel', 'qa', 'release'])
prodDef.addBuildDefinition(name='x86 Citrix Xenserver Virtual Appliance',
baseFlavor='~xen, ~domU is: x86',
imageType=prodDef.imageType('xenOvaImage'),
stages = ['devel', 'qa', 'release'])
prodDef.addBuildDefinition(name='Another Xen Build',
baseFlavor='~xen, ~domU is: x86',
imageType=prodDef.imageType('rawHdImage',
dict(autoResolve="true",
baseFileName="/poo/moo/foo")),
stages = ['devel', 'qa', 'release'])
prodDef.addBuildDefinition(name='VMWare build',
baseFlavor='~vmware is: x86 x86_64',
imageType=prodDef.imageType('vmwareImage',
dict(autoResolve="true",
baseFileName="foobar")),
stages = ['devel', 'qa'])
prodDef.addBuildDefinition(name='Totally VMware optional build from a different group',
baseFlavor='~vmware is: x86 x86_64',
imageGroup='group-foo-dist',
imageType=prodDef.imageType('vmwareImage'))
# Don't use addSecondaryLabel unless you know what you're doing
prodDef.addSecondaryLabel('Xen', '-xen')
prodDef.addSecondaryLabel('VMware', 'my@label:vmware')
prodDef.serialize(sys.stdout)
sys.stdout.flush()
sys.exit(0)
|
apache-2.0
| 2,042,442,910,359,023,400
| 42.595041
| 87
| 0.647583
| false
| 3.670842
| false
| false
| false
|
janmtl/drift_qec
|
drift_qec/A_old.py
|
1
|
4165
|
"""
Exposes the 5 parameter unital channel.
"""
import numpy as np
import scipy as sp
from scipy.linalg import polar
PDIAG = np.zeros((9, 9))
for esi in np.eye(3):
one = np.kron(esi, esi)
PDIAG = PDIAG + np.outer(one, one)
PDIAG = PDIAG.astype(np.int)
FIXEDQ = np.array([[-0.1911, 0.3136, -0.9301],
[-0.8547, 0.4128, 0.3148],
[ 0.4826, 0.8551, 0.1891]])
def o(Q, D):
return np.dot(np.dot(Q, D), Q.T)
def Ls(d1=0.1, d2=0.1, d3=0.1):
L1 = np.array([[np.cos(d1), -np.sin(d1), 0],
[np.sin(d1), np.cos(d1), 0],
[0, 0, 1]])
L2 = np.array([[np.cos(d2), 0, -np.sin(d2)],
[0, 1, 0],
[np.sin(d2), 0, np.cos(d2)]])
L3 = np.array([[1, 0, 0],
[0, np.cos(d3), -np.sin(d3)],
[0, np.sin(d3), np.cos(d3)]])
return L1, L2, L3
def SENSOR(d1=0.1, d2=0.1, d3=0.1):
L1, L2, L3 = Ls(d1, d2, d3)
LL1 = np.dot(PDIAG, np.kron(L1, L1))
LL2 = np.dot(PDIAG, np.kron(L2, L2))
LL3 = np.dot(PDIAG, np.kron(L3, L3))
SENSOR = np.r_[LL1[[0, 4, 8], :], LL2[[0, 4, 8], :], LL3[[0, 4, 8], :]]
return SENSOR
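# Illustrative check (not part of the original module): with zero angles the
# three rotation matrices reduce to the identity, so each 3-row block of
# SENSOR(0, 0, 0) simply reads out the diagonal entries of the vectorized
# process matrix.
def _sensor_identity_check():
    L1, L2, L3 = Ls(0.0, 0.0, 0.0)
    assert np.allclose(L1, np.eye(3)) and np.allclose(L3, np.eye(3))
    return SENSOR(0.0, 0.0, 0.0)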
class Channel(object):
def __init__(self, kx, ky, kz, **kwargs):
# Ground truth variables
self.kx, self.ky, self.kz = kx, ky, kz
self.n = kwargs.get("n", 1e6)
self.Q = kwargs.get("Q", np.eye(3))
self.C = np.dot(np.dot(self.Q,
np.diag([self.kx, self.ky, self.kz])),
self.Q.T)
self.Q = np.linalg.svd(self.C)[0]
# Sensor parameters
self.d1 = kwargs.get("d1", 0.01)
self.d2 = kwargs.get("d2", 0.01)
self.d3 = kwargs.get("d3", 0.01)
# Estimators
self.at = np.zeros(9)
self.Vt = np.zeros((9, 9))
self.Qc = np.linalg.qr(np.random.randn(3, 3))[0]
self.M = np.zeros((3, 3))
self.cycle = 1
def sample_data(self):
QcQc = np.kron(self.Qc, self.Qc)
cvec = np.dot(QcQc, np.reshape(self.C, (9,)))
rates = np.dot(SENSOR(self.d1, self.d2, self.d3), cvec)
# Get samples for each L_i
D1 = np.random.multinomial(self.n, rates[0:3]) / float(self.n)
D2 = np.random.multinomial(self.n, rates[3:6]) / float(self.n)
D3 = np.random.multinomial(self.n, rates[6:9]) / float(self.n)
data = np.r_[D1, D2, D3]
return data
def update(self):
# Get new data at this effective orientation
x = self.sample_data()
# Recover the vectorized process matrix and its covariance through a
# linear inversion
a, Sa = self.recover_a(x)
# Update the running mean of the covariance matrix and of the linear
# inversion channel estimate
self.Vt = self.Vt + np.linalg.pinv(Sa)
self.at = np.dot(np.linalg.pinv(self.Vt),
self.at + np.dot(np.linalg.pinv(Sa), a))
# Recover the physical process matrix from the linear inversion
A = np.reshape(self.at, (3, 3))
self.M = self.recoverM(A)
# Get the estimated channel Pauli-basis
self.Qc = np.linalg.svd(self.M)[0]
# Update the process matrices
self.cycle = self.cycle + 1
def recover_a(self, x):
# Initiate the sensor and basis matrices
L = SENSOR(self.d1, self.d2, self.d3)
Linv = np.linalg.pinv(L)
QcQc = np.kron(self.Qc, self.Qc)
# Calculate the data covariance
Sx = sp.linalg.block_diag(
1.0 / self.n * np.outer(x[0:3], x[0:3]),
1.0 / self.n * np.outer(x[3:6], x[3:6]),
1.0 / self.n * np.outer(x[6:9], x[6:9])
)
Sx[np.diag_indices(9)] = 1.0 / self.n * x * (1.0 - x)
# Perform the linear inversion and transform to the standard basis
ac = np.dot(Linv, x)
Sac = o(Linv, Sx)
a = np.dot(QcQc.T, ac)
Sa = o(QcQc.T, Sac)
return a, Sa
@staticmethod
def recoverM(A):
B = 0.5 * (A + A.T)
H = polar(B)[1]
M = 0.5 * (B+H)
M = M / np.trace(M)
return M
|
isc
| 7,062,491,971,050,405,000
| 29.625
| 76
| 0.507803
| false
| 2.654557
| false
| false
| false
|
IECS/MansOS
|
tools/IDE/src/upload_module.py
|
1
|
5090
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2012 the MansOS team. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import wx
from newMote import NewMote
from motelist import Motelist
from Translater import localize
class UploadModule(wx.Panel):
def __init__(self, parent, API):
super(UploadModule, self).__init__(parent = parent)
self.API = API
self.editorManager = self.API.tabManager.GetCurrentPage()
self.filename = self.editorManager.fileName
Motelist.addUpdateCallback(self.updateMotelist)
self.tmpDir = self.API.path + '/temp/'
self.haveMote = False
self.platform = "telosb"
self.moteOrder = list()
# this is path from /mansos/tools/IDE
self.pathToMansos = self.API.path + "/../.."
self.motes = []
self.main = wx.BoxSizer(wx.VERTICAL)
self.controls = wx.GridBagSizer(10, 10)
#self.source = wx.ComboBox(self, choices = ["USB", "Shell"])
#self.source.SetValue("USB")
self.upload = wx.Button(self, label = localize("Upload"))
self.platforms = wx.ComboBox(self, choices = self.API.getPlatforms())
self.refresh = wx.Button(self, label = localize("Refresh"))
self.compile = wx.Button(self, label = localize("Compile"))
self.newMote = wx.Button(self, label = localize("Add mote"))
self.platforms.SetValue(self.API.getActivePlatform())
if self.API.platformOnly != None:
self.platforms.Enable(False)
self.controls.Add(self.compile, (0, 0), flag = wx.EXPAND | wx.ALL)
self.controls.Add(self.platforms, (0, 1), flag = wx.EXPAND | wx.ALL)
self.controls.Add(self.upload, (0, 2), span = (2, 2),
flag = wx.EXPAND | wx.ALL)
#self.controls.Add(self.source, (1, 1), flag = wx.EXPAND | wx.ALL)
self.controls.Add(self.newMote, (1, 1), flag = wx.EXPAND | wx.ALL)
self.controls.Add(self.refresh, (1, 0), flag = wx.EXPAND | wx.ALL)
self.list = wx.CheckListBox(self, wx.ID_ANY, style = wx.MULTIPLE)
self.main.Add(self.controls, 0, wx.EXPAND | wx.ALL, 3);
self.main.Add(self.list, 0, wx.EXPAND | wx.ALL, 3);
self.Bind(wx.EVT_BUTTON, self.API.doCompile, self.compile)
self.Bind(wx.EVT_BUTTON, self.API.doUpload, self.upload)
self.Bind(wx.EVT_BUTTON, self.updateMotelist, self.refresh)
#self.Bind(wx.EVT_COMBOBOX, self.populateMotelist, self.source)
self.Bind(wx.EVT_BUTTON, self.openNewMoteDialog, self.newMote)
self.Bind(wx.EVT_COMBOBOX, self.API.changePlatform, self.platforms)
self.Bind(wx.EVT_CHECKLISTBOX, self.modifyTargets, self.list)
self.SetSizerAndFit(self.main)
self.SetAutoLayout(1)
self.Show()
self.updateMotelist()
def __del__(self):
Motelist.removeUpdateCallback(self.updateMotelist)
def updateMotelist(self, event = None):
old = self.list.GetCheckedStrings()
pos = 0
self.list.Clear()
for mote in Motelist.getMotelist(False):
self.list.Enable(True)
self.list.Insert(mote.getNiceName(), pos)
if mote.getNiceName() in old:
self.list.Check(pos)
pos += 1
if self.list.GetCount() == 0:
self.list.Enable(False)
self.list.Insert(localize("No devices found!"), 0)
def modifyTargets(self, event):
temp = list()
for target in self.list.GetCheckedStrings():
if target.count("(") != 0:
temp.append(target.split("(")[1].split(")")[0])
self.API.targets = temp
def openNewMoteDialog(self, event):
dialog = NewMote(self, self.API)
dialog.ShowModal()
dialog.Destroy()
self.updateMotelist()
|
mit
| 1,440,115,819,273,884,700
| 39.07874
| 77
| 0.654813
| false
| 3.669791
| false
| false
| false
|
ajing/clusterVis
|
LigandsPlot.py
|
1
|
2385
|
"""
Display a list of ligand structures in file
"""
import os
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from TreeParser import *
from NodeReference import *
IMAGE_DIR = "./Image"
def ReturnFileDir(ligandname):
return os.path.join(IMAGE_DIR, ligandname)
def IndexOfLigands():
infile = "tmp"
liganddict = dict()
for line in open(infile):
content = line.strip().split(" : ")
liganddict[content[1]] = content[0]
print liganddict
return liganddict
def PlotLigandStructures(ligands, nodesize):
N = len(ligands)
col_num = 3
row_num = N/col_num + 1
liganddict = LigandDict()
plt.figure(figsize = (40,40))
## This is for getting numbering mapping for ligand name, can be deleted later
index_dict = IndexOfLigands()
########################
for i in range(N):
plt.subplot(row_num, col_num, i + 1)
a_ligand = ligands[i]
proteinname = liganddict.GetProteinName(a_ligand)
liganddir = ReturnFileDir(a_ligand)
img=mpimg.imread(liganddir)
imgplot = plt.imshow(img)
plt.title(a_ligand + "," + index_dict[a_ligand] + "\n" + proteinname + "," + str(nodesize[i]), fontsize=35)
plt.axis('off')
plt.savefig( "./Data/" + str(ligands[0]) + ".pdf", format = 'pdf')
plt.show()
if __name__ == "__main__":
IndexOfLigands()
tree_file = "./Data/all_0.9.gv"
#ligandname = "ASD01911150" # 40
#ligandname = "ASD01910794" # 47
#ligandname = "ASD01910452" # 14
#ligandname = "CHEMBL106917" # 60
#ligandname = "ASD03540047" # 32
ligandname = "CHEMBL347077" # 0
#ligandname = "CHEMBL566469" # 29
#ligandname = "ASD01150884" # 43
#ligandname = "ASD02900007" # 49 this ligand branch is kind of mixture of everything
#ligandname = "ASD01410309" # 5
#ligandname = "ASD03720006" # 42 mixed with different receptors
#ligandname = "ASD01410309" # 42
#ligandname = "ASD00170564" # 54
#ligandname = "ASD01150113" # 21
#ligandname = "ASD01120069" # 4
#ligandname = "ASD01120153" # 59
#ligandname = "ASD03910042" # 26
#ligandname = "CHEMBL596211" # 16
#ligandname = "ASD03090737" # 37
ligandlist, node_size = GetBranchLargeCluster(ligandname, tree_file)
PlotLigandStructures(ligandlist, node_size)
print ligandlist
print node_size
|
apache-2.0
| 2,718,396,933,683,163,000
| 32.591549
| 115
| 0.630608
| false
| 2.849462
| false
| false
| false
|
CiscoSystems/nova-solver-scheduler
|
nova/tests/scheduler/test_solver_scheduler_host_manager.py
|
1
|
7720
|
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For SolverSchedulerHostManager
"""
from nova.openstack.common import timeutils
from nova.scheduler import solver_scheduler_host_manager as host_manager
from nova import test
from nova.tests.scheduler import solver_scheduler_fakes as fakes
class SolverSchedulerHostManagerTestCase(test.NoDBTestCase):
"""Test case for HostManager class."""
def setUp(self):
super(SolverSchedulerHostManagerTestCase, self).setUp()
self.host_manager = host_manager.SolverSchedulerHostManager()
self.fake_hosts = [host_manager.SolverSchedulerHostState(
'fake_host%s' % x, 'fake-node') for x in xrange(1, 5)]
self.fake_hosts += [host_manager.SolverSchedulerHostState(
'fake_multihost', 'fake-node%s' % x) for x in xrange(1, 5)]
self.addCleanup(timeutils.clear_time_override)
def _verify_result(self, info, result):
self.assertEqual(set(info['expected_objs']), set(result))
def test_get_hosts_with_ignore(self):
fake_properties = {'ignore_hosts': ['fake_host1', 'fake_host3',
'fake_host5', 'fake_multihost']}
# [1] and [3] are host2 and host4
info = {'expected_objs': [self.fake_hosts[1], self.fake_hosts[3]],
'expected_fprops': fake_properties}
result = self.host_manager.get_hosts_stripping_ignored_and_forced(
self.fake_hosts, fake_properties)
self._verify_result(info, result)
def test_get_hosts_with_force(self):
fake_properties = {'force_hosts': ['fake_host1', 'fake_host3',
'fake_host5']}
# [0] and [2] are host1 and host3
info = {'expected_objs': [self.fake_hosts[0], self.fake_hosts[2]],
'expected_fprops': fake_properties,
'got_fprops': []}
result = self.host_manager.get_hosts_stripping_ignored_and_forced(
self.fake_hosts, fake_properties)
self._verify_result(info, result)
def test_get_hosts_with_no_matching_force_hosts(self):
fake_properties = {'force_hosts': ['fake_host5', 'fake_host6']}
info = {'expected_objs': [],
'expected_fprops': fake_properties,
'got_fprops': []}
result = self.host_manager.get_hosts_stripping_ignored_and_forced(
self.fake_hosts, fake_properties)
self._verify_result(info, result)
def test_get_hosts_with_ignore_and_force_hosts(self):
# Ensure ignore_hosts processed before force_hosts in host filters.
fake_properties = {'force_hosts': ['fake_host3', 'fake_host1'],
'ignore_hosts': ['fake_host1']}
# only fake_host3 should be left.
info = {'expected_objs': [self.fake_hosts[2]],
'expected_fprops': fake_properties,
'got_fprops': []}
result = self.host_manager.get_hosts_stripping_ignored_and_forced(
self.fake_hosts, fake_properties)
self._verify_result(info, result)
def test_get_hosts_with_force_host_and_many_nodes(self):
# Ensure all nodes returned for a host with many nodes
fake_properties = {'force_hosts': ['fake_multihost']}
info = {'expected_objs': [self.fake_hosts[4], self.fake_hosts[5],
self.fake_hosts[6], self.fake_hosts[7]],
'expected_fprops': fake_properties,
'got_fprops': []}
result = self.host_manager.get_hosts_stripping_ignored_and_forced(
self.fake_hosts, fake_properties)
self._verify_result(info, result)
def test_get_hosts_with_force_nodes(self):
fake_properties = {'force_nodes': ['fake-node2', 'fake-node4',
'fake-node9']}
# [5] is fake-node2, [7] is fake-node4
info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
'expected_fprops': fake_properties,
'got_fprops': []}
result = self.host_manager.get_hosts_stripping_ignored_and_forced(
self.fake_hosts, fake_properties)
self._verify_result(info, result)
def test_get_hosts_with_force_hosts_and_nodes(self):
# Ensure only overlapping results if both force host and node
fake_properties = {'force_hosts': ['fake_host1', 'fake_multihost'],
'force_nodes': ['fake-node2', 'fake-node9']}
# [5] is fake-node2
info = {'expected_objs': [self.fake_hosts[5]],
'expected_fprops': fake_properties,
'got_fprops': []}
result = self.host_manager.get_hosts_stripping_ignored_and_forced(
self.fake_hosts, fake_properties)
self._verify_result(info, result)
def test_get_hosts_with_force_hosts_and_wrong_nodes(self):
# Ensure non-overlapping force_node and force_host yield no result
fake_properties = {'force_hosts': ['fake_multihost'],
'force_nodes': ['fake-node']}
info = {'expected_objs': [],
'expected_fprops': fake_properties,
'got_fprops': []}
result = self.host_manager.get_hosts_stripping_ignored_and_forced(
self.fake_hosts, fake_properties)
self._verify_result(info, result)
def test_get_hosts_with_ignore_hosts_and_force_nodes(self):
# Ensure ignore_hosts can coexist with force_nodes
fake_properties = {'force_nodes': ['fake-node4', 'fake-node2'],
'ignore_hosts': ['fake_host1', 'fake_host2']}
info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
'expected_fprops': fake_properties,
'got_fprops': []}
result = self.host_manager.get_hosts_stripping_ignored_and_forced(
self.fake_hosts, fake_properties)
self._verify_result(info, result)
def test_get_hosts_with_ignore_hosts_and_force_same_nodes(self):
# Ensure ignore_hosts is processed before force_nodes
fake_properties = {'force_nodes': ['fake_node4', 'fake_node2'],
'ignore_hosts': ['fake_multihost']}
info = {'expected_objs': [],
'expected_fprops': fake_properties,
'got_fprops': []}
result = self.host_manager.get_hosts_stripping_ignored_and_forced(
self.fake_hosts, fake_properties)
self._verify_result(info, result)
class SolverSchedulerHostManagerChangedNodesTestCase(test.NoDBTestCase):
"""Test case for HostManager class."""
    # reserved for future use
pass
class SolverSchedulerHostStateTestCase(test.NoDBTestCase):
"""Test case for SolverSchedulerHostState class."""
    # reserved for future use
pass
|
apache-2.0
| -4,972,781,149,865,470,000
| 41.888889
| 78
| 0.587565
| false
| 3.977331
| true
| false
| false
|
waaaaargh/katzenblog
|
katzenblog/model.py
|
1
|
2234
|
from datetime import datetime
from werkzeug.security import generate_password_hash, check_password_hash
from katzenblog import db
from katzenblog.util import slugify
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String)
passwordhash = db.Column(db.String)
email = db.Column(db.String)
screenname = db.Column(db.String)
bio = db.Column(db.String)
def __init__(self, username, email, password, screenname, bio):
self.username = username
self.email = email
self.screenname = screenname
self.bio = bio
self.passwordhash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.passwordhash, password)
def set_password(self, password):
self.passwordhash = generate_password_hash(password)
class Category(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String)
def __init__(self, name):
self.name = name
class Post(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String)
text = db.Column(db.String)
published = db.Column(db.Boolean)
slug = db.Column(db.String)
create_time = db.Column(db.DateTime)
last_edit_time = db.Column(db.DateTime)
owner = db.relationship('User', backref=db.backref('posts',
lazy='dynamic'))
owner_id = db.Column(db.Integer, db.ForeignKey('user.id'))
category = db.relationship('Category', backref=db.backref('posts',
lazy='dynamic'))
category_id = db.Column(db.Integer, db.ForeignKey('category.id'))
def __init__(self, title, text, owner):
self.title = title
self.text = text
self.owner = owner
self.slug = slugify(title)
self.create_time = datetime.now()
self.last_edit_time = datetime.now()
def edit(self, title, text):
self.title = title
self.text = text
self.slug = slugify(title)
self.last_edit_time = datetime.now()
|
gpl-3.0
| 453,065,405,940,258,600
| 30.464789
| 78
| 0.601164
| false
| 3.851724
| false
| false
| false
|
Kwentar/ImageDownloader
|
Internet.py
|
1
|
4529
|
import os
import shutil
from threading import Thread
import urllib.request
import urllib.error
import requests
from tqdm import tqdm
class Internet:
@staticmethod
def write_to_failed_image_urls_file(file_name, image_url, failed_image_urls_file):
"""
Check image in file and write it if need
:param file_name: image file name
:param image_url: image URL
:param failed_image_urls_file: name of file with fails
:return: None
"""
with open(failed_image_urls_file, 'a+') as need_reload:
need_reload.seek(0)
lines = need_reload.readlines()
founded = False
for line in lines:
if line.startswith(image_url):
                    print('URL already recorded in the failed-downloads file')
founded = True
break
if not founded:
need_reload.write(image_url + "," + file_name + '\n')
@staticmethod
def write_response_to_file(response, file_name):
with open(file_name, 'wb') as f:
for chunk in response.iter_content(chunk_size=2048):
f.write(chunk)
@staticmethod
def load_image_chunk(image_url, file_name, dir_):
"""
Loading image by URL
:param image_url: URL of image
:param file_name: destination file name
:param dir_: destination directory
:return: None
"""
r = requests.get(image_url, stream=True)
if r.status_code == requests.codes.ok:
try:
Internet.write_response_to_file(r, file_name)
except OSError as err_:
print(err_.__str__(), 'try redownload...')
index = 0
while True:
file_name = os.path.join(dir_, index.__str__() + '.jpg')
if not os.path.exists(file_name):
break
index += 1
Internet.write_response_to_file(r, file_name)
else:
print(r)
@staticmethod
def load_image2(image, file_name, need_reload_file):
r = requests.get(image, stream=True)
if r.status_code == 200:
with open(file_name, 'wb') as f:
r.raw.decode_content = True
shutil.copyfileobj(r.raw, f)
else:
print(r)
@staticmethod
def load_image(image, file_name, need_reload_file):
try:
if os.path.exists(file_name):
                print("file ", file_name, " already exists")
else:
urllib.request.urlretrieve(image, file_name)
print("".join(['downloaded ', image]))
except urllib.error.ContentTooShortError as err_:
print("".join(['ERROR ', err_.__str__()]))
if need_reload_file is not None:
Internet.write_to_failed_image_urls_file(file_name, image, need_reload_file)
except urllib.error.URLError as err_:
print("".join(['ERROR ', err_.__str__()]))
if need_reload_file is not None:
Internet.write_to_failed_image_urls_file(file_name, image, need_reload_file)
@staticmethod
def load_images(image_url_list, dir_, failed_image_urls_file, number, delay=5):
"""
Loading list of images
:param number: current number of user from all amount of users
:param image_url_list: list of image urls
:param dir_: destination dir
:param failed_image_urls_file: name of file with unsuccessful urls
:param delay: delay for thread
:return:None
"""
abs_failed_image_urls_file = os.path.join(dir_, failed_image_urls_file)
if not os.path.exists(abs_failed_image_urls_file):
with open(abs_failed_image_urls_file, 'w') as _:
pass
for index, image in tqdm(enumerate(image_url_list), total=len(image_url_list), desc=str(number)):
f = os.path.join(dir_, image.split('/')[-1])
if os.path.exists(f):
                print("file ", f, " already exists")
else:
# print('downloading {}: {}...'.format(index, f))
t = Thread(target=Internet.load_image_chunk, args=(image, f, dir_))
t.start()
t.join(delay)
                if t.is_alive():
print('Bad, bad thread!')
if abs_failed_image_urls_file is not None:
Internet.write_to_failed_image_urls_file(f, image, abs_failed_image_urls_file)
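# Illustrative usage sketch, not part of the original module: the URLs and the
# destination directory below are hypothetical placeholders, shown only to
# demonstrate how load_images() is called per its docstring above.
if __name__ == '__main__':
    example_urls = [
        'http://example.com/photos/img_001.jpg',  # hypothetical URL
        'http://example.com/photos/img_002.jpg',  # hypothetical URL
    ]
    os.makedirs('downloads', exist_ok=True)
    # Failed downloads are appended to downloads/failed.txt by the class itself.
    Internet.load_images(example_urls, 'downloads', 'failed.txt', number=1, delay=5)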
|
mit
| -2,301,983,070,617,782,500
| 38.043103
| 105
| 0.539633
| false
| 4.029359
| false
| false
| false
|
annahs/atmos_research
|
util_migrate_sqlite_table_to_mysql.py
|
1
|
2193
|
import sys
import os
import numpy as np
import sqlite3
import mysql.connector
#connect to sqlite database
conn = sqlite3.connect('C:/projects/dbs/SP2_data.db')
c = conn.cursor()
#connect to mysql database
cnx = mysql.connector.connect(user='root', password='Suresh15', host='localhost', database='black_carbon')
cursor = cnx.cursor()
add_stats = ('INSERT INTO polar6_coating_2015'
'(sp2b_file,file_index,instrument,instrument_locn,particle_type,particle_dia,UNIX_UTC_ts,actual_scat_amp,actual_peak_posn,actual_zero_x_posn,FF_scat_amp,FF_peak_posn,FF_gauss_width,incand_amp,LF_scat_amp,LF_baseline_pct_diff,rBC_mass_fg,coat_thickness_nm)'
'VALUES (%(sp2b_file)s,%(file_index)s,%(instr)s,%(instr_locn)s,%(particle_type)s,%(particle_dia)s,%(unix_ts_utc)s,%(actual_scat_amp)s,%(actual_peak_pos)s,%(zero_crossing_posn)s,%(FF_scat_amp)s,%(FF_peak_pos)s,%(FF_gauss_width)s,%(incand_amp)s,%(LF_scat_amp)s,%(LF_baseline_pct_diff)s,%(rBC_mass_fg)s,%(coat_thickness_nm)s)')
errs =0
instrument = 'UBCSP2'
instrument_locn = 'POLAR6'
for row in c.execute('''SELECT
sp2b_file,
file_index,
instr,
instr_locn,
particle_type,
particle_dia,
unix_ts_utc,
actual_scat_amp,
actual_peak_pos,
zero_crossing_posn,
FF_scat_amp,
FF_peak_pos,
FF_gauss_width,
incand_amp,
LF_scat_amp,
LF_baseline_pct_diff,
rBC_mass_fg,
coat_thickness_nm
FROM SP2_coating_analysis WHERE instr = ? and instr_locn=?
ORDER BY unix_ts_utc''',
(instrument, instrument_locn)):
stats = {
'sp2b_file': row[0],
'file_index': row[1],
'instr' : row[2],
'instr_locn': row[3],
'particle_type': row[4],
'particle_dia': row[5],
'unix_ts_utc': row[6],
'actual_scat_amp': row[7],
'actual_peak_pos': row[8],
'zero_crossing_posn': row[9],
'FF_scat_amp': row[10],
'FF_peak_pos': row[11],
'FF_gauss_width': row[12],
'incand_amp': row[13],
'LF_scat_amp': row[14],
'LF_baseline_pct_diff': row[15],
'rBC_mass_fg': row[16],
'coat_thickness_nm': row[17],
}
try:
cursor.execute(add_stats, stats)
except mysql.connector.Error as err:
print("Something went wrong: {}".format(err))
errs += 1
cnx.commit()
print("errors: %s" % errs)
conn.close()
cnx.close()
|
mit
| 8,911,286,735,066,189,000
| 25.433735
| 338
| 0.670315
| false
| 2.323093
| false
| false
| false
|
yeraydiazdiaz/nonrel-blog
|
django_mongodb_engine/base.py
|
1
|
8661
|
import copy
import datetime
import decimal
import sys
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.signals import connection_created
from django.db.utils import DatabaseError
from pymongo.collection import Collection
from pymongo.connection import Connection
# handle pymongo backward compatibility
try:
from bson.objectid import ObjectId
from bson.errors import InvalidId
except ImportError:
from pymongo.objectid import ObjectId, InvalidId
from djangotoolbox.db.base import (
NonrelDatabaseClient,
NonrelDatabaseFeatures,
NonrelDatabaseIntrospection,
NonrelDatabaseOperations,
NonrelDatabaseValidation,
NonrelDatabaseWrapper
)
from djangotoolbox.db.utils import decimal_to_string
from .creation import DatabaseCreation
from .utils import CollectionDebugWrapper
class DatabaseFeatures(NonrelDatabaseFeatures):
supports_microsecond_precision = False
supports_long_model_names = False
class DatabaseOperations(NonrelDatabaseOperations):
compiler_module = __name__.rsplit('.', 1)[0] + '.compiler'
def max_name_length(self):
return 254
def check_aggregate_support(self, aggregate):
import aggregations
try:
getattr(aggregations, aggregate.__class__.__name__)
except AttributeError:
raise NotImplementedError("django-mongodb-engine doesn't support "
"%r aggregates." % type(aggregate))
def sql_flush(self, style, tables, sequence_list, allow_cascade=False):
"""
Returns a list of SQL statements that have to be executed to
drop all `tables`. No SQL in MongoDB, so just clear all tables
here and return an empty list.
"""
for table in tables:
if table.startswith('system.'):
# Do not try to drop system collections.
continue
self.connection.database[table].remove()
return []
def validate_autopk_value(self, value):
"""
Mongo uses ObjectId-based AutoFields.
"""
if value is None:
return None
return unicode(value)
def _value_for_db(self, value, field, field_kind, db_type, lookup):
"""
Allows parent to handle nonrel fields, convert AutoField
keys to ObjectIds and date and times to datetimes.
Let everything else pass to PyMongo -- when the value is used
the driver will raise an exception if it got anything
unacceptable.
"""
if value is None:
return None
# Parent can handle iterable fields and Django wrappers.
value = super(DatabaseOperations, self)._value_for_db(
value, field, field_kind, db_type, lookup)
# Convert decimals to strings preserving order.
if field_kind == 'DecimalField':
value = decimal_to_string(
value, field.max_digits, field.decimal_places)
# Anything with the "key" db_type is converted to an ObjectId.
if db_type == 'key':
try:
return ObjectId(value)
# Provide a better message for invalid IDs.
except (TypeError, InvalidId):
if isinstance(value, (str, unicode)) and len(value) > 13:
value = value[:10] + '...'
msg = "AutoField (default primary key) values must be " \
"strings representing an ObjectId on MongoDB (got " \
"%r instead)." % value
if field.model._meta.db_table == 'django_site':
# Also provide some useful tips for (very common) issues
# with settings.SITE_ID.
msg += " Please make sure your SITE_ID contains a " \
"valid ObjectId string."
raise DatabaseError(msg)
        # PyMongo can only process datetimes.
elif db_type == 'date':
return datetime.datetime(value.year, value.month, value.day)
elif db_type == 'time':
return datetime.datetime(1, 1, 1, value.hour, value.minute,
value.second, value.microsecond)
return value
def _value_from_db(self, value, field, field_kind, db_type):
"""
Deconverts keys, dates and times (also in collections).
"""
# It is *crucial* that this is written as a direct check --
# when value is an instance of serializer.LazyModelInstance
# calling its __eq__ method does a database query.
if value is None:
return None
# All keys have been turned into ObjectIds.
if db_type == 'key':
value = unicode(value)
# We've converted dates and times to datetimes.
elif db_type == 'date':
value = datetime.date(value.year, value.month, value.day)
elif db_type == 'time':
value = datetime.time(value.hour, value.minute, value.second,
value.microsecond)
# Revert the decimal-to-string encoding.
if field_kind == 'DecimalField':
value = decimal.Decimal(value)
return super(DatabaseOperations, self)._value_from_db(
value, field, field_kind, db_type)
class DatabaseClient(NonrelDatabaseClient):
pass
class DatabaseValidation(NonrelDatabaseValidation):
pass
class DatabaseIntrospection(NonrelDatabaseIntrospection):
def table_names(self, cursor=None):
return self.connection.database.collection_names()
def sequence_list(self):
# Only required for backends that use integer primary keys.
pass
class DatabaseWrapper(NonrelDatabaseWrapper):
"""
Public API: connection, database, get_collection.
"""
def __init__(self, *args, **kwargs):
self.collection_class = kwargs.pop('collection_class', Collection)
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = DatabaseValidation(self)
self.connected = False
del self.connection
def get_collection(self, name, **kwargs):
if (kwargs.pop('existing', False) and
name not in self.connection.database.collection_names()):
return None
collection = self.collection_class(self.database, name, **kwargs)
if settings.DEBUG:
collection = CollectionDebugWrapper(collection, self.alias)
return collection
def __getattr__(self, attr):
if attr in ['connection', 'database']:
assert not self.connected
self._connect()
return getattr(self, attr)
raise AttributeError(attr)
def _connect(self):
settings = copy.deepcopy(self.settings_dict)
def pop(name, default=None):
return settings.pop(name) or default
db_name = pop('NAME')
host = pop('HOST')
port = pop('PORT')
user = pop('USER')
password = pop('PASSWORD')
options = pop('OPTIONS', {})
self.operation_flags = options.pop('OPERATIONS', {})
if not any(k in ['save', 'delete', 'update']
for k in self.operation_flags):
# Flags apply to all operations.
flags = self.operation_flags
self.operation_flags = {'save': flags, 'delete': flags,
'update': flags}
# Lower-case all OPTIONS keys.
for key in options.iterkeys():
options[key.lower()] = options.pop(key)
try:
self.connection = Connection(host=host, port=port, **options)
self.database = self.connection[db_name]
except TypeError:
exc_info = sys.exc_info()
raise ImproperlyConfigured, exc_info[1], exc_info[2]
if user and password:
if not self.database.authenticate(user, password):
raise ImproperlyConfigured("Invalid username or password.")
self.connected = True
connection_created.send(sender=self.__class__, connection=self)
def _reconnect(self):
if self.connected:
del self.connection
del self.database
self.connected = False
self._connect()
def _commit(self):
pass
def _rollback(self):
pass
def close(self):
pass
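# Illustrative sketch, not part of the original module: once this backend is set
# as the ENGINE of a Django DATABASES alias, the "public API" named in the
# DatabaseWrapper docstring is reachable through Django's connection handler.
# The alias 'default' and collection name 'events' below are hypothetical.
#
#     from django.db import connections
#     wrapper = connections['default']            # DatabaseWrapper instance
#     events = wrapper.get_collection('events')   # PyMongo Collection (debug-wrapped if DEBUG)
#     db = wrapper.database                       # underlying PyMongo database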
|
bsd-3-clause
| -4,339,837,880,499,807,000
| 32.700389
| 78
| 0.608013
| false
| 4.614278
| false
| false
| false
|
jmesteve/saas3
|
openerp/addons_extra/l10n_es_payment_order/remesas.py
|
1
|
6715
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2006 ACYSOS S.L.. (http://acysos.com) All Rights Reserved.
# Pedro Tarrafeta <pedro@acysos.com>
#
# Fixed for a standard TinyERP 4.2.0 installation: Zikzakmedia S.L. 2008
# Jordi Esteve <jesteve@zikzakmedia.com>
#
# Added remittance accounts and payment types. 2008
# Pablo Rocandio <salbet@gmail.com>
#
# Fixed for an OpenERP 5.0.0 installation on account_payment_extension: Zikzakmedia S.L. 2009
# Jordi Esteve <jesteve@zikzakmedia.com>
#
# Adapted for an OpenERP 6.0.0 installation on account_payment_extension: Zikzakmedia S.L. 2010
# Jordi Esteve <jesteve@zikzakmedia.com>
#
# Added extra CSB 19 concepts: Acysos S.L. 2011
# Ignacio Ibeas <ignacio@acysos.com>
#
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.tools.translate import _
class payment_mode(osv.osv):
_name= 'payment.mode'
_inherit = 'payment.mode'
def onchange_partner(self, cr, uid, ids, partner_id):
if partner_id:
#pool = self.pooler.get_pool(cr.dbname)
obj = self.pool.get('res.partner')
field = ['name']
ids = [partner_id]
filas = obj.read(cr, uid, ids, field)
return {'value':{'nombre': filas[0]["name"][:40]}}
return {'value':{'nombre': ""}}
_columns = {
'tipo': fields.selection([('none','None'),('csb_19','CSB 19'),('csb_32','CSB 32'),('csb_34','CSB 34'),('34_01','CSB 34-01'),('csb_58','CSB 58')], 'Type of payment file', size=6, select=True, required=True),
'sufijo': fields.char('suffix',size=3, select=True),
'partner_id': fields.many2one('res.partner', 'Partner', select=True),
'nombre': fields.char('Company name in file', size=40),
'cif': fields.related('partner_id','vat', type='char', string='VAT code', select=True),
        # INE code (9 digits)
'ine': fields.char('INE code',size=9),
'cedente': fields.char('Cedente', size=15),
        # Include mandatory domicile record (for non-domiciled partners)
'inc_domicile': fields.boolean('Include domicile', help='Add partner domicile records to the exported file (CSB 58)'),
        # Use an alternative format for the domicile record
'alt_domicile_format': fields.boolean('Alt. domicile format', help='Alternative domicile record format'),
# Require bank account?
'require_bank_account': fields.boolean('Require bank account', help='If your bank allows you to send orders without the bank account info, you may disable this option'),
'csb34_type': fields.selection([('transfer', 'Transfer'),('promissory_note', 'Promissory Note'),('cheques', 'Cheques'),('certified_payments', 'Certified Payments')], 'Type of CSB 34 payment'),
'text1': fields.char('Line 1', size=36, help='Enter text and/or select a field of the invoice to include as a description in the letter. The possible values are: ${amount}, ${communication}, {communication2}, {date}, {ml_maturity_date}, {create_date}, {ml_date_created}'),
'text2': fields.char('Line 2', size=36, help='Enter text and/or select a field of the invoice to include as a description in the letter. The possible values are: ${amount}, ${communication}, {communication2}, {date}, {ml_maturity_date}, {create_date}, {ml_date_created}'),
'text3': fields.char('Line 3', size=36, help='Enter text and/or select a field of the invoice to include as a description in the letter. The possible values are: ${amount}, ${communication}, {communication2}, {date}, {ml_maturity_date}, {create_date}, {ml_date_created}'),
'payroll_check': fields.boolean('Payroll Check', help='Check it if you want to add the 018 data type in the file (the vat of the recipient is added in the 018 data type).'),
'add_date': fields.boolean('Add Date', help='Check it if you want to add the 910 data type in the file to include the payment date.'),
'send_type':fields.selection([
('mail','Ordinary Mail'),
('certified_mail','Certified Mail'),
('other','Other'),
],'Send Type', help="The sending type of the payment file"),
'not_to_the_order':fields.boolean('Not to the Order'),
'barred':fields.boolean('Barred'),
'cost_key':fields.selection([
('payer','Expense of the Payer'),
('recipient','Expense of the Recipient'),
],'Cost Key'),
'concept':fields.selection([
('payroll','Payroll'),
('pension','Pension'),
('other','Other'),
],'Concept of the Order', help="Concept of the Order."),
'direct_pay_order':fields.boolean('Direct Pay Order', help="By default 'Not'."),
'csb19_extra_concepts': fields.boolean('Extra Concepts', help='Check it if you want to add the invoice lines to the extra concepts (Max. 15 lines)'),
}
_defaults = {
'tipo': lambda *a: 'none',
'sufijo': lambda *a: '000',
'inc_domicile': lambda *a: False,
'alt_domicile_format': lambda *a: False,
# Override default: We want to be safe so we require bank account by default
'require_bank_account': lambda *a: True,
'csb34_type': lambda *a: 'transfer',
'text1': lambda self,cr,uid,context: _('Dear Sir'),
'text2': lambda self,cr,uid,context: _('Payment ref.')+' ${communication}',
'text3': lambda self,cr,uid,context: _('Total:')+' ${amount}',
'send_type': lambda *a: 'mail',
'not_to_the_order': lambda *a: True,
'barred': lambda *a: True,
'cost_key': lambda *a: 'payer',
'concept': lambda *a: 'other',
'direct_pay_order': lambda *a: False,
}
payment_mode()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| 5,191,690,641,885,414,000
| 53.422764
| 282
| 0.626681
| false
| 3.459432
| false
| false
| false
|
fieldOfView/pyQNodesEditor
|
qneport.py
|
1
|
4898
|
# Copyright (c) 2014, ALDO HOEBEN
# Copyright (c) 2012, STANISLAW ADASZEWSKI
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of STANISLAW ADASZEWSKI nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL STANISLAW ADASZEWSKI BE LIABLE FOR ANY
#DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
#ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from PySide.QtCore import (Qt)
from PySide.QtGui import (QBrush, QColor, QPainter, QPainterPath, QPen)
from PySide.QtGui import (QGraphicsItem, QGraphicsPathItem, QGraphicsTextItem)
class QNEPort(QGraphicsPathItem):
(NamePort, TypePort) = (1, 2)
(Type) = (QGraphicsItem.UserType +1)
def __init__(self, parent):
super(QNEPort, self).__init__(parent)
self.label = QGraphicsTextItem(self)
self.radius_ = 4
self.margin = 3
path = QPainterPath()
path.addEllipse(-self.radius_, -self.radius_, 2*self.radius_, 2*self.radius_);
self.setPath(path)
self.setPen(QPen(Qt.darkRed))
self.setBrush(Qt.red)
self.setFlag(QGraphicsItem.ItemSendsScenePositionChanges)
self.m_portFlags = 0
self.isOutput_ = False
self.m_block = None
self.m_connections = []
def __del__(self):
#print("Del QNEPort %s" % self.name)
pass
def delete(self):
for connection in self.m_connections:
connection.delete()
self.scene().removeItem(self)
self.m_block = None
self.m_connections = []
def setName(self, name):
self.name = name
self.label.setPlainText(name)
def setIsOutput(self, isOutput):
self.isOutput_ = isOutput
if self.isOutput_:
self.label.setPos(-self.radius_ - self.margin - self.label.boundingRect().width(),
-self.label.boundingRect().height()/2);
else:
self.label.setPos(self.radius_ + self.margin,
-self.label.boundingRect().height()/2);
def setNEBlock(self, block):
self.m_block = block
def setPortFlags(self, flags):
self.m_portFlags = flags
if self.m_portFlags & self.TypePort:
font = self.scene().font()
font.setItalic(True)
self.label.setFont(font)
self.setPath(QPainterPath())
elif self.m_portFlags & self.NamePort:
font = self.scene().font()
font.setBold(True)
self.label.setFont(font)
self.setPath(QPainterPath())
def setPtr(self, ptr):
self.m_ptr = ptr
def type(self):
return self.Type
def radius(self):
return self.radius_
def portName(self):
return self.name
def isOutput(self):
return self.isOutput_
def block(self):
return self.m_block
def portFlags(self):
return self.m_portFlags
def ptr(self):
return self.m_ptr;
def addConnection(self, connection):
self.m_connections.append(connection)
def removeConnection(self, connection):
try:
self.m_connections.remove(connection)
except: pass
def connections(self):
return self.m_connections
def isConnected(self, other):
for connection in self.m_connections:
if connection.port1() == other or connection.port2() == other:
return True
return False
def itemChange(self, change, value):
if change == QGraphicsItem.ItemScenePositionHasChanged:
for connection in self.m_connections:
connection.updatePosFromPorts()
connection.updatePath()
return value
|
bsd-3-clause
| 2,503,830,618,461,107,700
| 28.154762
| 94
| 0.652511
| false
| 4.024651
| false
| false
| false
|
norling/metlab
|
metlab/external.py
|
1
|
2186
|
#!/usr/bin/env python2.7
import os
import time
import logging
import threading
from subprocess import Popen, PIPE
class External(threading.Thread):
def __init__(self, name="", args = [], log_name = "external", pid = 0, log_level = logging.INFO, wd=None):
threading.Thread.__init__(self)
self.name = name
self.args = args
self.pid = pid
self.log = logging.getLogger( log_name )
self.log.setLevel( log_level )
self.status = "idle"
self.retval = None
self._stop = threading.Event()
self.started = False
self.wd = wd if wd else os.getcwd()
self.log.info("External: %s" % name)
self.log.info(" args: %s" % args)
def run(self):
try:
self.status = "running"
self.log.info("Starting %s" % self.name)
self.log.info("cmd: %s" % ([self.name] + self.args))
try:
self.started = True
if self.args[-1].startswith(">"):
self.process = Popen([self.name] + self.args[:-1], stdout=open(self.args[-1][1:], "w"), stderr=PIPE, cwd=self.wd)
else:
self.process = Popen([self.name] + self.args, stdout=PIPE, cwd=self.wd)
self.retval = self.process.communicate()[0]
self.retval = self.retval.strip() if self.retval else self.retval
except Exception as e:
self.log.error(e)
if self._stop.isSet():
self.log.warning("%s aborted" % self.name)
self.process.kill()
self.status = "aborted"
elif self.process.returncode != 0:
self.log.error("Failed Running %s, retval %s" % (self.name, self.process.returncode))
self.status = "failed"
else:
self.log.info("Finished Running %s" % self.name)
self.status = "completed"
except Exception as e:
self.log.warning(e)
self.status = "failed"
return self.retval
def stop(self):
self.process.kill()
self._stop.set()
|
gpl-3.0
| -6,979,680,684,814,166,000
| 35.433333
| 133
| 0.521043
| false
| 3.931655
| false
| false
| false
|
kelceydamage/raspi
|
raspi/sensors/grove/i2c/color.py
|
1
|
15702
|
import smbus2 as smbus
import time
import math
import RPi.GPIO
"""
## License
The MIT License (MIT)
Copyright (c) 2016 Frederic Aguiard
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
class GroveI2CColorSensor:
""" Provides access to the Grove I2C color sensor from Seeedstudio.
This library supports 2 of the operating modes of the sensor:
- Continuous, back-to-back color measures ('integrations') of pre-defined durations
- Single measure of arbitrary duration
    The other sensor operating modes (using an external SYNC pin, interrupts...) are not supported by this
    library.
Usage:
1. Use either use_continuous_integration() or use_manual_integration() to select operating mode
2. If necessary, adjust gain and prescaler to obtain a color measure of sufficient precision without saturating the
sensor.
3. Start integration using start_integration()
4. In manual integration mode: use stop_integration() after the desired duration
5. Use one of the read functions to get the measured color value
Reference documentation:
- Seeedstudio wiki: http://www.seeedstudio.com/wiki/index.php?title=Twig_-_I2C_Color_Sensor_v0.9b
- TCS3414-A Datasheet: http://www.seeedstudio.com/wiki/File:TCS3404_TCS3414-A.pdf
"""
# Common colors coordinates (CIE xy and RGB)
COLOR_TABLE = {"Red": {"x": 0.64, "y": 0.33, "r": 255, "g": 0, "b": 0},
"Green": {"x": 0.3, "y": 0.6, "r": 0, "g": 255, "b": 0},
"Blue": {"x": 0.15, "y": 0.06, "r": 0, "g": 0, "b": 255},
"Yellow": {"x": 0.419, "y": 0.505, "r": 255, "g": 255, "b": 0},
"Magenta": {"x": 0.321, "y": 0.154, "r": 255, "g": 0, "b": 255},
"Cyan": {"x": 0.225, "y": 0.329, "r": 0, "g": 255, "b": 255},
"Deep pink": {"x": 0.466, "y": 0.238, "r": 255, "g": 20, "b": 147},
"Orange": {"x": 0.5, "y": 0.441, "r": 255, "g": 165, "b": 0},
"Saddle brown": {"x": 0.526, "y": 0.399, "r": 139, "g": 69, "b": 19},
"Grey / White": {"x": 0.313, "y": 0.329, "r": 255, "g": 255, "b": 255},
"Black": {"x": 0, "y": 0, "r": 0, "g": 0, "b": 0}}
# Sensor address on SMBus / I2C bus
_I2C_SENSOR_ADDRESS = 0X39
# Sensor registers addresses
_REGISTER_COMMAND = 0X80
_REGISTER_CONTROL = _REGISTER_COMMAND | 0X00
_REGISTER_TIMING = _REGISTER_COMMAND | 0X01
_REGISTER_INTERRUPT_CONTROL = _REGISTER_COMMAND | 0X02
_REGISTER_INT_SOURCE = _REGISTER_COMMAND | 0X03
_REGISTER_ID = _REGISTER_COMMAND | 0X04
_REGISTER_GAIN = _REGISTER_COMMAND | 0X07
_REGISTER_INTERRUPT_LOW_THRESH_LOW_BYTE = _REGISTER_COMMAND | 0X08
_REGISTER_INTERRUPT_LOW_THRESH_HIGH_BYTE = _REGISTER_COMMAND | 0X09
_REGISTER_INTERRUPT_HIGH_THRESH_LOW_BYTE = _REGISTER_COMMAND | 0X0A
_REGISTER_INTERRUPT_HIGH_THRESH_HIGH_BYTE = _REGISTER_COMMAND | 0X0B
_REGISTER_DATA_GREEN_LOW = _REGISTER_COMMAND | 0X10
_REGISTER_DATA_GREEN_HIGH = _REGISTER_COMMAND | 0X11
_REGISTER_DATA_RED_LOW = _REGISTER_COMMAND | 0X012
_REGISTER_DATA_RED_HIGH = _REGISTER_COMMAND | 0X13
_REGISTER_DATA_BLUE_LOW = _REGISTER_COMMAND | 0X14
_REGISTER_DATA_BLUE_HIGH = _REGISTER_COMMAND | 0X15
_REGISTER_DATA_CLEAR_LOW = _REGISTER_COMMAND | 0X16
_REGISTER_DATA_CLEAR_HIGH = _REGISTER_COMMAND | 0X17
_REGISTER_INTERRUPT_CLEAR = _REGISTER_COMMAND | 0X60
# Values for control register
_CONTROL_ADC_IS_VALID = 0X10
_CONTROL_ADC_ENABLE = 0X02
_CONTROL_ADC_DISABLE = 0X00
_CONTROL_ADC_POWER_ON = 0X01
_CONTROL_ADC_POWER_OFF = 0X00
# Values for timing register
_TIMING_SYNC_EDGE = 0X40
_TIMING_INTEGRATION_MODE_CONTINUOUS = 0X00
_TIMING_INTEGRATION_MODE_MANUAL = 0X10
_TIMING_INTEGRATION_MODE_SYNC_SINGLE_PULSE = 0X20
_TIMING_INTEGRATION_MODE_SYNC_MULTIPLE_PULSE = 0X30
_TIMING_PARAM_INTEGRATION_TIME_12MS = 0X00
_TIMING_PARAM_INTEGRATION_TIME_100MS = 0X01
_TIMING_PARAM_INTEGRATION_TIME_400MS = 0X02
_TIMING_PARAM_SYNC_PULSE_COUNT_1 = 0X00
_TIMING_PARAM_SYNC_PULSE_COUNT_2 = 0X01
_TIMING_PARAM_SYNC_PULSE_COUNT_4 = 0X02
_TIMING_PARAM_SYNC_PULSE_COUNT_8 = 0X03
_TIMING_PARAM_SYNC_PULSE_COUNT_16 = 0X04
_TIMING_PARAM_SYNC_PULSE_COUNT_32 = 0X05
_TIMING_PARAM_SYNC_PULSE_COUNT_64 = 0X06
_TIMING_PARAM_SYNC_PULSE_COUNT_128 = 0X07
_TIMING_PARAM_SYNC_PULSE_COUNT_256 = 0X08
# Values for interrupt control register
_INTERRUPT_CONTROL_MODE_DISABLE = 0X00
_INTERRUPT_CONTROL_MODE_LEVEL = 0X10
_INTERRUPT_CONTROL_MODE_SMB_ALERT = 0x20
_INTERRUPT_CONTROL_PERSIST_EVERY_CYCLE = 0X00
_INTERRUPT_CONTROL_PERSIST_OUTSIDE_RANGE_ONCE = 0X01
_INTERRUPT_CONTROL_PERSIST_OUTSIDE_RANGE_100MS = 0X02
_INTERRUPT_CONTROL_PERSIST_OUTSIDE_RANGE_1000MS = 0X03
# Values for interrupt source register
_INTERRUPT_SOURCE_GREEN = 0X00
_INTERRUPT_SOURCE_RED = 0X01
_INTERRUPT_SOURCE_BLUE = 0X10
_INTERRUPT_SOURCE_CLEAR = 0X03
# Values for gain register
_GAIN_1X = 0X00
_GAIN_4X = 0X10
_GAIN_16X = 0X20
_GAIN_64X = 0X30
_PRESCALER_1 = 0X00
_PRESCALER_2 = 0X01
_PRESCALER_4 = 0X02
_PRESCALER_8 = 0X03
_PRESCALER_16 = 0X04
_PRESCALER_32 = 0X05
_PRESCALER_64 = 0X06
# Wait time introduced after each register write (except integration start)
_SLEEP_VALUE = 0.05
def __init__(self, bus_number=None):
"""Initialize i2c communication with the sensor and sets default parameters.
Default parameters: continuous integration (not started) with 12ms cycles, gain 1x, pre-scale 1.
:param bus_number: the i2c bus number (usually 0 or 1, depending on the hardware). Use the i2cdetect command
line tool to identify the right bus. If set to None, will use the Raspberry Pi revision number to guess which
bus to use.
"""
if bus_number is None:
            # Use the Raspberry Pi revision to choose the bus number
board_revision = RPi.GPIO.RPI_REVISION
if board_revision == 2 or board_revision == 3:
bus_number = 1
else:
bus_number = 0
self.bus = smbus.SMBus(bus_number)
self.use_continuous_integration()
self.set_gain_and_prescaler(1, 1)
def use_continuous_integration(self, integration_time_in_ms=12):
"""Configure the sensor to perform continuous, back-to-back integrations of pre-defined duration.
Continuous integration will begin after calling start_integration() and will stop after calling
stop_integration().
:param integration_time_in_ms: supported values in ms are 12, 100 and 400.
"""
assert integration_time_in_ms == 12 \
or integration_time_in_ms == 100 \
or integration_time_in_ms == 400, \
"Continuous integration supports only 12ms, 100ms or 400ms integration durations"
# Convert integration time value into the corresponding byte values expected by the sensor.
if integration_time_in_ms == 12:
integration_time_reg = self._TIMING_PARAM_INTEGRATION_TIME_12MS
elif integration_time_in_ms == 100:
integration_time_reg = self._TIMING_PARAM_INTEGRATION_TIME_100MS
elif integration_time_in_ms == 400:
integration_time_reg = self._TIMING_PARAM_INTEGRATION_TIME_400MS
else:
integration_time_reg = self._TIMING_PARAM_INTEGRATION_TIME_12MS
self.bus.write_i2c_block_data(self._I2C_SENSOR_ADDRESS,
self._REGISTER_TIMING,
[self._TIMING_INTEGRATION_MODE_CONTINUOUS | integration_time_reg])
time.sleep(self._SLEEP_VALUE)
def use_manual_integration(self):
"""Configure the sensor to perform a single integration manually started and stopped.
Manual integration will begin after calling start_integration(), and will stop after calling stop_integration().
"""
self.bus.write_i2c_block_data(self._I2C_SENSOR_ADDRESS,
self._REGISTER_TIMING,
[self._TIMING_INTEGRATION_MODE_MANUAL])
time.sleep(self._SLEEP_VALUE)
def set_gain_and_prescaler(self, gain_multiplier=1, prescaler_divider=1):
"""Configure the sensor gain and prescaler.
:param gain_multiplier: Gain sets the sensibility of the sensor, effectively extending the dynamic range of the
sensor but eventually inducing saturation. Supported values are 1, 4, 16 and 64.
:param prescaler_divider: Prescaler scales the values by dividing them before storage in the output registers,
hence reducing saturation at the cost of reducing measurement precision. Supported prescaler dividers are 1, 2,
4, 8, 16, 32 and 64.
"""
assert gain_multiplier == 1 or gain_multiplier == 4 or gain_multiplier == 16 or gain_multiplier == 64, \
"Supported gain multipliers: 1, 4, 16 and 64"
assert prescaler_divider == 1 \
or prescaler_divider == 2 \
or prescaler_divider == 4 \
or prescaler_divider == 8 \
or prescaler_divider == 16 \
or prescaler_divider == 32 \
or prescaler_divider == 64, \
"Supported prescaler dividers: 1, 2, 4, 8, 16, 32 and 64"
# Convert gain multiplier into the corresponding byte values expected by the sensor.
if gain_multiplier == 1:
gain_reg = self._GAIN_1X
elif gain_multiplier == 4:
gain_reg = self._GAIN_4X
elif gain_multiplier == 16:
gain_reg = self._GAIN_16X
elif gain_multiplier == 64:
gain_reg = self._GAIN_64X
else:
gain_reg = self._GAIN_1X
# Convert prescaler divider into the corresponding byte values expected by the sensor.
if prescaler_divider == 1:
prescaler_reg = self._PRESCALER_1
elif prescaler_divider == 2:
prescaler_reg = self._PRESCALER_2
elif prescaler_divider == 4:
prescaler_reg = self._PRESCALER_4
elif prescaler_divider == 8:
prescaler_reg = self._PRESCALER_8
elif prescaler_divider == 16:
prescaler_reg = self._PRESCALER_16
elif prescaler_divider == 32:
prescaler_reg = self._PRESCALER_32
elif prescaler_divider == 64:
prescaler_reg = self._PRESCALER_64
else:
prescaler_reg = self._PRESCALER_1
self.bus.write_i2c_block_data(self._I2C_SENSOR_ADDRESS, self._REGISTER_GAIN, [gain_reg | prescaler_reg])
time.sleep(self._SLEEP_VALUE)
def start_integration(self):
"""Start the integration.
"""
self.bus.write_i2c_block_data(
self._I2C_SENSOR_ADDRESS,
self._REGISTER_CONTROL,
[self._CONTROL_ADC_ENABLE | self._CONTROL_ADC_POWER_ON])
def stop_integration(self):
"""Stop the integration.
"""
self.bus.write_i2c_block_data(
self._I2C_SENSOR_ADDRESS,
self._REGISTER_CONTROL,
[self._CONTROL_ADC_DISABLE | self._CONTROL_ADC_POWER_ON])
def is_integration_complete(self):
""" Checks if an integration has been successfully completed and color data is ready to be read.
:return: True if integration is completed.
"""
integration_status = self.bus.read_i2c_block_data(self._I2C_SENSOR_ADDRESS, self._REGISTER_CONTROL, 1)
return integration_status[0] & self._CONTROL_ADC_IS_VALID == self._CONTROL_ADC_IS_VALID
def read_rgbc_word(self):
""" Reads the measured color, split over 4 channels: red, green, blue, clear.
Each value is provided as a word.
:return: a (r,g,b,c) tuple of the 4 word values measured by the red/green/blue/clear channels
"""
# Integration result registers are 8 consecutive bytes starting by lower value of green channel.
# Reading them in a single pass.
raw_color = self.bus.read_i2c_block_data(self._I2C_SENSOR_ADDRESS, self._REGISTER_DATA_GREEN_LOW, 8)
return (raw_color[2] + raw_color[3] * 256,
raw_color[0] + raw_color[1] * 256,
raw_color[4] + raw_color[5] * 256,
raw_color[6] + raw_color[7] * 256)
def read_rgbc(self):
""" Reads the measured color, split over 4 channels: red, green, blue, clear (unfiltered).
Each value is provided as a byte.
:return: a (r,g,b,c) tuple of the 4 byte values measured by the red/green/blue/clear channels
"""
# Integration result registers are 8 consecutive bytes starting by lower value of green channel.
# Reading them in a single pass.
raw_color = self.bus.read_i2c_block_data(self._I2C_SENSOR_ADDRESS, self._REGISTER_DATA_GREEN_LOW, 8)
# Discard lower byte of each channel
return (raw_color[3],
raw_color[1],
raw_color[5],
raw_color[7])
def read_xy(self):
""" Reads the measured color and converts it as CIE x,y coordinates.
See http://www.techmind.org/colour/ and https://en.wikipedia.org/wiki/CIE_1931_color_space for more information.
:return: a (x, y) tuple
"""
rgbc = self.read_rgbc_word()
x_bar = -0.14282 * rgbc[0] + 1.54924 * rgbc[1] + -0.95641 * rgbc[2]
y_bar = -0.32466 * rgbc[0] + 1.57837 * rgbc[1] + -0.73191 * rgbc[2]
z_bar = -0.68202 * rgbc[0] + 0.77073 * rgbc[1] + 0.563320 * rgbc[2]
x = x_bar / (x_bar + y_bar + z_bar)
y = y_bar / (x_bar + y_bar + z_bar)
return [x, y]
def read_color_name(self):
""" Reads the measured color and maps it to the nearest color present in COLOR_TABLE.
Warning: current implementation does not work well with white / grey / black or dark colors.
:return: The color name used as a key in COLOR_TABLE.
"""
xy = self.read_xy()
closest_color = None
closest_distance = 1
for current_color in self.COLOR_TABLE:
current_coordinates = self.COLOR_TABLE[current_color]
current_dist = math.sqrt(
(current_coordinates["y"] - xy[1])**2 + (current_coordinates["x"] - xy[0])**2)
if current_dist < closest_distance:
closest_color = current_color
closest_distance = current_dist
return closest_color
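# Illustrative usage sketch, not part of the original module, following the
# workflow described in the class docstring: pick an integration mode, tune gain
# and prescaler, integrate, then read. The gain, timing and polling values below
# are assumptions for the example only.
if __name__ == '__main__':
    sensor = GroveI2CColorSensor()           # bus number guessed from the Pi revision
    sensor.use_continuous_integration(100)   # 100 ms back-to-back integrations
    sensor.set_gain_and_prescaler(4, 1)      # assumed values; adjust to avoid saturation
    sensor.start_integration()
    while not sensor.is_integration_complete():
        time.sleep(0.01)                     # poll until the first cycle finishes
    print(sensor.read_rgbc_word())           # (r, g, b, clear) word values
    print(sensor.read_color_name())          # nearest entry in COLOR_TABLE
    sensor.stop_integration()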
|
apache-2.0
| 6,583,448,653,865,636,000
| 44.648256
| 120
| 0.626735
| false
| 3.449473
| false
| false
| false
|
gbolet/BlenderCacheManager
|
clearCache.py
|
1
|
4262
|
bl_info = {
"name": "BVH Cache Manager",
"category": "Render",
"description":"Easily delete cached BVH data!",
"location":"Properties Editor > Render > BVH Cache Manager",
"author":"Gregory Bolet",
"version":"001",
"warning":"Alpha Version"
}
import bpy
import os
import shutil
cacheDirectory = ""
class InterfacePanel(bpy.types.Panel): #[ref: Panel(bpy_struct)]
bl_space_type = "PROPERTIES"
bl_region_type = "WINDOW"
bl_context= "render"
bl_label = "BVH Cache Manager"
#this will create some UI elements
    print("***Starting Interface Panel...***")
bpy.types.Scene.manualDirectory = bpy.props.StringProperty(name = "Cache Folder", default = "You can leave me blank!",
description = "Manually select cache folder directory",
subtype = 'DIR_PATH'
)
print("***Interface Ready!***")
#draw function gets called A LOT,
#object edits cannot be performed here, only UI updates
def draw(self, context):
layout = self.layout
col = layout.column(align=True) #adds a column to the UI
col.label("Manual Entry:")
col.prop(context.scene, 'manualDirectory', expand=False)
print("Manual Directory IS:"+ context.scene.manualDirectory)
col.label("")
col.operator("blender.execute",text="Clear Cache")
return None
class ClearCacheButton(bpy.types.Operator):
bl_idname = "blender.execute"
bl_label = "Clear BVH Cache"
bl_options = {"UNDO"}
def __init__(self):
global cacheDirectory
from sys import platform as _platform
manualDir = bpy.context.scene.manualDirectory
if (os.path.isdir(manualDir) == False and manualDir != ""):
print("Invalid manual entry directory. Using default cache folder...")
elif (os.path.isdir(manualDir) == False and manualDir == ""):
print("Looking for default cache folder...")
if(manualDir != "" and os.path.isdir(manualDir)):
cacheDirectory = manualDir[:-1] #removes extra slash
elif _platform == "linux" or _platform == "linux2":
#This will always work on Linux
#$HOME/.config/blender/2.76/
            cacheDirectory += os.path.expandvars("$HOME") + "/.config/blender/"  # expand $HOME so the path resolves on disk
cacheDirectory += '{:.4}'.format(bpy.app.version_string)
cacheDirectory += "/cache"
elif _platform == "darwin":
#This will always work on Mac OSX
#/Users/$USER/Library/Application Support/Blender/2.76/
            cacheDirectory += os.path.expanduser("~") + "/Library/Application Support/Blender/"  # expand ~ so the path resolves on disk
cacheDirectory += '{:.4}'.format(bpy.app.version_string)
cacheDirectory += "/cache"
elif _platform == "win32":
#this always works on Windows machines
#C:\Documents and Settings\$USERNAME\AppData\Roaming\Blender Foundation\Blender\2.76\
cacheDirectory += os.getenv('APPDATA')
cacheDirectory += "\Blender Foundation\Blender\\"
cacheDirectory += '{:.4}'.format(bpy.app.version_string)
cacheDirectory += "\cache"
print("User Cache Directory: "+cacheDirectory)
return None;
def clearCache(self):
global cacheDirectory
if(os.path.isdir(cacheDirectory)):
shutil.rmtree(cacheDirectory)
if(os.path.isdir(cacheDirectory) == False):
os.makedirs(cacheDirectory)
print("\nNo cache directory exists, creating one...")
print("New cache folder directory: "+cacheDirectory+"\n")
return None;
def execute(self, context):
global cacheDirectory
print("\nStarting process...")
self.clearCache()
cacheDirectory = ""
print("FINISHED! \n\n\n")
return {"FINISHED"}
#end invoke
def register():
bpy.utils.register_class(InterfacePanel)
bpy.utils.register_class(ClearCacheButton)
def unregister():
bpy.utils.unregister_class(ClearCacheButton)
bpy.utils.unregister_class(InterfacePanel)
if __name__ == "__main__":
register()
|
mit
| -270,100,889,357,932,200
| 29.442857
| 122
| 0.591272
| false
| 4.178431
| false
| false
| false
|
simudream/GeoIP2-python
|
setup.py
|
1
|
1626
|
#!/usr/bin/env python
import codecs
import os
import sys
import geoip2
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
packages = ['geoip2']
requirements = [i.strip() for i in open("requirements.txt").readlines()]
setup(
name='geoip2',
version=geoip2.__version__,
description='MaxMind GeoIP2 API',
long_description=codecs.open('README.rst', 'r', 'utf-8').read(),
author='Gregory Oschwald',
author_email='goschwald@maxmind.com',
url='http://www.maxmind.com/',
packages=['geoip2'],
package_data={'': ['LICENSE']},
package_dir={'geoip2': 'geoip2'},
include_package_data=True,
install_requires=requirements,
extras_require={
':python_version=="2.6" or python_version=="2.7"': ['ipaddr']},
tests_require=['requests_mock'],
test_suite="tests",
license=geoip2.__license__,
classifiers=(
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python',
'Topic :: Internet :: Proxy Servers',
'Topic :: Internet',
),
)
|
apache-2.0
| -3,090,010,712,760,477,700
| 28.563636
| 72
| 0.619311
| false
| 3.880668
| false
| false
| false
|
itpubs/reflecting
|
apps/users/models.py
|
1
|
1816
|
from django.contrib.auth.models import AbstractUser
from django.db import models
from utils.default_model import random_nick_name
from blog.models import Article
__all__ = [
'UserProfile',
'EmailVerifyCode',
'Message',
'Comment',
'Reply'
]
# Create your models here.
class UserProfile(AbstractUser):
gender_choices = (
('male', '男'),
('female', '女'),
('unknown', '未知')
)
nick_name = models.CharField(max_length=100, default=random_nick_name)
gender = models.CharField(choices=gender_choices, default='unknown', max_length=20)
image = models.ImageField(upload_to='avatar/%Y/%m', max_length=100, default='avatar/avatar.png')
def get_message_count(self):
return Message.objects.filter(status=False).count()
def get_comment_count(self):
return Comment.objects.filter(status=False).count()
class EmailVerifyCode(models.Model):
code = models.CharField(max_length=20, unique=True)
email = models.EmailField(max_length=50)
send_time = models.DateTimeField(auto_now_add=True)
class Message(models.Model):
add_time = models.DateTimeField(auto_now_add=True)
body = models.CharField(max_length=200)
status = models.BooleanField(default=False)
class Comment(models.Model):
user = models.ForeignKey(UserProfile)
article = models.ForeignKey(Article, related_name='article_comment')
body = models.TextField()
add_time = models.DateTimeField(auto_now_add=True)
status = models.BooleanField(default=False)
def get_reply(self):
return Reply.objects.filter(comment=self.pk)
class Reply(models.Model):
user = models.ForeignKey(UserProfile)
comment = models.ForeignKey(Comment)
body = models.TextField()
add_time = models.DateTimeField(auto_now_add=True)
|
mit
| -6,211,186,313,144,723,000
| 28.639344
| 100
| 0.69635
| false
| 3.623246
| false
| false
| false
|
dhocker/athomepowerlineserver
|
commands/delete_device_program.py
|
1
|
1356
|
#
# Delete a device program
# Copyright © 2020 Dave Hocker
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# See the LICENSE file for more details.
#
from commands.ServerCommand import ServerCommand
from database.program_assignments import ProgramAssignments
class DeleteDeviceProgram(ServerCommand):
"""
Command handler for deleting a device timer program
"""
def Execute(self, request):
args = request["args"]
device_id = int(args["device-id"])
program_id = int(args["program-id"])
# Generate a successful response
r = self.CreateResponse(request["request"])
# Remove program from database
pa = ProgramAssignments()
result = pa.delete(device_id, program_id)
if result:
r['result-code'] = DeleteDeviceProgram.SUCCESS
r['device-id'] = args["device-id"]
r['program-id'] = args["program-id"]
r['message'] = DeleteDeviceProgram.MSG_SUCCESS
else:
r['result-code'] = pa.last_error_code
r['device-id'] = args["device-id"]
r['program-id'] = args["program-id"]
r['message'] = pa.last_error
return r
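# Illustrative request shape, assumed from the keys read above (the command name
# string is a placeholder; it is not defined in this module):
#
#     request = {
#         "request": "DeleteDeviceProgram",
#         "args": {"device-id": "3", "program-id": "7"},
#     }
#     response = DeleteDeviceProgram().Execute(request)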
|
gpl-3.0
| -1,568,335,787,459,006,700
| 30.511628
| 70
| 0.631734
| false
| 4.131098
| false
| false
| false
|
vsoch/docfish
|
scripts/import/upload_storage.py
|
1
|
5156
|
#!/usr/bin/env python
# This is an example script to upload data (images, text, metadata) to
# google cloud storage and datastore (for general data)
# Preparation of Pubmed Open Access Data
ftp_base = "ftp://ftp.ncbi.nlm.nih.gov/pub/pmc"
file_list = "ftp://ftp.ncbi.nlm.nih.gov/pub/pmc/oa_file_list.txt"
from glob import glob
import requests
import signal
import xmltodict
import imghdr
import tempfile
import shutil
import tarfile
import numpy
import urllib.request
import pandas
import os
import re
import pwd
######################################################################
# Pubmed Functions
######################################################################
def get_userhome():
    '''get_userhome gets the user's home directory based on the uid,
    NOT the environment variable, which is not reliable'''
return pwd.getpwuid(os.getuid())[5]
def get_pubmed(download_base=None):
'''retrieve pubmed database, either from locally saved file,
or if not yet generated, obtain from FTP server
'''
if download_base is None:
download_base = get_userhome()
output_file = '%s/pmc.tsv' %(download_base)
if not os.path.exists(output_file):
download_folder = tempfile.mkdtemp()
pmc_file = '%s/pmc.txt' %download_folder
urllib.request.urlretrieve(file_list, pmc_file)
pmc = pandas.read_csv(pmc_file,sep="\t",skiprows=1,header=None)
pmc.columns = ["TARGZ_FILE","JOURNAL","PMCID","PMID","LICENSE"]
pmc.to_csv(output_file,sep="\t",index=None)
return pmc
return pandas.read_csv(output_file,sep="\t")
def read_file(file_path):
with open (file_path, "r") as myfile:
return myfile.read().replace('\n', '')
def read_xml(xml_file):
with open(xml_file) as fd:
return xmltodict.parse(fd.read())
def format_name(name):
    '''format_name will ensure that all collection names have
    periods removed, are lowercased, and have spaces replaced with -
I'm not sure if this is best, but it's safer than allowing anything
'''
return name.replace('.','').lower().replace(" ",'-')
def get_metadata(row):
    '''get_metadata will return the metadata for a row, with the uid corresponding
first to the PMID, and the PMC id if that is not defined
'''
pmid = row[1].PMID
if not isinstance(pmid,str):
if numpy.isnan(pmid):
pmid = row[1].PMCID
download_url = "%s/%s" %(ftp_base,row[1].TARGZ_FILE)
metadata = {"pmcid":row[1].PMCID,
"type":"article",
"uid":pmid,
"publication_date": publication_date,
"download_url":download_url,
"license":row[1].LICENSE}
if not isinstance(row[1].PMID,str):
if not numpy.isnan(row[1].PMID):
metadata['pmid'] = row[1].PMID
return metadata
def create_article(metadata):
tmpdir = tempfile.mkdtemp()
pmc_file = '%s/article.tar.gz' %(tmpdir)
print('Downloading: %s' %(metadata['uid']))
urllib.request.urlretrieve(metadata['download_url'], pmc_file)
tar = tarfile.open(pmc_file, "r:gz")
tar.extractall(tmpdir)
files = glob('%s/%s/*' %(tmpdir,metadata['pmcid']))
images = [x for x in files if imghdr.what(x) is not None]
pdf_files = [x for x in files if x.lower().endswith('pdf')]
xml_file = [x for x in files if x.lower().endswith('xml')]
images = images + pdf_files
general_client.upload_dataset(images=images,
texts=xml_file,
collection=collection,
uid=metadata['uid'],
metadata=metadata)
shutil.rmtree(tmpdir)
######################################################################
# Signals
######################################################################
def signal_handler(signum, frame):
raise Exception("Timed out!")
# Only allow each paper a 30 seconds to download
signal.signal(signal.SIGALRM, signal_handler)
######################################################################
# Preparation of Pubmed Open Access Data
######################################################################
import os
import tempfile
import tarfile
import imghdr
import urllib
from glob import glob
# Obtain 1.5 million pmc articles
pmc = get_pubmed()
# Start google storage client for pmc-stanford
from som.api.google.storage.general import Client
general_client = Client(bucket_name='pmc-stanford')
timeouts = []
current = 625
for row in pmc.iterrows():
if row[0] >= current:
try:
signal.alarm(30)
journal_name = row[1].JOURNAL
date_match = re.search("\d{4}",journal_name)
publication_date = journal_name[date_match.start():]
journal_name = format_name(journal_name[:date_match.start()].strip())
collection = general_client.get_collection(uid=journal_name)
metadata = get_metadata(row)
article = create_article(metadata)
except:
timeouts.append(row[0])
general_client = Client(bucket_name='pmc-stanford')
current+=1
|
mit
| -8,730,548,330,262,620,000
| 31.427673
| 81
| 0.579519
| false
| 3.780059
| false
| false
| false
|
aleroddepaz/python-samples
|
justanotherchat/chat.py
|
1
|
1885
|
import os
import json
import random
import logging
import urlparse
import webapp2
from google.appengine.api import channel
from google.appengine.ext import db
from google.appengine.ext.webapp import template
class Client(db.Model):
username = db.StringProperty(required=True)
token = db.StringProperty(required=False)
class MainPage(webapp2.RequestHandler):
def get(self):
default_username = 'john.doe' + str(random.randint(0, 1000))
username = self.request.get('username', default_username)
client = Client(username = username)
db.put(client)
client.token = channel.create_channel(str(client.key().id()))
db.put(client)
path = os.path.join(os.path.dirname(__file__), 'index.html')
self.response.out.write(template.render(path, {
'token': client.token,
'username': username
}))
def post(self):
qs = urlparse.parse_qs(self.request.body)
data = json.dumps({
'message': qs['message'][0],
'username': qs['username'][0]
})
for client in Client.all():
client_id = str(client.key().id())
logging.info('Sending data to {}...'.format(client_id))
channel.send_message(client_id, data)
class ConnectedHandler(webapp2.RequestHandler):
def post(self):
client_id = self.request.get('from')
logging.info("{} joined the party".format(client_id))
class DisconnectedHandler(webapp2.RequestHandler):
def post(self):
client_id = self.request.get('from')
logging.info("Goodbye {}!".format(client_id))
db.delete(Client.get_by_id(int(client_id)))
application = webapp2.WSGIApplication([
('/', MainPage),
('/_ah/channel/connected/', ConnectedHandler),
('/_ah/channel/disconnected/', DisconnectedHandler)
], debug=True)
|
mit
| -4,153,536,256,127,778,000
| 28.920635
| 69
| 0.632361
| false
| 3.846939
| false
| false
| false
|
beeftornado/plex-custom-media-scanner
|
Scanners/Movies/Tivo Movie Scanner.py
|
1
|
5478
|
#!/usr/bin/env python
# Copyright (C) 2013 Casey Duquette
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import re, os, os.path
import sys
# I needed some plex libraries, you may need to adjust your plex install location accordingly
sys.path.append("/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Scanners/Movies")
sys.path.append("/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-ins/Scanners.bundle/Contents/Resources/Common/")
import Media, VideoFiles, Stack, Utils
from mp4file import mp4file, atomsearch
__author__ = "Casey Duquette"
__copyright__ = "Copyright 2013"
__credits__ = ["Casey Duquette"]
__license__ = "GPLv2"
__version__ = "1.0"
__maintainer__ = "Casey Duquette"
__email__ = ""
episode_regexps = [
'(Ep[^0-9a-z](?P<season>[0-9]{1,2})(?P<ep>[0-9]{2})[_]?)?(?P<title>[\w\s,.\-:;\'\"]+?)\s\(Rec.*$', # Ep#112_Bad Wolf (Rec 08_19_2012).mp4, Blink (Rec 09_13_2012).mp4
'(?P<show>.*?)[sS](?P<season>[0-9]+)[\._ ]*[eE](?P<ep>[0-9]+)([- ]?[Ee+](?P<secondEp>[0-9]+))?', # S03E04-E05
'(?P<show>.*?)[sS](?P<season>[0-9]{2})[\._\- ]+(?P<ep>[0-9]+)', # S03-03
'(?P<show>.*?)([^0-9]|^)(?P<season>[0-9]{1,2})[Xx](?P<ep>[0-9]+)(-[0-9]+[Xx](?P<secondEp>[0-9]+))?', # 3x03
'(.*?)[^0-9a-z](?P<season>[0-9]{1,2})(?P<ep>[0-9]{2})([\.\-][0-9]+(?P<secondEp>[0-9]{2})([ \-_\.]|$)[\.\-]?)?([^0-9a-z%]|$)' # .602.
]
date_regexps = [
'(?P<year>[0-9]{4})[^0-9a-zA-Z]+(?P<month>[0-9]{2})[^0-9a-zA-Z]+(?P<day>[0-9]{2})([^0-9]|$)', # 2009-02-10
'(?P<month>[0-9]{2})[^0-9a-zA-Z]+(?P<day>[0-9]{2})[^0-9a-zA-Z(]+(?P<year>[0-9]{4})([^0-9a-zA-Z]|$)', # 02-10-2009
]
standalone_episode_regexs = [
'(.*?)( \(([0-9]+)\))? - ([0-9]+)+x([0-9]+)(-[0-9]+[Xx]([0-9]+))?( - (.*))?', # Newzbin style, no _UNPACK_
'(.*?)( \(([0-9]+)\))?[Ss]([0-9]+)+[Ee]([0-9]+)(-[0-9]+[Xx]([0-9]+))?( - (.*))?' # standard s00e00
]
season_regex = '.*?(?P<season>[0-9]+)$' # folder for a season
just_episode_regexs = [
'(?P<ep>[0-9]{1,3})[\. -_]of[\. -_]+[0-9]{1,3}', # 01 of 08
'^(?P<ep>[0-9]{1,3})[^0-9]', # 01 - Foo
'e[a-z]*[ \.\-_]*(?P<ep>[0-9]{2,3})([^0-9c-uw-z%]|$)', # Blah Blah ep234
'.*?[ \.\-_](?P<ep>[0-9]{2,3})[^0-9c-uw-z%]+', # Flah - 04 - Blah
'.*?[ \.\-_](?P<ep>[0-9]{2,3})$', # Flah - 04
'.*?[^0-9x](?P<ep>[0-9]{2,3})$' # Flah707
]
ends_with_number = '.*([0-9]{1,2})$'
ends_with_episode = ['[ ]*[0-9]{1,2}x[0-9]{1,3}$', '[ ]*S[0-9]+E[0-9]+$']
# Look for episodes.
def Scan(path, files, mediaList, subdirs):
# Scan for video files.
VideoFiles.Scan(path, files, mediaList, subdirs)
# Take top two as show/season, but require at least the top one.
paths = Utils.SplitPath(path)
if len(paths) > 0 and len(paths[0]) > 0:
done = False
if done == False:
(title, year) = VideoFiles.CleanName(paths[0])
for i in files:
done = False
is_movie = False
file = os.path.basename(i)
(file, ext) = os.path.splitext(file)
# See if there's a pytivo metadata file to peek at
meta = dict()
metadata_filename = '{0}.txt'.format(i.replace('_LQ', ''))
if os.path.isfile(metadata_filename):
with open(metadata_filename, 'r') as f:
for line in f:
line = line.strip()
if line and len(line):
line_a = line.split(' : ')
if len(line_a) > 1:
key, value = line.split(' : ')
meta[key] = value
#print "pytivo metadata, ", meta
        # Skip TV shows based on the pyTivo metadata file, falling back to the filename if it is not present
if 'isEpisode' in meta:
if meta['isEpisode'] == 'false':
is_movie = True
elif file.strip().startswith('(Rec'):
is_movie = True
# If we still think it is not a movie then skip it
if is_movie == False:
print "File {0} is determined to be a tv show by pytivo metadata file, skipping".format(file)
continue
if 'title' in meta:
title = meta['title']
if 'movieYear' in meta:
year = meta['movieYear']
# Create the movie
movie = Media.Movie(title, year)
movie.source = VideoFiles.RetrieveSource(i)
movie.parts.append(i)
mediaList.append(movie)
# Stack the results.
Stack.Scan(path, files, mediaList, subdirs)
def find_data(atom, name):
child = atomsearch.find_path(atom, name)
data_atom = child.find('data')
if data_atom and 'data' in data_atom.attrs:
return data_atom.attrs['data']
if __name__ == '__main__':
print "Hello, world!"
path = sys.argv[1]
files = [os.path.join(path, file) for file in os.listdir(path)]
media = []
Scan(path[1:], files, media, [])
print "Media:", media
|
gpl-2.0
| -1,551,859,463,111,051,500
| 37.577465
| 180
| 0.529755
| false
| 2.833937
| false
| false
| false
|
gorbyo/admin_couchdb
|
admin_couchdb/couch_set_repl.py
|
1
|
2231
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# couch_set_repl.py
#
# Copyright 2013 Oleh Horbachov <gorbyo@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
"""
This script helps you enable push replication between two CouchDB servers.
Usage:
python couch_set_repl.py --source/-s source_host:port --target/-t target_host:port
Example:
python couch_set_repl.py -s couch-src.example.com:5984 -t couch-trg.example.com:5984
"""
import couchquery
import couchdb
import argparse
from argparse import RawTextHelpFormatter
def arguments():
parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter,
description="This script create push replication for all databases")
parser.add_argument('-t', '--target', help='Target COUCHDB Server')
parser.add_argument('-s', '--source', help='Source COUCHDB Server')
return parser
def main(dbsource, dbtarget):
couchdbserver = couchdb.Server('http://'+dbsource+'/')
dbrep = couchquery.Database('http://' + dbsource + '/' + '_replicator')
for id in couchdbserver:
if id != '_replicator' and id != '_users':
dbrep.create({'_id': id+'_to_'+dbtarget, 'source': id, 'target': 'http://'+dbtarget+'/'+id,
'create_target': True, 'continuous': True})
return 0
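# For illustration, each document written to the _replicator database by main()
# looks roughly like this (hostnames are the hypothetical ones from the docstring):
#
#     {"_id": "mydb_to_couch-trg.example.com:5984",
#      "source": "mydb",
#      "target": "http://couch-trg.example.com:5984/mydb",
#      "create_target": true,
#      "continuous": true}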
if __name__ == '__main__':
try:
dbsource = arguments().parse_args().source
dbtarget = arguments().parse_args().target
main(dbsource, dbtarget)
except:
arguments().print_help()
|
gpl-3.0
| -5,853,721,085,586,909,000
| 32.298507
| 105
| 0.670551
| false
| 3.718333
| false
| false
| false
|
kjedruczyk/phabricator-tools
|
py/abd/abdi_processrepoarglist.py
|
1
|
16178
|
"""Process a list of repository arguments."""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# abdi_processrepoarglist
#
# Public Functions:
# do
# determine_max_workers_default
# fetch_if_needed
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import json
import logging
import multiprocessing
import os
import time
import phlcon_reviewstatecache
import phlgitx_refcache
import phlmp_cyclingpool
import phlsys_conduit
import phlsys_fs
import phlsys_git
import phlsys_strtotime
import phlsys_subprocess
import phlsys_timer
import phlurl_watcher
import abdmail_mailer
import abdt_classicnaming
import abdt_compositenaming
import abdt_conduit
import abdt_differresultcache
import abdt_errident
import abdt_exhandlers
import abdt_fs
import abdt_git
import abdt_logging
import abdt_rbranchnaming
import abdt_tryloop
import abdi_processexitcodes
import abdi_processrepo
import abdi_repoargs
_LOGGER = logging.getLogger(__name__)
def do(
repo_configs,
sys_admin_emails,
sleep_secs,
is_no_loop,
external_report_command,
mail_sender,
max_workers,
overrun_secs):
conduit_manager = _ConduitManager()
fs_accessor = abdt_fs.make_default_accessor()
url_watcher_wrapper = phlurl_watcher.FileCacheWatcherWrapper(
fs_accessor.layout.urlwatcher_cache_path)
# decide max workers based on number of CPUs if no value is specified
if max_workers == 0:
max_workers = determine_max_workers_default()
repo_list = []
for name, config in repo_configs:
repo_list.append(
_ArcydManagedRepository(
name,
config,
conduit_manager,
url_watcher_wrapper,
sys_admin_emails,
mail_sender))
    # If we always overrun half of our workers then the loop is sustainable; if
    # we overrun more than that then we will lag too far behind. In the event
    # that we only have one worker we can't overrun any.
max_overrun_workers = max_workers // 2
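    # For example: with max_workers = 8 up to 4 jobs may overrun into the next
    # cycle, with max_workers = 2 only 1 may, and with max_workers = 1 the
    # integer division gives 0, so a single worker is never allowed to overrun.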
pool = phlmp_cyclingpool.CyclingPool(
repo_list, max_workers, max_overrun_workers)
cycle_timer = phlsys_timer.Timer()
cycle_timer.start()
exit_code = None
while exit_code is None:
# This timer needs to be separate from the cycle timer. The cycle timer
# must be reset every time it is reported. The sleep timer makes sure
# that each run of the loop takes a minimum amount of time.
sleep_timer = phlsys_timer.Timer()
sleep_timer.start()
# refresh git snoops
with abdt_logging.remote_io_read_event_context(
'refresh-git-snoop', ''):
abdt_tryloop.critical_tryloop(
url_watcher_wrapper.watcher.refresh,
abdt_errident.GIT_SNOOP,
'')
with abdt_logging.remote_io_read_event_context('refresh-conduit', ''):
conduit_manager.refresh_conduits()
with abdt_logging.misc_operation_event_context(
'process-repos',
'{} workers, {} repos'.format(max_workers, len(repo_list))):
if max_workers > 1:
for i, res in pool.cycle_results(overrun_secs=overrun_secs):
repo = repo_list[i]
repo.merge_from_worker(res)
else:
for r in repo_list:
r()
# important to do this before stopping arcyd and as soon as possible
# after doing fetches
url_watcher_wrapper.save()
# report cycle stats
report = {
"cycle_time_secs": cycle_timer.restart(),
"overrun_jobs": pool.num_active_jobs,
}
_LOGGER.debug("cycle-stats: {}".format(report))
if external_report_command:
report_json = json.dumps(report)
full_path = os.path.abspath(external_report_command)
with abdt_logging.misc_operation_event_context(
'external-report-command', external_report_command):
try:
phlsys_subprocess.run(full_path, stdin=report_json)
except phlsys_subprocess.CalledProcessError as e:
_LOGGER.error(
"External command: {} failed with exception: "
"{}.".format(
external_report_command, type(e).__name__))
_LOGGER.error("VERBOSE MESSAGE: CycleReportJson:{}".format(
e))
if is_no_loop:
exit_code = abdi_processexitcodes.ExitCodes.ec_exit
elif os.path.isfile(fs_accessor.layout.killfile):
exit_code = abdi_processexitcodes.ExitCodes.ec_exit
if phlsys_fs.read_text_file(fs_accessor.layout.killfile):
_LOGGER.info("Reason for stopping arcyd: {}".format(
phlsys_fs.read_text_file(fs_accessor.layout.killfile)))
os.remove(fs_accessor.layout.killfile)
elif os.path.isfile(fs_accessor.layout.reloadfile):
exit_code = abdi_processexitcodes.ExitCodes.ec_reload
os.remove(fs_accessor.layout.reloadfile)
# sleep to pad out the cycle
secs_to_sleep = float(sleep_secs) - float(sleep_timer.duration)
if secs_to_sleep > 0 and exit_code is None:
with abdt_logging.misc_operation_event_context(
'sleep', secs_to_sleep):
time.sleep(secs_to_sleep)
# finish any jobs that overran
for i, res in pool.finish_results():
repo = repo_list[i]
repo.merge_from_worker(res)
# important to do this before stopping arcyd and as soon as
# possible after doing fetches
url_watcher_wrapper.save()
return exit_code
def determine_max_workers_default():
max_workers = 1
try:
# use the same default as multiprocessing.Pool
max_workers = multiprocessing.cpu_count()
_LOGGER.debug(
"max_workers unspecified, defaulted to cpu_count: {}".format(
max_workers))
except NotImplementedError:
_LOGGER.warning(
"multiprocessing.cpu_count() not supported, disabling "
"multiprocessing. Specify max workers explicitly to enable.")
return max_workers
class _RecordingWatcherWrapper(object):
def __init__(self, watcher):
self._watcher = watcher
self._tested_urls = set()
def peek_has_url_recently_changed(self, url):
return self._watcher.peek_has_url_recently_changed(url)
def has_url_recently_changed(self, url):
self._tested_urls.add(url)
return self._watcher.has_url_recently_changed(url)
def get_data_for_merging(self):
data = self._watcher.get_data_for_merging()
tested = self._tested_urls
new_data = {k: v for k, v in data.iteritems() if k in tested}
return new_data
class _RepoActiveRetryState(object):
"""Determine when a repo is active and when to retry it."""
def __init__(self, retry_timestr_list):
self._is_active = True
self._reactivate_time = None
self._retry_delays = [
phlsys_strtotime.duration_string_to_time_delta(s)
for s in retry_timestr_list
]
def calc_active(self):
if not self._is_active and self._reactivate_time is not None:
if datetime.datetime.utcnow() >= self._reactivate_time:
self._is_active = True
self._reactivate_time = None
return self._is_active
@property
def is_active(self):
return self._is_active
def disable(self):
self._is_active = False
retry_delay = None
if self._retry_delays:
retry_delay = self._retry_delays.pop(0)
self._reactivate_time = datetime.datetime.utcnow() + retry_delay
else:
self._reactivate_time = None
return retry_delay
@property
def reactivate_time(self):
return self._reactivate_time
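# Illustrative timeline for the retry list used below ("10 seconds",
# "10 minutes", "1 hours"): the first failure disables the repo for 10 seconds,
# the second for 10 minutes, the third for 1 hour, and once the list is
# exhausted disable() leaves reactivate_time as None, so the repo stays
# inactive for the rest of the run.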
class _ArcydManagedRepository(object):
def __init__(
self,
repo_name,
repo_args,
conduit_manager,
url_watcher_wrapper,
sys_admin_emails,
mail_sender):
self._active_state = _RepoActiveRetryState(
retry_timestr_list=["10 seconds", "10 minutes", "1 hours"])
sys_repo = phlsys_git.Repo(repo_args.repo_path)
self._refcache_repo = phlgitx_refcache.Repo(sys_repo)
self._differ_cache = abdt_differresultcache.Cache(self._refcache_repo)
self._abd_repo = abdt_git.Repo(
self._refcache_repo,
self._differ_cache,
"origin",
repo_args.repo_desc)
self._name = repo_name
self._args = repo_args
self._conduit_manager = conduit_manager
conduit_cache = conduit_manager.get_conduit_and_cache_for_args(
repo_args)
self._arcyd_conduit, self._review_cache = conduit_cache
self._mail_sender = mail_sender
self._url_watcher_wrapper = url_watcher_wrapper
self._on_exception = abdt_exhandlers.make_exception_delay_handler(
sys_admin_emails, repo_name)
def __call__(self):
watcher = _RecordingWatcherWrapper(
self._url_watcher_wrapper.watcher)
old_active_reviews = set(self._review_cache.active_reviews)
was_active = self._active_state.is_active
if self._active_state.calc_active():
if not was_active:
_LOGGER.info(
'repo-event: {} re-enabled'.format(self._name))
try:
_process_repo(
self._abd_repo,
self._name,
self._args,
self._arcyd_conduit,
watcher,
self._mail_sender)
except Exception:
retry_delay = self._active_state.disable()
_LOGGER.info(
'repo-event: {} disabled, retry in {}'.format(
self._name, retry_delay))
self._on_exception(retry_delay)
else:
_LOGGER.debug(
'repo-status: {} is inactive until {}'.format(
self._name, self._active_state.reactivate_time))
return (
self._review_cache.active_reviews - old_active_reviews,
self._active_state,
watcher.get_data_for_merging(),
self._refcache_repo.peek_hash_ref_pairs(),
self._differ_cache.get_cache()
)
def merge_from_worker(self, results):
(
active_reviews,
active_state,
watcher_data,
hash_ref_pairs,
differ_cache
) = results
self._review_cache.merge_additional_active_reviews(active_reviews)
self._active_state = active_state
self._refcache_repo.set_hash_ref_pairs(hash_ref_pairs)
self._differ_cache.set_cache(differ_cache)
# merge in the consumed urls from the worker
self._url_watcher_wrapper.watcher.merge_data_consume_only(watcher_data)
class _ConduitManager(object):
def __init__(self):
super(_ConduitManager, self).__init__()
self._conduits_caches = {}
def get_conduit_and_cache_for_args(self, args):
key = (
args.instance_uri,
args.arcyd_user,
args.arcyd_cert,
args.https_proxy
)
if key not in self._conduits_caches:
# create an array so that the 'connect' closure binds to the
# 'conduit' variable as we'd expect, otherwise it'll just
# modify a local variable and this 'conduit' will remain 'None'
# XXX: we can _process_repo better in python 3.x (nonlocal?)
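            # Illustrative sketch of the limitation (not executed): in Python 2
            #
            #     conduit = None
            #     def connect():
            #         conduit = make_conduit()   # rebinds a new local only,
            #                                    # the outer 'conduit' stays None
            #
            # whereas assigning to conduit[0] mutates the shared list instead.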
conduit = [None]
def connect():
# XXX: we'll rebind in python 3.x, instead
# nonlocal conduit
conduit[0] = phlsys_conduit.MultiConduit(
args.instance_uri,
args.arcyd_user,
args.arcyd_cert,
https_proxy=args.https_proxy)
abdt_tryloop.tryloop(
connect, abdt_errident.CONDUIT_CONNECT, args.instance_uri)
multi_conduit = conduit[0]
cache = phlcon_reviewstatecache.make_from_conduit(multi_conduit)
arcyd_conduit = abdt_conduit.Conduit(multi_conduit, cache)
self._conduits_caches[key] = (arcyd_conduit, cache)
else:
arcyd_conduit, cache = self._conduits_caches[key]
return arcyd_conduit, cache
def refresh_conduits(self):
for conduit, cache in self._conduits_caches.itervalues():
abdt_tryloop.critical_tryloop(
cache.refresh_active_reviews,
abdt_errident.CONDUIT_REFRESH,
conduit.describe())
def fetch_if_needed(url_watcher, snoop_url, repo, repo_desc):
did_fetch = False
# fetch only if we need to
if not snoop_url or url_watcher.peek_has_url_recently_changed(snoop_url):
abdt_tryloop.tryloop(
repo.checkout_master_fetch_prune,
abdt_errident.FETCH_PRUNE,
repo_desc)
did_fetch = True
if did_fetch and snoop_url:
# consume the 'newness' of this repo, since fetching succeeded
url_watcher.has_url_recently_changed(snoop_url)
return did_fetch
def _process_repo(
repo,
unused_repo_name,
args,
arcyd_conduit,
url_watcher,
mail_sender):
fetch_if_needed(
url_watcher,
abdi_repoargs.get_repo_snoop_url(args),
repo,
args.repo_desc)
admin_emails = set(_flatten_list(args.admin_emails))
# TODO: this should be a URI for users not conduit
mailer = abdmail_mailer.Mailer(
mail_sender,
admin_emails,
args.repo_desc,
args.instance_uri)
branch_url_callable = None
if args.branch_url_format:
def make_branch_url(branch_name):
return args.branch_url_format.format(
branch=branch_name,
repo_url=args.repo_url)
branch_url_callable = make_branch_url
branch_naming = abdt_compositenaming.Naming(
abdt_classicnaming.Naming(),
abdt_rbranchnaming.Naming())
branches = abdt_git.get_managed_branches(
repo,
args.repo_desc,
branch_naming,
branch_url_callable)
abdi_processrepo.process_branches(branches, arcyd_conduit, mailer)
def _flatten_list(hierarchy):
for x in hierarchy:
# recurse into hierarchy if it's a list
if hasattr(x, '__iter__') and not isinstance(x, str):
for y in _flatten_list(x):
yield y
else:
yield x
# -----------------------------------------------------------------------------
# Copyright (C) 2014-2015 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
|
apache-2.0
| -3,659,248,281,344,877,600
| 31.949084
| 79
| 0.580232
| false
| 3.901133
| false
| false
| false
|
dmsovetov/pygling
|
Pygling/Target/Executable.py
|
1
|
1531
|
#
# The MIT License (MIT)
#
# Copyright (c) 2015 Dmitry Sovetov
#
# https://github.com/dmsovetov
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from Target import Target
# class Executable
class Executable( Target ):
# ctor
def __init__( self, name, sources = None, paths = None, defines = None, link = None ):
Target.__init__( self, name, sources = sources, paths = paths, defines = defines, link = link, linkTo = Target.Executable )
# shouldLinkLibraries
@property
def shouldLinkLibraries( self ):
return True
|
mit
| 8,647,078,688,976,270,000
| 41.555556
| 125
| 0.755715
| false
| 4.115591
| false
| false
| false
|
malja/cvut-python
|
cviceni08/01_trideni_karet.py
|
1
|
1202
|
import copy
cards = [[0, 'Q'], [2, '6'], [1, 'K'],
[1, '8'], [2, '10'], [2, '4'],
[3, '4'], [0, '4'], [1, '3'],
[2, '5'], [0, 'K'], [3, 'A'],
[1, 'J'], [0, '3'], [0, '9']]
def cardTypeAsInt( card ):
if card[1].isdigit():
return int(card[1])
if card[1] == "J":
return 11
elif card[1] == "Q":
return 12
elif card[1] == "K":
return 13
else:
return 14
def compareCards( card1, card2 ):
    print("comparing cards:", card1, card2)
    if (card1[0] == card2[0]):
        print("equal")
        if ( cardTypeAsInt( card1 ) < cardTypeAsInt( card2 ) ):
            print("color1")
            return True
        else:
            print("color2")
            return False
    else:
        print("else")
        return card1[0] < card2[0]
def bubbleSort( array, swap_fn ):
    sorted = copy.deepcopy(array)
    # Bubble each element backwards while swap_fn says it should come before
    # its predecessor (guard j > 0 so we never wrap around to the last element).
    for i in range( len( sorted ) ):
        j = i
        while j > 0 and swap_fn( sorted[j], sorted[j-1] ):
            sorted[j-1], sorted[j] = sorted[j], sorted[j-1]
            j -= 1
    return sorted
print( cards )
print( bubbleSort( cards, compareCards) )
|
mit
| -2,594,362,512,075,148,000
| 20.818182
| 63
| 0.446667
| false
| 3.015075
| false
| false
| false
|
tswicegood/Dolt
|
dolt/__init__.py
|
1
|
8781
|
import httplib2
import urllib
try:
import json
except ImportError:
import simplejson as json
try:
from decorator import decorator
except ImportError:
# No decorator package available. Create a no-op "decorator".
def decorator(f):
def decorate(_func):
def inner(*args, **kwargs):
return f(_func, *args, **kwargs)
return inner
return decorate
@decorator
def _makes_clone(_func, *args, **kw):
"""
A decorator that returns a clone of the current object so that
we can re-use the object for similar requests.
"""
self = args[0]._clone()
_func(self, *args[1:], **kw)
return self
class Dolt(object):
"""
A dumb little wrapper around RESTful interfaces.
Subclass `Dolt` to create specific APIs.
Example::
class MyApi(Dolt):
_api_url = 'https://api.example.com'
_url_template = '%(domain)s/%(generated_url)s.json'
api = MyApi()
print api.images()
"""
_api_url = ''
"""The base url for this API"""
_url_template = '%(domain)s/%(generated_url)s'
"""
Template used to generate URLs.
- `%(domain)s` is the `_api_url`
- `%(generated_url)s` is where the URL parts go.
"""
_stack_collapser = '/'.join
_params_template = '?%s'
def __init__(self, http=None):
self._supported_methods = ("GET", "POST", "PUT", "HEAD", "DELETE", "OPTIONS")
self._attribute_stack = []
self._method = "GET"
self._body = None
self._http = http or httplib2.Http()
self._params = {}
self._headers = {}
def __call__(self, *args, **kwargs):
url = self.get_url(*[str(a) for a in args], **kwargs)
response, data = self._http.request(url, self._method, body=self._body, headers=self._headers)
return self._handle_response(response, data)
def _generate_params(self, params):
return self._params_template % urllib.urlencode(params)
def _handle_response(self, response, data):
"""
Deserializes JSON if the content-type matches, otherwise returns the response
body as is.
"""
        # Content-Type headers can include additional parameters (RFC 1521), so
        # we split on ';' to match against only the type/subtype
if data and response.get('content-type', '').split(';')[0] in (
'application/json',
'application/x-javascript',
'text/javascript',
'text/x-javascript',
'text/x-json'
):
return json.loads(data)
else:
return data
@_makes_clone
def __getitem__(self, name):
"""
Adds `name` to the URL path.
"""
self._attribute_stack.append(name)
return self
@_makes_clone
def __getattr__(self, name):
"""
Sets the HTTP method for the request or adds `name` to the URL path.
::
>>> dolt.GET._method == 'GET'
True
>>> dolt.foo.bar.get_url()
'/foo/bar'
"""
if name in self._supported_methods:
self._method = name
elif not name.endswith(')'):
self._attribute_stack.append(name)
return self
@_makes_clone
def with_params(self, **params):
"""
Add/overwrite URL query parameters to the request.
"""
self._params.update(params)
return self
@_makes_clone
def with_body(self, body=None, **params):
"""
Add a body to the request.
When `body` is a:
- string, it will be used as is.
- dict or list of (key, value) pairs, it will be form encoded
- None, remove request body
- anything else, a TypeError will be raised
If `body` is a dict or None you can also pass in keyword
arguments to add to the body.
::
>>> dolt.with_body(dict(key='val'), foo='bar')._body
'foo=bar&key=val'
"""
if isinstance(body, (tuple, list)):
body = dict(body)
if params:
# Body must be None or able to be a dict
if isinstance(body, dict):
body.update(params)
elif body is None:
body = params
else:
raise ValueError('Body must be None or a dict if used with params, got: %r' % body)
if isinstance(body, basestring):
self._body = body
elif isinstance(body, dict):
self._body = urllib.urlencode(body)
elif body is None:
self._body = None
else:
raise TypeError('Invalid body type %r' % body)
return self
def with_json(self, data=None, **params):
"""
Add a json body to the request.
:param data: A json string, a dict, or a list of key, value pairs
:param params: A dict of key value pairs to JSON encode
"""
if isinstance(data, (tuple, list)):
data = dict(data)
if params:
# data must be None or able to be a dict
if isinstance(data, dict):
data.update(params)
elif data is None:
data = params
else:
raise ValueError('Data must be None or a dict if used with params, got: %r' % data)
req = self.with_headers({'Content-Type': 'application/json', 'Accept': 'application/json'})
if isinstance(data, basestring):
# Looks like it's already been encoded
return req.with_body(data)
else:
return req.with_body(json.dumps(data))
@_makes_clone
def with_headers(self, headers=None, **params):
"""
Add headers to the request.
:param headers: A dict, or a list of key, value pairs
:param params: A dict of key value pairs
"""
if isinstance(headers, (tuple, list)):
headers = dict(headers)
if params:
if isinstance(headers, dict):
headers.update(params)
elif headers is None:
headers = params
self._headers.update(headers)
return self
def get_url(self, *paths, **params):
"""
Returns the URL for this request.
:param paths: Additional URL path parts to add to the request
:param params: Additional query parameters to add to the request
"""
path_stack = self._attribute_stack[:]
if paths:
path_stack.extend(paths)
u = self._stack_collapser(path_stack)
url = self._url_template % {
"domain": self._api_url,
"generated_url" : u,
}
if self._params or params:
internal_params = self._params.copy()
internal_params.update(params)
url += self._generate_params(internal_params)
return url
def _clone(self):
"""
Clones the state of the current operation.
The state is cloned so that you can freeze the state at a certain point for re-use.
::
>>> cat = dolt.cat
>>> cat.get_url()
'/cat'
>>> o = cat.foo
>>> o.get_url()
'/cat/foo'
>>> cat.get_url()
'/cat'
"""
cls = self.__class__
q = cls.__new__(cls)
q.__dict__ = self.__dict__.copy()
q._params = self._params.copy()
q._headers = self._headers.copy()
q._attribute_stack = self._attribute_stack[:]
return q
try:
__IPYTHON__
def __dir__(self):
return [
'_supported_methods',
'_attribute_stack',
'_method',
'_body',
'_http',
'_params',
'_headers',
'_api_url',
'_url_template',
'_stack_collapser',
'_params_template',
'__init__',
'__call__',
'_handle_response',
'__getattr__',
'get_url',
'__dir__',
]
_getAttributeNames = trait_names = __dir__
except NameError:
pass
class Simpleton(Dolt):
"""
A dumber little wrapper around RESTful interfaces.
Example::
api = Simpleton('http://api.example.com')
print api.images()
"""
def __init__(self, base_url, http=None):
super(Simpleton, self).__init__(http=http)
self._api_url = base_url
|
bsd-3-clause
| -1,242,726,497,648,566,800
| 27.237942
| 102
| 0.512015
| false
| 4.285505
| false
| false
| false
|
nitheesh/AutoMoveMouse
|
appind.py
|
1
|
6321
|
#!/usr/bin/env python
import os
import os.path
import pygtk
pygtk.require('2.0')
import gtk
import time
import subprocess
import threading
import atexit
import commands
import appindicator
from Xlib import display
MaxIdle = 10
lockFile = "/tmp/automouse.lck"
appFile = "/tmp/appfile.lck"
# Touch the signal file on script startup
open(appFile, 'a').close()
class AppIndicatorMouse:
def __init__(self):
self.ind = appindicator.Indicator ("AutoMouseMove-Indicator", "indicator-messages", appindicator.CATEGORY_APPLICATION_STATUS)
self.ind.set_status (appindicator.STATUS_ACTIVE)
self.ind.set_attention_icon ("indicator-messages-new")
self.ind.set_icon("distributor-logo")
self.start = True
self.timer = None
self.timer_text = ""
# create a menu
self.menu = gtk.Menu()
_radio = gtk.RadioMenuItem(None, "Demo")
_radio1 = gtk.RadioMenuItem(None, "Demo")
radio = gtk.RadioMenuItem(_radio, "Start")
radio.connect("activate", self.start_btn_pressed)
radio.show()
self.menu.append(radio)
radio1 = gtk.RadioMenuItem(_radio, "Stop")
radio1.connect("activate", self.stop_btn_pressed)
radio1.show()
self.menu.append(radio1)
self.dis_web = gtk.CheckMenuItem("kuku")
self.dis_web.connect("toggled", self.disable_webcam)
self.dis_web.show()
self.menu.append(self.dis_web)
button = gtk.MenuItem(label="Timer")
button.connect("activate", self.TimerpopUp)
button.show()
self.menu.append(button)
image = gtk.ImageMenuItem(gtk.STOCK_QUIT)
image.connect("activate", self.quit)
image.show()
self.menu.append(image)
self.menu.show()
self.ind.set_menu(self.menu)
self.thread = threading.Thread(target=self.StartbashScript)
self.thread.daemon = True
self.thread.start()
# self.thread.join()
def quit(self, widget, data=None):
# print self.thread
try:
self._bash.kill()
except:
pass
gtk.main_quit()
def start_btn_pressed(self, widget):
print "Start button clicked."
try:
os.remove(appFile)
except:
print "Unable to remove appFile"
def stop_btn_pressed(self, widget):
print "Stop clicked."
open(appFile, 'a').close()
# self.ind.set_label("Stopped")
def StartbashScript(self):
self._bash = None
self.thread1 = None
prev_pos = None
count = 0
# self.timer = 30
while True:
if self.timer is not None:
count = count + 1
if int(count) >= int(self.timer) and not os.path.isfile(lockFile):
try:
print "Timer reached"
count = 0
self.timer = None
open(appFile, 'a').close()
except:
print "Timer encountered an error!!"
pass
else:
count = 0
if os.path.isfile(appFile):
print "App is on stop mode!!"
time.sleep(1)
continue
else:
if not os.path.isfile(lockFile):
self._bash = None
prev_pos = None
idle = commands.getstatusoutput('expr $(xprintidle) / 1000')[1]
if (int(idle) > MaxIdle):
if self._bash is None:
print "system goes idle..!"
self.thread1 = threading.Thread(target=self.AutoMouseMove)
self.thread1.daemon = True
self.thread1.start()
self.thread1.join()
else:
print str(idle) + str(" : system active")
if self._bash is not None:
# print("The mouse position on the screen is {0}".format(self.mousepos()))
cur_pos = self.mousepos()
print "Current postion" + str(cur_pos)
if prev_pos is not None and cur_pos != prev_pos:
subprocess.Popen("exec " + "xte 'keyup Control_L' && xte 'keyup Alt_L'", shell=True, stdout=subprocess.PIPE)
print "System activated by user input"
self._bash.terminate()
self._bash = None
print "Lock file removed!"
os.remove(lockFile)
prev_pos = cur_pos
FirstRun = False
time.sleep(1)
def mousepos(self):
"""mousepos() --> (x, y) get the mouse coordinates on the screen (linux, Xlib)."""
data = display.Display().screen().root.query_pointer()._data
return data["root_x"]
def AutoMouseMove(self):
open(lockFile, 'a').close()
self._bash = subprocess.Popen("exec " + "./start-mouse.sh", shell=True, stdout=subprocess.PIPE)
print self._bash.pid
def TimerpopUp(self,btn):
#base this on a message dialog
dialog = gtk.MessageDialog(
None,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_QUESTION,
gtk.BUTTONS_OK,
None)
dialog.set_markup('Please set the <b>timer</b>:')
#create the text input field
entry = gtk.Entry()
entry.set_text(self.timer_text)
#allow the user to press enter to do ok
entry.connect("activate", self.responseToDialog, dialog, gtk.RESPONSE_OK)
entry.connect('changed', self.on_changed)
#create a horizontal box to pack the entry and a label
hbox = gtk.HBox()
hbox.pack_start(gtk.Label("Timer (min):"), False, 5, 5)
hbox.pack_end(entry)
#some secondary text
# dialog.format_secondary_markup("This will be used for <i>identification</i> purposes")
#add it and show it
dialog.vbox.pack_end(hbox, True, True, 0)
dialog.show_all()
#go go go
dialog.run()
text = entry.get_text()
dialog.destroy()
if text == '':
self.timer_text = ""
self.timer = None
else:
self.timer_text = text
self.timer = int(text) * 60
print self.timer_text
print "Automation will be active for next " + str(self.timer_text) + str(" mins")
def on_changed(self, entry):
text = entry.get_text().strip()
entry.set_text(''.join([i for i in text if i in '123456789']))
    def responseToDialog(self, entry, dialog, response):
        dialog.response(response)
def disable_webcam(self, widget, data=None):
if widget.get_active():
os.system("echo 'passwd' | sudo -S modprobe -r uvcvideo")
else:
os.system("echo 'passwd' | sudo -S modprobe uvcvideo")
if __name__ == "__main__":
gtk.gdk.threads_init()
# test = Test1()
indicator = AppIndicatorMouse()
gtk.main()
|
gpl-2.0
| 8,106,481,789,371,758,000
| 28.820755
| 129
| 0.615567
| false
| 3.529313
| false
| false
| false
|
EduPepperPDTesting/pepper2013-testing
|
lms/djangoapps/sso/sp_metadata.py
|
1
|
7015
|
from mitxmako.shortcuts import render_to_response
import xmltodict
from django.http import HttpResponse
import json
from django.conf import settings
from collections import defaultdict
import os
from OpenSSL import crypto
import re
from path import path
from permissions.decorators import user_has_perms
BASEDIR = settings.PROJECT_HOME + "/sso/sp"
PEPPER_ENTITY_ID = "www.pepperpd.com"
@user_has_perms('sso', 'administer')
def edit(request):
return render_to_response('sso/manage/sp_metadata.html')
@user_has_perms('sso', 'administer')
def save(request):
data = json.loads(request.POST.get('data'))
entities = []
for d in data:
sso_name = d.get('sso_name', '')
sso_type = d.get('sso_type')
path = BASEDIR + "/" + sso_name
if not os.path.isdir(path):
os.makedirs(path)
typed = d.get('typed')
sso_type = d.get('sso_type')
if typed.get('saml_metadata'):
mdfile = open(path + "/FederationMetadata.xml", "w")
mdfile.write(typed.get('saml_metadata'))
del typed['saml_metadata']
typed_setting = []
for k, v in typed.items():
typed_setting.append('''
<setting name="%s">%s</setting>''' % (k, v))
attributes = []
for a in d.get('attributes'):
attributes.append('''
<attribute name="%s" map="%s"></attribute>''' % (a['name'], a['map']))
entities.append('''
<entity type="%s" name="%s">%s%s
</entity>''' % (d.get('sso_type', ''),
sso_name,
''.join(typed_setting),
''.join(attributes)
))
content = '''<?xml version="1.0"?>
<entities xmlns:ds="http://www.w3.org/2000/09/xmldsig#">%s
</entities>''' % ''.join(entities)
xmlfile = open(BASEDIR + "/metadata.xml", "w")
xmlfile.write(content)
xmlfile.close()
# post process
for d in data:
sso_name = d.get('sso_name', '')
sso_type = d.get('sso_type')
if sso_type == 'SAML':
create_saml_config_files(sso_name)
return HttpResponse("{}", content_type="application/json")
@user_has_perms('sso', 'administer')
def all_json(request):
xmlfile = open(BASEDIR + "/metadata.xml", "r")
parsed_data = xmltodict.parse(xmlfile.read(),
dict_constructor=lambda *args, **kwargs: defaultdict(list, *args, **kwargs))
entity_list = []
if 'entity' in parsed_data['entities'][0]:
for entity in parsed_data['entities'][0]['entity']:
entity_list.append(parse_one_sp(entity))
return HttpResponse(json.dumps(entity_list), content_type="application/json")
def sp_by_name(name):
xmlfile = open(BASEDIR + "/metadata.xml", "r")
parsed_data = xmltodict.parse(xmlfile.read(),
dict_constructor=lambda *args, **kwargs: defaultdict(list, *args, **kwargs))
if 'entity' in parsed_data['entities'][0]:
for entity in parsed_data['entities'][0]['entity']:
if entity['@name'] == name:
return parse_one_sp(entity)
def parse_one_sp(entity):
attribute_list = []
if 'attribute' in entity:
for attribute in entity['attribute']:
attr = {
# 'type': attribute['@type'],
'name': attribute['@name'],
'map': attribute['@map']
}
attribute_list.append(attr)
typed_setting = {}
if 'setting' in entity:
for attribute in entity['setting']:
typed_setting[attribute['@name']] = attribute['#text']
# path = BASEDIR + "/" + entity['@name'] + "/FederationMetadata.xml"
# if os.path.isfile(path):
# mdfile = open(path, "r")
# typed_setting['saml_metadata'] = mdfile.read()
return {
'sso_type': entity['@type'],
'sso_name': entity['@name'],
'attributes': attribute_list,
'typed': typed_setting
}
def create_self_signed_cert(CN, C="US", ST="unknown", L="unknown", O="unknown", OU="unknown", serial_number=1, notBefore=0, notAfter=365*24*60*60):
"""
"""
# create a key pair
k = crypto.PKey()
k.generate_key(crypto.TYPE_RSA, 1024)
# create a self-signed cert
cert = crypto.X509()
cert.get_subject().C = C
cert.get_subject().ST = ST
cert.get_subject().L = L
cert.get_subject().O = O
cert.get_subject().OU = OU
cert.get_subject().CN = CN # most important part
cert.set_serial_number(1000)
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(10*365*24*60*60)
cert.set_issuer(cert.get_subject())
cert.set_pubkey(k)
cert.sign(k, 'sha1')
cert = crypto.dump_certificate(crypto.FILETYPE_PEM, cert)
key = crypto.dump_privatekey(crypto.FILETYPE_PEM, k)
return cert, key
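# Usage sketch (hypothetical common name): generate a throwaway certificate and
# key pair for a service provider and keep both PEM strings.
#
#     cert_pem, key_pem = create_self_signed_cert("sp.example.org")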
def create_saml_config_files(name):
ms = sp_by_name(name)
cert_file = BASEDIR + '/' + name + "/cert.pem"
key_file = BASEDIR + '/' + name + "/key.pem"
if not os.path.isfile(cert_file) or not os.path.isfile(key_file):
cert, key = create_self_signed_cert(name)
open(cert_file, "wt").write(cert)
open(key_file, "wt").write(key)
cert = open(cert_file, "r").read()
key = open(key_file, "r").read()
cert = re.sub('-----.*?-----\n?', '', cert)
key = re.sub('-----.*?-----\n?', '', key)
auth = "http://docs.oasis-open.org/wsfed/authorization/200706"
temp_dir = path(__file__).abspath().dirname()
template = open(temp_dir + "/metadata_templates/sp.xml", "r").read()
attr_tags = ""
for attr in ms.get('attributes'):
mapped_name = attr['map'] if 'map' in attr else attr['name']
attr_tags += '''
<ns0:RequestedAttribute isRequired="true" NameFormat="urn:mace:dir:attribute-def:%s"
Name="%s" FriendlyName="%s"/>''' % (mapped_name, mapped_name, mapped_name)
content = template.format(cert=cert,
entityID=name,
auth=auth,
attr_tags=attr_tags,
slo_post_url="",
slo_redirect_url="",
acs_url=ms.get('typed').get('sso_acs_url'))
f = BASEDIR + '/' + name + "/sp.xml"
open(f, "wt").write(content)
template = open(temp_dir + "/metadata_templates/idp.xml", "r").read()
content = template.format(cert=cert, entityID=PEPPER_ENTITY_ID, auth=auth)
f = BASEDIR + '/' + name + "/idp.xml"
open(f, "wt").write(content)
def download_saml_federation_metadata(request):
name = request.GET.get("name")
ms = sp_by_name(name)
if not ms:
return HttpResponse("SP with name '%s' does not exist." % name)
f = BASEDIR + '/' + name + "/idp.xml"
response = HttpResponse(content_type='application/x-download')
response['Content-Disposition'] = ('attachment; filename=idp.xml')
response.write(open(f, "r").read())
return response
|
agpl-3.0
| 8,215,661,476,016,799,000
| 30.886364
| 147
| 0.566358
| false
| 3.49005
| false
| false
| false
|
memsharded/conan
|
.ci/jenkins/pr_tags.py
|
1
|
2199
|
import argparse
import json
import os
from github import Github
def _get_value(body, tag):
pos = body.lower().find(tag.lower())
if pos != -1:
cl = body[pos + len(tag):].splitlines()[0]
return cl.strip()
return None
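# For example (hypothetical PR body line): given "@PYVERS: Macos@py36, py27",
# _get_value(body, "@PYVERS:") returns "Macos@py36, py27", which the code in
# the __main__ block below splits into per-OS lists of Python versions.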
def get_tag_from_pr(pr_number, tag):
"""Given a PR number and a tag to search, it returns the line written in the body"""
gh_token = os.getenv("GH_TOKEN")
g = Github(gh_token)
repo = g.get_repo("conan-io/conan")
pr = repo.get_pull(pr_number)
body = pr.body
value = _get_value(body, tag)
return value
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Read test tags from a PR body and write them to a JSON file')
parser.add_argument('output_file', help='e.g.: file.json')
parser.add_argument('branch_name', help='e.g.: PR-23')
args = parser.parse_args()
TAG_PYVERS = "@PYVERS:"
TAG_TAGS = "@TAGS:"
TAG_REVISIONS = "@REVISIONS:"
out_file = args.output_file
branch = args.branch_name
if not branch.startswith("PR-"):
print("The branch is not a PR")
exit(-1)
pr_number = int(branch.split("PR-")[1])
def clean_list(the_list):
if not the_list:
return []
return [a.strip() for a in the_list.split(",")]
# Read tags to include
tags = clean_list(get_tag_from_pr(pr_number, TAG_TAGS))
# Read pythons to include
tmp = clean_list(get_tag_from_pr(pr_number, TAG_PYVERS))
pyvers = {"Windows": [], "Linux": [], "Macos": []}
for t in tmp:
if "@" in t:
the_os, pyver = t.split("@")
if the_os not in ["Macos", "Linux", "Windows"]:
print("Invalid os: %s" % the_os)
exit(-1)
pyvers[the_os].append(pyver)
else:
pyvers["Macos"].append(t)
pyvers["Linux"].append(t)
pyvers["Windows"].append(t)
    # Test revisions?
tmp = get_tag_from_pr(pr_number, TAG_REVISIONS)
revisions = tmp.strip().lower() in ["1", "true"] if tmp else False
with open(out_file, "w") as f:
the_json = {"tags": tags, "pyvers": pyvers, "revisions": revisions}
f.write(json.dumps(the_json))
|
mit
| -1,919,777,983,848,685,300
| 27.192308
| 88
| 0.56935
| false
| 3.277198
| false
| false
| false
|
yandy/sea
|
setup.py
|
1
|
2143
|
import os
import re
import ast
from setuptools import setup, find_packages
_version_re = re.compile(r'__version__\s+=\s+(.*)')
_root = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(_root, 'sea/__init__.py')) as f:
version = str(ast.literal_eval(_version_re.search(
f.read()).group(1)))
with open(os.path.join(_root, 'requirements.txt')) as f:
requirements = f.readlines()
with open(os.path.join(_root, 'README.md')) as f:
readme = f.read()
def find_package_data(package):
"""
Return all files under the root package, that are not in a
package themselves.
"""
walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
for dirpath, dirnames, filenames in os.walk(package)]
filepaths = []
for base, filenames in walk:
filepaths.extend([os.path.join(base, filename)
for filename in filenames])
return filepaths
setup(
name='sea',
version=version,
description='shanbay rpc framework',
long_description=readme,
url='https://github.com/shanbay/sea',
author='Michael Ding',
author_email='yandy.ding@gmail.com',
license='MIT',
classifiers=[
'Development Status :: 1 - Planning',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: POSIX',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet',
'Topic :: Software Development :: Libraries :: Python Modules'
],
keywords=['rpc', 'grpc'],
packages=find_packages(exclude=['tests']),
package_data={'sea': find_package_data('sea')},
python_requires='>=3',
install_requires=requirements,
entry_points={
'console_scripts': [
'sea=sea.cli:main'
],
'sea.jobs': [
'celery=sea.contrib.extensions.celery.cmd:main',
]
}
)
|
mit
| -2,994,986,565,607,564,000
| 28.763889
| 70
| 0.597294
| false
| 3.840502
| false
| false
| false
|
dragoon/edX-AI-course
|
search/util.py
|
1
|
29256
|
# util.py
# -------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
import sys
import inspect
import heapq, random
import cStringIO
class FixedRandom:
def __init__(self):
fixedState = (3, (2147483648L, 507801126L, 683453281L, 310439348L, 2597246090L, \
2209084787L, 2267831527L, 979920060L, 3098657677L, 37650879L, 807947081L,
3974896263L, \
881243242L, 3100634921L, 1334775171L, 3965168385L, 746264660L,
4074750168L, 500078808L, \
776561771L, 702988163L, 1636311725L, 2559226045L, 157578202L, 2498342920L,
2794591496L, \
4130598723L, 496985844L, 2944563015L, 3731321600L, 3514814613L,
3362575829L, 3038768745L, \
2206497038L, 1108748846L, 1317460727L, 3134077628L, 988312410L,
1674063516L, 746456451L, \
3958482413L, 1857117812L, 708750586L, 1583423339L, 3466495450L,
1536929345L, 1137240525L, \
3875025632L, 2466137587L, 1235845595L, 4214575620L, 3792516855L,
657994358L, 1241843248L, \
1695651859L, 3678946666L, 1929922113L, 2351044952L, 2317810202L,
2039319015L, 460787996L, \
3654096216L, 4068721415L, 1814163703L, 2904112444L, 1386111013L,
574629867L, 2654529343L, \
3833135042L, 2725328455L, 552431551L, 4006991378L, 1331562057L,
3710134542L, 303171486L, \
1203231078L, 2670768975L, 54570816L, 2679609001L, 578983064L, 1271454725L,
3230871056L, \
2496832891L, 2944938195L, 1608828728L, 367886575L, 2544708204L,
103775539L, 1912402393L, \
1098482180L, 2738577070L, 3091646463L, 1505274463L, 2079416566L,
659100352L, 839995305L, \
1696257633L, 274389836L, 3973303017L, 671127655L, 1061109122L, 517486945L,
1379749962L, \
3421383928L, 3116950429L, 2165882425L, 2346928266L, 2892678711L,
2936066049L, 1316407868L, \
2873411858L, 4279682888L, 2744351923L, 3290373816L, 1014377279L,
955200944L, 4220990860L, \
2386098930L, 1772997650L, 3757346974L, 1621616438L, 2877097197L,
442116595L, 2010480266L, \
2867861469L, 2955352695L, 605335967L, 2222936009L, 2067554933L,
4129906358L, 1519608541L, \
1195006590L, 1942991038L, 2736562236L, 279162408L, 1415982909L,
4099901426L, 1732201505L, \
2934657937L, 860563237L, 2479235483L, 3081651097L, 2244720867L,
3112631622L, 1636991639L, \
3860393305L, 2312061927L, 48780114L, 1149090394L, 2643246550L,
1764050647L, 3836789087L, \
3474859076L, 4237194338L, 1735191073L, 2150369208L, 92164394L, 756974036L,
2314453957L, \
323969533L, 4267621035L, 283649842L, 810004843L, 727855536L, 1757827251L,
3334960421L, \
3261035106L, 38417393L, 2660980472L, 1256633965L, 2184045390L, 811213141L,
2857482069L, \
2237770878L, 3891003138L, 2787806886L, 2435192790L, 2249324662L,
3507764896L, 995388363L, \
856944153L, 619213904L, 3233967826L, 3703465555L, 3286531781L,
3863193356L, 2992340714L, \
413696855L, 3865185632L, 1704163171L, 3043634452L, 2225424707L,
2199018022L, 3506117517L, \
3311559776L, 3374443561L, 1207829628L, 668793165L, 1822020716L,
2082656160L, 1160606415L, \
3034757648L, 741703672L, 3094328738L, 459332691L, 2702383376L,
1610239915L, 4162939394L, \
557861574L, 3805706338L, 3832520705L, 1248934879L, 3250424034L,
892335058L, 74323433L, \
3209751608L, 3213220797L, 3444035873L, 3743886725L, 1783837251L,
610968664L, 580745246L, \
4041979504L, 201684874L, 2673219253L, 1377283008L, 3497299167L,
2344209394L, 2304982920L, \
3081403782L, 2599256854L, 3184475235L, 3373055826L, 695186388L,
2423332338L, 222864327L, \
1258227992L, 3627871647L, 3487724980L, 4027953808L, 3053320360L,
533627073L, 3026232514L, \
2340271949L, 867277230L, 868513116L, 2158535651L, 2487822909L,
3428235761L, 3067196046L, \
3435119657L, 1908441839L, 788668797L, 3367703138L, 3317763187L,
908264443L, 2252100381L, \
764223334L, 4127108988L, 384641349L, 3377374722L, 1263833251L,
1958694944L, 3847832657L, \
1253909612L, 1096494446L, 555725445L, 2277045895L, 3340096504L,
1383318686L, 4234428127L, \
1072582179L, 94169494L, 1064509968L, 2681151917L, 2681864920L, 734708852L,
1338914021L, \
1270409500L, 1789469116L, 4191988204L, 1716329784L, 2213764829L,
3712538840L, 919910444L, \
1318414447L, 3383806712L, 3054941722L, 3378649942L, 1205735655L,
1268136494L, 2214009444L, \
2532395133L, 3232230447L, 230294038L, 342599089L, 772808141L, 4096882234L,
3146662953L, \
2784264306L, 1860954704L, 2675279609L, 2984212876L, 2466966981L,
2627986059L, 2985545332L, \
2578042598L, 1458940786L, 2944243755L, 3959506256L, 1509151382L,
325761900L, 942251521L, \
4184289782L, 2756231555L, 3297811774L, 1169708099L, 3280524138L,
3805245319L, 3227360276L, \
3199632491L, 2235795585L, 2865407118L, 36763651L, 2441503575L,
3314890374L, 1755526087L, \
17915536L, 1196948233L, 949343045L, 3815841867L, 489007833L, 2654997597L,
2834744136L, \
417688687L, 2843220846L, 85621843L, 747339336L, 2043645709L, 3520444394L,
1825470818L, \
647778910L, 275904777L, 1249389189L, 3640887431L, 4200779599L, 323384601L,
3446088641L, \
4049835786L, 1718989062L, 3563787136L, 44099190L, 3281263107L, 22910812L,
1826109246L, \
745118154L, 3392171319L, 1571490704L, 354891067L, 815955642L, 1453450421L,
940015623L, \
796817754L, 1260148619L, 3898237757L, 176670141L, 1870249326L,
3317738680L, 448918002L, \
4059166594L, 2003827551L, 987091377L, 224855998L, 3520570137L, 789522610L,
2604445123L, \
454472869L, 475688926L, 2990723466L, 523362238L, 3897608102L, 806637149L,
2642229586L, \
2928614432L, 1564415411L, 1691381054L, 3816907227L, 4082581003L,
1895544448L, 3728217394L, \
3214813157L, 4054301607L, 1882632454L, 2873728645L, 3694943071L,
1297991732L, 2101682438L, \
3952579552L, 678650400L, 1391722293L, 478833748L, 2976468591L, 158586606L,
2576499787L, \
662690848L, 3799889765L, 3328894692L, 2474578497L, 2383901391L,
1718193504L, 3003184595L, \
3630561213L, 1929441113L, 3848238627L, 1594310094L, 3040359840L,
3051803867L, 2462788790L, \
954409915L, 802581771L, 681703307L, 545982392L, 2738993819L, 8025358L,
2827719383L, \
770471093L, 3484895980L, 3111306320L, 3900000891L, 2116916652L,
397746721L, 2087689510L, \
721433935L, 1396088885L, 2751612384L, 1998988613L, 2135074843L,
2521131298L, 707009172L, \
2398321482L, 688041159L, 2264560137L, 482388305L, 207864885L, 3735036991L,
3490348331L, \
1963642811L, 3260224305L, 3493564223L, 1939428454L, 1128799656L,
1366012432L, 2858822447L, \
1428147157L, 2261125391L, 1611208390L, 1134826333L, 2374102525L,
3833625209L, 2266397263L, \
3189115077L, 770080230L, 2674657172L, 4280146640L, 3604531615L,
4235071805L, 3436987249L, \
509704467L, 2582695198L, 4256268040L, 3391197562L, 1460642842L,
1617931012L, 457825497L, \
1031452907L, 1330422862L, 4125947620L, 2280712485L, 431892090L,
2387410588L, 2061126784L, \
896457479L, 3480499461L, 2488196663L, 4021103792L, 1877063114L,
2744470201L, 1046140599L, \
2129952955L, 3583049218L, 4217723693L, 2720341743L, 820661843L,
1079873609L, 3360954200L, \
3652304997L, 3335838575L, 2178810636L, 1908053374L, 4026721976L,
1793145418L, 476541615L, \
973420250L, 515553040L, 919292001L, 2601786155L, 1685119450L, 3030170809L,
1590676150L, \
1665099167L, 651151584L, 2077190587L, 957892642L, 646336572L, 2743719258L,
866169074L, \
851118829L, 4225766285L, 963748226L, 799549420L, 1955032629L, 799460000L,
2425744063L, \
2441291571L, 1928963772L, 528930629L, 2591962884L, 3495142819L,
1896021824L, 901320159L, \
3181820243L, 843061941L, 3338628510L, 3782438992L, 9515330L, 1705797226L,
953535929L, \
764833876L, 3202464965L, 2970244591L, 519154982L, 3390617541L, 566616744L,
3438031503L, \
1853838297L, 170608755L, 1393728434L, 676900116L, 3184965776L,
1843100290L, 78995357L, \
2227939888L, 3460264600L, 1745705055L, 1474086965L, 572796246L,
4081303004L, 882828851L, \
1295445825L, 137639900L, 3304579600L, 2722437017L, 4093422709L,
273203373L, 2666507854L, \
3998836510L, 493829981L, 1623949669L, 3482036755L, 3390023939L,
833233937L, 1639668730L, \
1499455075L, 249728260L, 1210694006L, 3836497489L, 1551488720L,
3253074267L, 3388238003L, \
2372035079L, 3945715164L, 2029501215L, 3362012634L, 2007375355L,
4074709820L, 631485888L, \
3135015769L, 4273087084L, 3648076204L, 2739943601L, 1374020358L,
1760722448L, 3773939706L, \
1313027823L, 1895251226L, 4224465911L, 421382535L, 1141067370L,
3660034846L, 3393185650L, \
1850995280L, 1451917312L, 3841455409L, 3926840308L, 1397397252L,
2572864479L, 2500171350L, \
3119920613L, 531400869L, 1626487579L, 1099320497L, 407414753L,
2438623324L, 99073255L, \
3175491512L, 656431560L, 1153671785L, 236307875L, 2824738046L,
2320621382L, 892174056L, \
230984053L, 719791226L, 2718891946L, 624L), None)
self.random = random.Random()
self.random.setstate(fixedState)
"""
Data structures useful for implementing SearchAgents
"""
class Stack:
"A container with a last-in-first-out (LIFO) queuing policy."
def __init__(self):
self.list = []
def push(self, item):
"Push 'item' onto the stack"
self.list.append(item)
def pop(self):
"Pop the most recently pushed item from the stack"
return self.list.pop()
def isEmpty(self):
"Returns true if the stack is empty"
return len(self.list) == 0
class Queue:
"A container with a first-in-first-out (FIFO) queuing policy."
def __init__(self):
self.list = []
def push(self, item):
"Enqueue the 'item' into the queue"
self.list.insert(0, item)
def pop(self):
"""
Dequeue the earliest enqueued item still in the queue. This
operation removes the item from the queue.
"""
return self.list.pop()
def isEmpty(self):
"Returns true if the queue is empty"
return len(self.list) == 0
class PriorityQueue:
"""
Implements a priority queue data structure. Each inserted item
has a priority associated with it and the client is usually interested
in quick retrieval of the lowest-priority item in the queue. This
data structure allows O(1) access to the lowest-priority item.
Note that this PriorityQueue does not allow you to change the priority
of an item. However, you may insert the same item multiple times with
different priorities.
"""
def __init__(self):
self.heap = []
self.count = 0
def push(self, item, priority):
        # Use an insertion counter as a tie-breaker so that items with equal
        # priority are popped in insertion order (stable behaviour).
entry = (priority, self.count, item)
# entry = (priority, item)
heapq.heappush(self.heap, entry)
self.count += 1
def pop(self):
(_, _, item) = heapq.heappop(self.heap)
# (_, item) = heapq.heappop(self.heap)
return item
def isEmpty(self):
return len(self.heap) == 0
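# A minimal usage sketch for PriorityQueue (illustrative only):
#
#     pq = PriorityQueue()
#     pq.push('expand later', 3)
#     pq.push('expand first', 1)
#     pq.push('expand next', 2)
#     pq.pop()   # -> 'expand first', the lowest-priority item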
class PriorityQueueWithFunction(PriorityQueue):
"""
Implements a priority queue with the same push/pop signature of the
Queue and the Stack classes. This is designed for drop-in replacement for
those two classes. The caller has to provide a priority function, which
extracts each item's priority.
"""
def __init__(self, priorityFunction):
"priorityFunction (item) -> priority"
self.priorityFunction = priorityFunction # store the priority function
PriorityQueue.__init__(self) # super-class initializer
def push(self, item):
"Adds an item to the queue with priority from the priority function"
PriorityQueue.push(self, item, self.priorityFunction(item))
def manhattanDistance(xy1, xy2):
"Returns the Manhattan distance between points xy1 and xy2"
return abs(xy1[0] - xy2[0]) + abs(xy1[1] - xy2[1])
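# --- Illustrative example (added for clarity, not part of the original file) ---
# Shows the pop order produced by the three containers defined above; the helper
# name _containerOrderingDemo is invented for this sketch.
def _containerOrderingDemo():
    "Returns the pop order of Stack (LIFO), Queue (FIFO) and PriorityQueue."
    s, q, pq = Stack(), Queue(), PriorityQueue()
    for item in ('a', 'b', 'c'):
        s.push(item)
        q.push(item)
    pq.push('low', 3)
    pq.push('high', 1)
    pq.push('mid', 2)
    stackOrder = [s.pop() for _ in range(3)]   # ['c', 'b', 'a']
    queueOrder = [q.pop() for _ in range(3)]   # ['a', 'b', 'c']
    pqOrder = [pq.pop() for _ in range(3)]     # ['high', 'mid', 'low']
    return stackOrder, queueOrder, pqOrder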
"""
Data structures and functions useful for various course projects
The search project should not need anything below this line.
"""
class Counter(dict):
"""
A counter keeps track of counts for a set of keys.
The counter class is an extension of the standard python
dictionary type. It is specialized to have number values
(integers or floats), and includes a handful of additional
functions to ease the task of counting data. In particular,
all keys are defaulted to have value 0. Using a dictionary:
a = {}
print a['test']
would give an error, while the Counter class analogue:
>>> a = Counter()
>>> print a['test']
0
returns the default 0 value. Note that to reference a key
that you know is contained in the counter,
you can still use the dictionary syntax:
>>> a = Counter()
>>> a['test'] = 2
>>> print a['test']
2
This is very useful for counting things without initializing their counts,
see for example:
>>> a['blah'] += 1
>>> print a['blah']
1
The counter also includes additional functionality useful in implementing
the classifiers for this assignment. Two counters can be added,
subtracted or multiplied together. See below for details. They can
also be normalized and their total count and arg max can be extracted.
"""
def __getitem__(self, idx):
self.setdefault(idx, 0)
return dict.__getitem__(self, idx)
def incrementAll(self, keys, count):
"""
Increments all elements of keys by the same count.
>>> a = Counter()
>>> a.incrementAll(['one','two', 'three'], 1)
>>> a['one']
1
>>> a['two']
1
"""
for key in keys:
self[key] += count
def argMax(self):
"""
Returns the key with the highest value.
"""
if len(self.keys()) == 0: return None
all = self.items()
values = [x[1] for x in all]
maxIndex = values.index(max(values))
return all[maxIndex][0]
def sortedKeys(self):
"""
Returns a list of keys sorted by their values. Keys
with the highest values will appear first.
>>> a = Counter()
>>> a['first'] = -2
>>> a['second'] = 4
>>> a['third'] = 1
>>> a.sortedKeys()
['second', 'third', 'first']
"""
sortedItems = self.items()
compare = lambda x, y: sign(y[1] - x[1])
sortedItems.sort(cmp=compare)
return [x[0] for x in sortedItems]
def totalCount(self):
"""
Returns the sum of counts for all keys.
"""
return sum(self.values())
def normalize(self):
"""
Edits the counter such that the total count of all
keys sums to 1. The ratio of counts for all keys
will remain the same. Note that normalizing an empty
Counter will result in an error.
"""
total = float(self.totalCount())
if total == 0: return
for key in self.keys():
self[key] = self[key] / total
def divideAll(self, divisor):
"""
Divides all counts by divisor
"""
divisor = float(divisor)
for key in self:
self[key] /= divisor
def copy(self):
"""
Returns a copy of the counter
"""
return Counter(dict.copy(self))
def __mul__(self, y):
"""
Multiplying two counters gives the dot product of their vectors where
each unique label is a vector element.
>>> a = Counter()
>>> b = Counter()
>>> a['first'] = -2
>>> a['second'] = 4
>>> b['first'] = 3
>>> b['second'] = 5
>>> a['third'] = 1.5
>>> a['fourth'] = 2.5
>>> a * b
14
"""
sum = 0
x = self
if len(x) > len(y):
x, y = y, x
for key in x:
if key not in y:
continue
sum += x[key] * y[key]
return sum
def __radd__(self, y):
"""
Adding another counter to a counter increments the current counter
by the values stored in the second counter.
>>> a = Counter()
>>> b = Counter()
>>> a['first'] = -2
>>> a['second'] = 4
>>> b['first'] = 3
>>> b['third'] = 1
>>> a += b
>>> a['first']
1
"""
for key, value in y.items():
self[key] += value
def __add__(self, y):
"""
Adding two counters gives a counter with the union of all keys and
counts of the second added to counts of the first.
>>> a = Counter()
>>> b = Counter()
>>> a['first'] = -2
>>> a['second'] = 4
>>> b['first'] = 3
>>> b['third'] = 1
>>> (a + b)['first']
1
"""
addend = Counter()
for key in self:
if key in y:
addend[key] = self[key] + y[key]
else:
addend[key] = self[key]
for key in y:
if key in self:
continue
addend[key] = y[key]
return addend
def __sub__(self, y):
"""
Subtracting a counter from another gives a counter with the union of all keys and
counts of the second subtracted from counts of the first.
>>> a = Counter()
>>> b = Counter()
>>> a['first'] = -2
>>> a['second'] = 4
>>> b['first'] = 3
>>> b['third'] = 1
>>> (a - b)['first']
-5
"""
addend = Counter()
for key in self:
if key in y:
addend[key] = self[key] - y[key]
else:
addend[key] = self[key]
for key in y:
if key in self:
continue
addend[key] = -1 * y[key]
return addend
def raiseNotDefined():
fileName = inspect.stack()[1][1]
line = inspect.stack()[1][2]
method = inspect.stack()[1][3]
print "*** Method not implemented: %s at line %s of %s" % (method, line, fileName)
sys.exit(1)
def normalize(vectorOrCounter):
"""
normalize a vector or counter by dividing each value by the sum of all values
"""
normalizedCounter = Counter()
if type(vectorOrCounter) == type(normalizedCounter):
counter = vectorOrCounter
total = float(counter.totalCount())
if total == 0: return counter
for key in counter.keys():
value = counter[key]
normalizedCounter[key] = value / total
return normalizedCounter
else:
vector = vectorOrCounter
s = float(sum(vector))
if s == 0: return vector
return [el / s for el in vector]
def nSample(distribution, values, n):
if sum(distribution) != 1:
distribution = normalize(distribution)
rand = [random.random() for i in range(n)]
rand.sort()
samples = []
samplePos, distPos, cdf = 0, 0, distribution[0]
while samplePos < n:
if rand[samplePos] < cdf:
samplePos += 1
samples.append(values[distPos])
else:
distPos += 1
cdf += distribution[distPos]
return samples
def sample(distribution, values=None):
if type(distribution) == Counter:
items = sorted(distribution.items())
distribution = [i[1] for i in items]
values = [i[0] for i in items]
if sum(distribution) != 1:
distribution = normalize(distribution)
choice = random.random()
i, total = 0, distribution[0]
while choice > total:
i += 1
total += distribution[i]
return values[i]
def sampleFromCounter(ctr):
items = sorted(ctr.items())
return sample([v for k, v in items], [k for k, v in items])
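# --- Illustrative example (added for clarity, not part of the original file) ---
# Draws one value from a categorical distribution held in a Counter; with these
# weights 'b' should come back roughly twice as often as 'a'. The helper name
# _sampleDemo is invented for this sketch.
def _sampleDemo():
    weights = Counter()
    weights['a'] = 1.0
    weights['b'] = 2.0
    return sample(weights)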
def getProbability(value, distribution, values):
"""
Gives the probability of a value under a discrete distribution
defined by (distributions, values).
"""
total = 0.0
for prob, val in zip(distribution, values):
if val == value:
total += prob
return total
def flipCoin(p):
r = random.random()
return r < p
def chooseFromDistribution(distribution):
"Takes either a counter or a list of (prob, key) pairs and samples"
if type(distribution) == dict or type(distribution) == Counter:
return sample(distribution)
r = random.random()
base = 0.0
for prob, element in distribution:
base += prob
if r <= base: return element
def nearestPoint(pos):
"""
Finds the nearest grid point to a position (discretizes).
"""
( current_row, current_col ) = pos
grid_row = int(current_row + 0.5)
grid_col = int(current_col + 0.5)
return ( grid_row, grid_col )
def sign(x):
"""
Returns 1 or -1 depending on the sign of x
"""
if ( x >= 0 ):
return 1
else:
return -1
def arrayInvert(array):
"""
Inverts a matrix stored as a list of lists.
"""
result = [[] for i in array]
for outer in array:
for inner in range(len(outer)):
result[inner].append(outer[inner])
return result
def matrixAsList(matrix, value=True):
"""
Turns a matrix into a list of coordinates matching the specified value
"""
rows, cols = len(matrix), len(matrix[0])
cells = []
for row in range(rows):
for col in range(cols):
if matrix[row][col] == value:
cells.append(( row, col ))
return cells
def lookup(name, namespace):
"""
Get a method or class from any imported module from its name.
Usage: lookup(functionName, globals())
"""
dots = name.count('.')
if dots > 0:
moduleName, objName = '.'.join(name.split('.')[:-1]), name.split('.')[-1]
module = __import__(moduleName)
return getattr(module, objName)
else:
modules = [obj for obj in namespace.values() if str(type(obj)) == "<type 'module'>"]
options = [getattr(module, name) for module in modules if name in dir(module)]
options += [obj[1] for obj in namespace.items() if obj[0] == name]
if len(options) == 1: return options[0]
        if len(options) > 1: raise Exception, 'Name conflict for %s' % name
raise Exception, '%s not found as a method or class' % name
def pause():
"""
Pauses the output stream awaiting user feedback.
"""
print "<Press enter/return to continue>"
raw_input()
# code to handle timeouts
#
# FIXME
# NOTE: TimeoutFunction is NOT reentrant. Later timeouts will silently
# disable earlier timeouts. Could be solved by maintaining a global list
# of active time outs. Currently, questions which have test cases calling
# this have all student code so wrapped.
#
import signal
import time
class TimeoutFunctionException(Exception):
"""Exception to raise on a timeout"""
pass
class TimeoutFunction:
def __init__(self, function, timeout):
self.timeout = timeout
self.function = function
def handle_timeout(self, signum, frame):
raise TimeoutFunctionException()
def __call__(self, *args, **keyArgs):
# If we have SIGALRM signal, use it to cause an exception if and
# when this function runs too long. Otherwise check the time taken
# after the method has returned, and throw an exception then.
if hasattr(signal, 'SIGALRM'):
old = signal.signal(signal.SIGALRM, self.handle_timeout)
signal.alarm(self.timeout)
try:
result = self.function(*args, **keyArgs)
finally:
signal.signal(signal.SIGALRM, old)
signal.alarm(0)
else:
startTime = time.time()
result = self.function(*args, **keyArgs)
timeElapsed = time.time() - startTime
if timeElapsed >= self.timeout:
self.handle_timeout(None, None)
return result
_ORIGINAL_STDOUT = None
_ORIGINAL_STDERR = None
_MUTED = False
class WritableNull:
def write(self, string):
pass
def mutePrint():
global _ORIGINAL_STDOUT, _ORIGINAL_STDERR, _MUTED
if _MUTED:
return
_MUTED = True
_ORIGINAL_STDOUT = sys.stdout
# _ORIGINAL_STDERR = sys.stderr
sys.stdout = WritableNull()
#sys.stderr = WritableNull()
def unmutePrint():
global _ORIGINAL_STDOUT, _ORIGINAL_STDERR, _MUTED
if not _MUTED:
return
_MUTED = False
sys.stdout = _ORIGINAL_STDOUT
# sys.stderr = _ORIGINAL_STDERR
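# --- Illustrative example (added for clarity, not part of the original file) ---
# Shows how TimeoutFunction wraps a callable; the 2-second limit and the helper
# name _timeoutDemo are arbitrary choices made for this sketch.
def _timeoutDemo():
    def slowIdentity(x):
        time.sleep(0.01)
        return x
    guarded = TimeoutFunction(slowIdentity, 2)   # abort after roughly 2 seconds
    try:
        return guarded(42)
    except TimeoutFunctionException:
        return None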
|
mit
| -1,549,650,670,089,234,400
| 36.945525
| 100
| 0.556262
| false
| 3.473759
| false
| false
| false
|
MrHamdulay/myjvm
|
defaultclassloader.py
|
1
|
1741
|
from __future__ import absolute_import
import os.path
try:
# let's not use rzipfile for now (it's really really slow in python)
raise Exception
from rpython.rlib.rzipfile import RZipFile
ZipFile = RZipFile
except:
RZipFile = None
from zipfile import ZipFile
from classreader import ClassReader
from excep import ClassNotFoundException
class DefaultClassLoader:
def __init__(self, classpath):
self.classpath = classpath
self.lazy_classes = {}
def load_jar(self, jarfilename):
jar = ZipFile(jarfilename)
for zipinfo in jar.filelist:
classname = zipinfo.filename
if not classname.endswith('.class'):
continue
self.lazy_classes[classname.split('.class')[0]] = jar
def load_class_from_jar(self, classname):
if RZipFile:
return self.lazy_classes[classname].read(classname+'.class')
else:
return self.lazy_classes[classname].open(classname+'.class').read()
def load(self, classname):
class_file = None
if classname in self.lazy_classes:
class_file = self.load_class_from_jar(classname)
else:
parts = classname.split('/')
class_file = None
for classpath in self.classpath:
class_filename = '%s/%s.class' % (classpath, classname)
if os.path.isfile(class_filename):
class_file = open(class_filename).read()
break
else:
raise ClassNotFoundException('class file not found: %s' % classname)
assert class_file is not None
klass = ClassReader(classname, class_file).klass
return klass
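# --- Illustrative usage (added for clarity, not part of the original module) ---
# The classpath entry and the class name below are placeholders.
if __name__ == '__main__':
    loader = DefaultClassLoader(['.'])
    try:
        print loader.load('java/lang/Object')
    except ClassNotFoundException as e:
        print e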
|
mit
| 1,773,738,263,681,068,000
| 30.089286
| 84
| 0.608271
| false
| 4.464103
| false
| false
| false
|
Murali-group/GraphSpace
|
graphspace/authorization.py
|
1
|
3499
|
import applications.users as users
import applications.graphs as graphs
from graphspace.exceptions import UserNotAuthorized
from graphspace.utils import get_request_user
class UserRole:
ADMIN = 3
LOGGED_IN = 2
LOGGED_OFF = 1 # When user is not logged in to GraphSpace.
def user_role(request):
"""
Returns the user role for the user making the request.
Parameters
----------
request: HTTP request
Returns
-------
Returns UserRole
"""
user_email = get_request_user(request)
user = users.controllers.get_user(request, user_email) if user_email is not None else None
if user is None:
return UserRole.LOGGED_OFF
elif user.is_admin:
return UserRole.ADMIN
else:
return UserRole.LOGGED_IN
def validate(request, permission, graph_id=None, group_id=None, layout_id=None):
"""
Validates if the user has the given permissions based on information like graph id, group id or layout id.
Returns
-------
Nothing
Raises
-------
    UserNotAuthorized - if the user doesn't have the given permission.
"""
# TODO: Each application module should implement a validate method.
# Then this validate method can plug into the implemented validate method to expose overall validation functionality for the project.
if graph_id is not None:
if permission == 'GRAPH_READ' and not graphs.controllers.is_user_authorized_to_view_graph(request, username=get_request_user(request), graph_id = graph_id):
raise UserNotAuthorized(request)
if permission == 'GRAPH_UPDATE' and not graphs.controllers.is_user_authorized_to_update_graph(request, username=get_request_user(request), graph_id = graph_id):
raise UserNotAuthorized(request)
if permission == 'GRAPH_DELETE' and not graphs.controllers.is_user_authorized_to_delete_graph(request, username=get_request_user(request), graph_id = graph_id):
raise UserNotAuthorized(request)
if permission == 'GRAPH_SHARE' and not graphs.controllers.is_user_authorized_to_share_graph(request, username=get_request_user(request), graph_id = graph_id):
raise UserNotAuthorized(request)
if group_id is not None:
if permission == 'GROUP_READ' and not users.controllers.is_user_authorized_to_view_group(request, username=get_request_user(request), group_id = group_id):
raise UserNotAuthorized(request)
if permission == 'GROUP_UPDATE' and not users.controllers.is_user_authorized_to_update_group(request, username=get_request_user(request), group_id = group_id):
raise UserNotAuthorized(request)
if permission == 'GROUP_DELETE' and not users.controllers.is_user_authorized_to_delete_group(request, username=get_request_user(request), group_id = group_id):
raise UserNotAuthorized(request)
if permission == 'GROUP_SHARE' and not users.controllers.is_user_authorized_to_share_with_group(request, username=get_request_user(request), group_id = group_id):
raise UserNotAuthorized(request)
if layout_id is not None:
if permission == 'LAYOUT_READ' and not graphs.controllers.is_user_authorized_to_view_layout(request, username=get_request_user(request), layout_id = layout_id):
raise UserNotAuthorized(request)
if permission == 'LAYOUT_UPDATE' and not graphs.controllers.is_user_authorized_to_update_layout(request, username=get_request_user(request), layout_id = layout_id):
raise UserNotAuthorized(request)
if permission == 'LAYOUT_DELETE' and not graphs.controllers.is_user_authorized_to_delete_layout(request, username=get_request_user(request), layout_id = layout_id):
raise UserNotAuthorized(request)
return
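# --- Illustrative sketch (added for clarity, not part of GraphSpace itself) ---
# Shows how a caller might combine user_role() and validate(); the helper name
# can_update_graph is invented for this example.
def can_update_graph(request, graph_id):
    if user_role(request) == UserRole.LOGGED_OFF:
        return False
    try:
        validate(request, 'GRAPH_UPDATE', graph_id=graph_id)
        return True
    except UserNotAuthorized:
        return False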
|
gpl-2.0
| 5,456,867,437,812,151,000
| 44.454545
| 166
| 0.76136
| false
| 3.481592
| false
| false
| false
|
moagstar/python-uncompyle6
|
uncompyle6/parsers/astnode.py
|
1
|
1501
|
import sys
from uncompyle6 import PYTHON3
from uncompyle6.scanners.tok import NoneToken
from spark_parser.ast import AST as spark_AST
if PYTHON3:
intern = sys.intern
class AST(spark_AST):
def isNone(self):
"""An AST None token. We can't use regular list comparisons
because AST token offsets might be different"""
return len(self.data) == 1 and NoneToken == self.data[0]
def __repr__(self):
return self.__repr1__('', None)
def __repr1__(self, indent, sibNum=None):
rv = str(self.type)
if sibNum is not None:
rv = "%2d. %s" % (sibNum, rv)
enumerate_children = False
if len(self) > 1:
rv += " (%d)" % (len(self))
enumerate_children = True
rv = indent + rv
indent += ' '
i = 0
for node in self:
if hasattr(node, '__repr1__'):
if enumerate_children:
child = node.__repr1__(indent, i)
else:
child = node.__repr1__(indent, None)
else:
inst = node.format(line_prefix='L.')
if inst.startswith("\n"):
# Nuke leading \n
inst = inst[1:]
if enumerate_children:
child = indent + "%2d. %s" % (i, inst)
else:
child = indent + inst
pass
rv += "\n" + child
i += 1
return rv
|
mit
| -742,535,194,500,020,400
| 30.93617
| 67
| 0.476349
| false
| 4.112329
| false
| false
| false
|
miqui/python-hpOneView
|
examples/scripts/get-network-set.py
|
1
|
3747
|
#!/usr/bin/env python
###
# (C) Copyright (2012-2015) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import range
from future import standard_library
standard_library.install_aliases()
import sys
import re
PYTHON_VERSION = sys.version_info[:3]
PY2 = (PYTHON_VERSION[0] == 2)
if PY2:
if PYTHON_VERSION < (2, 7, 9):
raise Exception('Must use Python 2.7.9 or later')
elif PYTHON_VERSION < (3, 4):
raise Exception('Must use Python 3.4 or later')
import hpOneView as hpov
from pprint import pprint
def acceptEULA(con):
# See if we need to accept the EULA before we try to log in
con.get_eula_status()
try:
if con.get_eula_status() is True:
print('EULA display needed')
con.set_eula('no')
except Exception as e:
print('EXCEPTION:')
print(e)
def login(con, credential):
    # Login with given credentials
try:
con.login(credential)
except:
print('Login failed')
def getnetset(net):
sets = net.get_networksets()
pprint(sets)
def main():
parser = argparse.ArgumentParser(add_help=True,
formatter_class=argparse.RawTextHelpFormatter,
description='''
Display Network Sets
Usage: ''')
parser.add_argument('-a', dest='host', required=True,
help='''
HP OneView Appliance hostname or IP address''')
parser.add_argument('-u', dest='user', required=False,
default='Administrator',
help='''
HP OneView Username''')
parser.add_argument('-p', dest='passwd', required=True,
help='''
HP OneView Password''')
parser.add_argument('-c', dest='cert', required=False,
help='''
Trusted SSL Certificate Bundle in PEM (Base64 Encoded DER) Format''')
parser.add_argument('-y', dest='proxy', required=False,
help='''
    Proxy (host:port format)''')
args = parser.parse_args()
credential = {'userName': args.user, 'password': args.passwd}
con = hpov.connection(args.host)
net = hpov.networking(con)
if args.proxy:
con.set_proxy(args.proxy.split(':')[0], args.proxy.split(':')[1])
if args.cert:
con.set_trusted_ssl_bundle(args.cert)
login(con, credential)
acceptEULA(con)
getnetset(net)
if __name__ == '__main__':
import sys
import argparse
sys.exit(main())
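# --- Illustrative invocation (added for clarity; the appliance address and
# credentials below are placeholders, not real values) ---
#
#   python get-network-set.py -a oneview.example.com -u Administrator -p secret
#
# Add -c <bundle.pem> to pass a trusted SSL certificate bundle, or -y host:port
# to go through a proxy.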
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
|
mit
| 5,146,794,782,429,188,000
| 31.582609
| 79
| 0.655991
| false
| 4.024705
| false
| false
| false
|
Renmusxd/RSwarm
|
bot.py
|
1
|
8829
|
import numpy
from collections import OrderedDict
class Bot:
ID_NUM = 0
# Populated by GET_NINPUTS
VISION = None
INPUTS = None
NINPUTS = None
# Populated by GET_NACTIONS
ACTIONS = None
NACTIONS = None
VIEW_DIST = 100.0
FOV = 60 # Angular distance from center
VISION_BINS = 5
MATE_TIMER = 200
MAX_ENERGY = 1000
MOVE_SPEED = 1.0
SPRINT_SPEED = 3.0
TURN_SPEED = 5.0
# Radius for actions like attacking and mating
ACTION_RADIUS = 10
EAT_AMOUNT = 20
# Rewards
DEATH_REWARD = -100.
ATTACK_PRED_PRED_REWARD = 20.
ATTACK_PRED_PREY_REWARD = 50.
ATTACK_PREY_PRED_REWARD = 5.
ATTACK_PREY_PREY_REWARD = -20.
ATTACKED_REWARD = -50.
ATTACK_FAILED_REWARD = -0.0
EAT_REWARD = 100. # Scaled by hunger: R (E - e) / E
MATE_REWARD = 100.
FAILED_MATE_REWARD = -1.0
def __init__(self, x, y, d, world, color, can_graze, energy=MAX_ENERGY):
"""
Construct a bot
:param x: x position
:param y: y position
:param d: direction (0-360)[OPENGL]
:param world: world to ask for information
"""
self.x, self.y, self.d = x, y, d
self.world = world
self.id = Bot.ID_NUM
Bot.ID_NUM += 1
self.can_graze = can_graze
self.energy = energy
self.r, self.g, self.b = color
self.dead = False
# Indicate that this Bot is attempting to mate
self.mating = False
self.attacking = False
self.attacked = False
self.mate_timer = 0
self.mem = None
def senses(self):
# Evaluate vision
vision = Bot.VISION.eval(self)
# Evaluate introspection
body = numpy.array([v(self) for v in Bot.INPUTS.values()])
state = numpy.concatenate((body, vision))
return state
def memory(self):
return self.mem
def set_memory(self, memory):
self.mem = memory
def act(self, action):
reward_acc = 0
still, left, lmov, forward, \
rmov, right, sprint, eat, \
mate, atck = (action == i for i in range(Bot.GET_NACTIONS()))
if eat:
if self.can_graze:
toeat = min(Bot.EAT_AMOUNT, Bot.MAX_ENERGY - self.energy)
eaten = self.world.eat(self.x, self.y, toeat)
self.energy += eaten
# reward_acc += eaten/Bot.EAT_AMOUNT * (Bot.MAX_ENERGY - self.energy)/Bot.MAX_ENERGY * Bot.EAT_REWARD
reward_acc += eaten * Bot.EAT_REWARD * (Bot.MAX_ENERGY - self.energy)/(Bot.EAT_AMOUNT * Bot.MAX_ENERGY)
elif mate:
# Check if meets mating criteria
# Reward will be added later if mate is successful
if self.mate_timer == Bot.MATE_TIMER and self.energy > Bot.MAX_ENERGY/2:
self.mating = True
elif atck:
self.attacking = True
elif sprint:
self.x += Bot.SPRINT_SPEED * numpy.cos(numpy.deg2rad(self.d))
self.y += Bot.SPRINT_SPEED * numpy.sin(numpy.deg2rad(self.d))
self.energy -= (Bot.SPRINT_SPEED - 1)
elif not still:
if left or lmov:
self.d -= Bot.TURN_SPEED
elif right or rmov:
self.d += Bot.TURN_SPEED
if lmov or forward or rmov:
self.x += Bot.MOVE_SPEED * numpy.cos(numpy.deg2rad(self.d))
self.y += Bot.MOVE_SPEED * numpy.sin(numpy.deg2rad(self.d))
self.energy -= 1
self.mate_timer += 1
self.mate_timer = min(self.mate_timer, Bot.MATE_TIMER)
# Punish death
if self.energy <= 0 or self.world.out_of_bounds(self.x,self.y) or self.attacked:
reward_acc += self.DEATH_REWARD
self.dead = True
return reward_acc
def color(self):
return self.r, self.g, self.b
def mate_succeed(self, other_bot):
self.mating = False
self.mate_timer = 0
self.energy -= Bot.MAX_ENERGY/2
return Bot.MATE_REWARD
def mate_failed(self):
self.mating = False
return Bot.FAILED_MATE_REWARD
def attack_succeed(self, other):
"""
Callback for successful attacks
:param other:
:return: Reward
"""
self.attacking = False
other.attacked = True
if self.can_graze:
return Bot.ATTACK_PREY_PREY_REWARD if other.can_graze else Bot.ATTACK_PREY_PRED_REWARD
else:
#self.energy += Bot.MAX_ENERGY + other.energy
self.energy = Bot.MAX_ENERGY
return Bot.ATTACK_PRED_PREY_REWARD if other.can_graze else Bot.ATTACK_PRED_PRED_REWARD
def attack_failed(self):
self.attacking = False
return Bot.ATTACK_FAILED_REWARD
def was_attacked(self, other):
self.attacked = True
return Bot.ATTACKED_REWARD
@staticmethod
def split_senses(senses):
"""
Splits senses into introspection senses and vision
:param senses: raw input
:return: inputs, vision, distance
"""
ins = senses[:len(Bot.INPUTS)]
vis, dist = Bot.VISION.split_vision(senses[len(Bot.INPUTS):])
return ins, vis, dist
@staticmethod
def label_inputs(inputs):
return {k:v for k,v in zip(Bot.INPUTS.keys(),inputs)}
@staticmethod
def label_actions(actions):
return {k:v for k,v in zip(Bot.ACTIONS,actions)}
@staticmethod
def action_label(action):
if 0 <= action < len(Bot.ACTIONS):
return Bot.ACTIONS[action]
else:
return None
@staticmethod
def make_actions_from_label(label):
actindx = Bot.ACTIONS.index(label)
return max(actindx,0) # No -1 values
@staticmethod
def make_brain(braincons, name):
"""
Make a brain suitable for this bot
:param name: brain name
:param braincons: brain constructor function
:return: instance of brain to use
"""
brain = braincons(name, Bot.GET_NINPUTS(), Bot.GET_NACTIONS())
return brain
@staticmethod
def GET_NINPUTS():
if Bot.INPUTS is None:
Bot.INPUTS = OrderedDict()
# Basic senses
Bot.INPUTS['energy'] = lambda b: min(b.energy / Bot.MAX_ENERGY, 1.0)
Bot.INPUTS['mate'] = lambda b: b.mate_timer / Bot.MATE_TIMER
Bot.INPUTS['tile'] = lambda b: b.world.get_tile_perc(b.x,b.y)
# Vision
Bot.VISION = BotVision("gray")
#Bot.VISION = BotVision("rgb")
Bot.NINPUTS = len(Bot.INPUTS) + len(Bot.VISION)
return Bot.NINPUTS
@staticmethod
def GET_NACTIONS():
if Bot.ACTIONS is None:
Bot.ACTIONS = ["still", "left", "lmov", "forward", "rmov",
"right", "sprint", "eat", "mate", "atck"]
Bot.NACTIONS = len(Bot.ACTIONS)
return Bot.NACTIONS
class BotVision:
GRAY_SIZE = 2
RGB_SIZE = 4
def __init__(self,world,color='gray'):
"""
Construct vision mechanic
        :param world: world the bot lives in (stored for reference)
:param color: color format to use (gray or rgb)
"""
self.color = color
self.world = world
if self.color == 'gray':
self.size = Bot.VISION_BINS * BotVision.GRAY_SIZE
self.shape = (Bot.VISION_BINS, BotVision.GRAY_SIZE)
elif self.color == 'rgb':
self.size = Bot.VISION_BINS * BotVision.RGB_SIZE
self.shape = (Bot.VISION_BINS, BotVision.RGB_SIZE)
def eval(self, bot):
# Gets back 3 colors + 1 distance
vision = bot.world.get_vision(bot.x, bot.y, bot.d, Bot.FOV, Bot.VIEW_DIST, Bot.VISION_BINS)
if self.color == "gray":
# Convert to [-1, 1] scale
vscale = (-vision[:, 0] + vision[:, 2])
distances = vision[:, 3]
new_vision = numpy.ndarray(shape=self.shape)
new_vision[:,0] = vscale
new_vision[:,1] = distances
return new_vision.flatten()
else:
return vision.flatten()
def split_vision(self, vision):
"""
Split into vision and distance components
:param vision: raw vision input (as is output from eval)
:return: vision, distance
"""
vis = vision.reshape(self.shape)
return vis[:,:-1], vis[:,-1]
def apply_filter(self, colors):
return BotVision.filter(colors, self.color)
@staticmethod
def filter(colors,colorfilter):
if colorfilter == "gray":
return -colors[:,0] + colors[:,2]
elif colorfilter == "rgb":
return colors
def __len__(self):
return self.size
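# --- Illustrative example (added for clarity, not part of the original simulation) ---
# Shows the mapping between action indices and labels exposed by the Bot class;
# the helper name _action_labels_demo is invented for this sketch.
def _action_labels_demo():
    n = Bot.GET_NACTIONS()
    labels = [Bot.action_label(i) for i in range(n)]   # ['still', 'left', ..., 'atck']
    assert Bot.make_actions_from_label('eat') == labels.index('eat')
    return labels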
|
mit
| 7,979,461,654,846,873,000
| 29.236301
| 119
| 0.562691
| false
| 3.45693
| false
| false
| false
|
mvaled/sentry
|
src/sentry/api/endpoints/organization_eventid.py
|
1
|
2224
|
from __future__ import absolute_import
import six
from rest_framework.response import Response
from sentry import eventstore
from sentry.api.base import DocSection
from sentry.api.bases.organization import OrganizationEndpoint
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.serializers import serialize
from sentry.models import Project
from sentry.utils.apidocs import scenario, attach_scenarios
@scenario("ResolveEventId")
def resolve_event_id_scenario(runner):
runner.request(
method="GET",
path="/organizations/%s/eventids/%s/" % (runner.org.slug, runner.default_event.event_id),
)
class EventIdLookupEndpoint(OrganizationEndpoint):
doc_section = DocSection.ORGANIZATIONS
@attach_scenarios([resolve_event_id_scenario])
def get(self, request, organization, event_id):
"""
        Resolve an Event ID
        ```````````````````
        This resolves an event ID to the project slug, internal issue ID and internal event ID.
:pparam string organization_slug: the slug of the organization the
event ID should be looked up in.
:param string event_id: the event ID to look up.
:auth: required
"""
# Largely copied from ProjectGroupIndexEndpoint
if len(event_id) != 32:
return Response({"detail": "Event ID must be 32 characters."}, status=400)
project_slugs_by_id = dict(
Project.objects.filter(organization=organization).values_list("id", "slug")
)
try:
event = eventstore.get_events(
filter_keys={"project_id": project_slugs_by_id.keys(), "event_id": event_id},
limit=1,
)[0]
except IndexError:
raise ResourceDoesNotExist()
else:
return Response(
{
"organizationSlug": organization.slug,
"projectSlug": project_slugs_by_id[event.project_id],
"groupId": six.text_type(event.group_id),
"eventId": six.text_type(event.id),
"event": serialize(event, request.user),
}
)
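# --- Illustrative request (added for clarity, not part of the endpoint itself) ---
#
#   GET /organizations/<organization_slug>/eventids/<32-character-event-id>/
#
# A successful response carries organizationSlug, projectSlug, groupId, eventId
# and the serialized event, as built in the handler above.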
|
bsd-3-clause
| -6,328,588,570,410,256,000
| 33.75
| 97
| 0.61241
| false
| 4.377953
| false
| false
| false
|
unomena/tunobase
|
tunobase/corporate/company_info/contact/models.py
|
1
|
1349
|
"""
CONTACT APP
This module sets up the database structure for the contact app.
Classes:
ContactMessage
Functions:
n/a
Created on 23 Oct 2013
@author: michael
"""
from django.db import models
from django.contrib.sites.models import Site
from django.conf import settings
from tunobase.corporate.company_info.contact import signals
class ContactMessage(models.Model):
"""Contact message sent from the Site."""
user = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True)
name = models.CharField(max_length=255)
email = models.EmailField()
mobile_number = models.CharField(max_length=16, blank=True, null=True)
message = models.TextField()
site = models.ForeignKey(Site, blank=True, null=True)
timestamp = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
"""Return a unicode object."""
return u'%s' % self.name
def send(self):
"""Fire off signal to be received by handlers."""
signals.contact_message_saved.send(
sender=self.__class__,
contact_message_id=self.id
)
def save(self, *args, **kwargs):
""" Save contact form."""
if self.site is None:
self.site = Site.objects.get_current()
super(ContactMessage, self).save(*args, **kwargs)
self.send()
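# --- Illustrative usage (added for clarity, not part of the app itself) ---
# A view would typically create and persist a message like this; the helper name
# and the field values are placeholders.
def _create_contact_message(name, email, message, user=None):
    contact_message = ContactMessage(
        user=user,
        name=name,
        email=email,
        message=message,
    )
    contact_message.save()  # fills in the current Site and fires the saved signal
    return contact_message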
|
bsd-3-clause
| 9,177,308,814,407,269,000
| 23.981481
| 77
| 0.656783
| false
| 3.86533
| false
| false
| false
|
sebastienhupin/qxrad
|
qooxdoo/tool/pylib/ecmascript/frontend/tree.py
|
1
|
24785
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# qooxdoo - the new era of web development
#
# http://qooxdoo.org
#
# Copyright:
# 2006-2010 1&1 Internet AG, Germany, http://www.1und1.de
#
# License:
# LGPL: http://www.gnu.org/licenses/lgpl.html
# EPL: http://www.eclipse.org/org/documents/epl-v10.php
# See the LICENSE file in the project's top-level directory for details.
#
# Authors:
# * Sebastian Werner (wpbasti)
# * Fabian Jakobs (fjakobs)
#
################################################################################
import sys, os, copy, re
from misc import util
##
#<h2>Module Description</h2>
#<pre>
# NAME
# tree.py -- providing a tree data structure
#
# SYNTAX
# tree.py --help
#
# or
#
# import tree
# result = tree.Node()
#
# creates a new tree node
#
# DESCRIPTION
# The main provision by this module is the Node class. This lets you create
# arbitrary trees made out of linked nodes (parent - child relation).
#
#</pre>
##
##
# Some nice short description of Foo
#
# @param a Number of foos to bar
class NodeAccessException (Exception):
def __init__ (self, msg, node):
Exception.__init__(self, msg)
self.node = node
NODE_VARIABLE_TYPES = ("dotaccessor", "identifier")
NODE_STATEMENT_CONTAINERS = ("statements", "block")
class Node(object):
def __init__ (self, ntype):
self.type = ntype
self.parent = None
self.children = []
self.attributes = {}
self.dep = None # a potential DependencyItem()
def __str__(self):
return nodeToXmlStringNR(self)
def hasAttributes(self):
#return hasattr(self, "attributes")
# ApiLoader._isNodeIdentical() needs this len() check
# TODO: remove commented calls to hasAttributes() and hasattr(self,attributes)
return len(self.attributes)
def set(self, key, value):
"""Sets an attribute"""
if not isinstance(value, (basestring, int, long, float, complex, bool)):
raise NodeAccessException("'value' is no string or number: " + str(value), self)
#if not self.hasAttributes():
if False:
self.attributes = {}
self.attributes[key] = value
return self
def get(self, key, default = None):
value = None
#if hasattr(self, "attributes") and key in self.attributes:
if key in self.attributes:
value = self.attributes[key]
if value != None:
return value
elif default != None:
return default
else:
raise NodeAccessException("Node " + self.type + " has no attribute " + key, self)
def remove(self, key):
if not key in self.attributes:
return
del self.attributes[key]
if len(self.attributes) == 0:
del self.attributes
##
# Make a default copy of self (this includes instanceof)
def clone(self):
clone_ = copy.copy(self)
# keep .attributes non-shared
if True:
clone_.attributes = copy.copy(self.attributes)
return clone_
##
# Copy the properties of self into other
# (this might not be entirely in sync with treegenerator.symbol())
def patch(self, other):
for attr, val in vars(self).items():
if attr in (
"type", "id", # preserve other's classification
"children", # don't adopt existing children (what would their .parent be?!)
"parent", # avoid tree relations
):
continue
setattr(other, attr, val)
# keep .attributes non-shared
if hasattr(self, "attributes"):
other.attributes = copy.copy(self.attributes)
def hasParent(self):
return self.parent
##
# checks whether the node hierarchy leading to node ends with contextPath,
# ie. if node.parent.type == contextPath[-1], node.parent.parent.type ==
# contextPath[-2] asf. Example: varNode.hasParentContext("call/operand")
# checks whether varNode.parent is "operand" and varNode.parent.parent is
# "call" type, ie. it's a function being called; the wildcard '*' is allowed
# to indicate any type on a particular level, like "value/*/operand"
def hasParentContext(self, contextPath):
path_elems = contextPath.split('/')
currNode = self
for path_elem in reversed(path_elems):
if currNode.parent:
if ( path_elem == '*' or currNode.parent.type == path_elem ):
currNode = currNode.parent
else:
return False
else:
return False # no parent, no match
return True
##
# return the chain of parent (types) of this node
def getParentChain(self):
chain = []
currNode = self
while currNode.parent:
chain.append(currNode.parent.type)
currNode = currNode.parent
return reversed (chain)
##
# return the root of the current tree
def getRoot(self):
rnode = self
while rnode.parent:
rnode = rnode.parent
return rnode
def hasChildren(self, ignoreComments = False):
if not ignoreComments:
return self.children
else:
return [c for c in self.children if c.type not in ("comment", "commentsBefore", "commentsAfter")]
getChildren = hasChildren
def addChild(self, childNode, index = None):
if childNode:
if childNode.parent and childNode in childNode.parent.children:
childNode.parent.removeChild(childNode)
if index != None:
self.children.insert(index, childNode)
else:
self.children.append(childNode)
childNode.parent = self
return self
def removeChild(self, childNode):
if self.children:
self.children.remove(childNode)
#childNode.parent = None
def removeAllChildren(self):
for child in self.children[:]:
self.children.remove(child)
def replaceChild(self, oldChild, newChild):
if oldChild in self.children and oldChild is not newChild:
if newChild.parent and newChild in newChild.parent.children:
newChild.parent.removeChild(newChild)
self.children.insert(self.children.index(oldChild), newChild)
newChild.parent = self
self.children.remove(oldChild)
##
# Get child by type or position
#
def getChild(self, spec, mandatory = True):
if self.children:
for pos,child in enumerate(self.children):
if pos==spec or child.type==spec:
return child
if mandatory:
raise NodeAccessException("Node '%s' has no child with type or position '%s'"
% (self.type, str(spec)), self)
def hasChildRecursive(self, ntype):
if isinstance(ntype, basestring):
if self.type == ntype:
return True
elif isinstance(ntype, util.FinSequenceTypes):
if self.type in ntype:
return True
if self.children:
for child in self.children:
if child.hasChildRecursive(ntype):
return True
return False
##
# Whether <node> is self, or a descendant in the tree rooted by self.
def contains(self, node):
if self is node:
return node
else:
for child in self.children:
if child.contains(node):
return node
return None
##
# TODO: Rename this to hasChildByType
def hasChild(self, ntype):
if self.children:
for child in self.children:
if isinstance(ntype, basestring):
if child.type == ntype:
return True
elif isinstance(ntype, list):
if child.type in ntype:
return True
return False
def getChildrenLength(self, ignoreComments=False):
if self.children:
if ignoreComments:
counter = 0
for child in self.children:
if not child.type in ["comment", "commentsBefore", "commentsAfter"]:
counter += 1
return counter
else:
return len(self.children)
return 0
def makeComplex(self):
makeComplex = self.get("makeComplex", '')
if makeComplex != '':
return makeComplex
else:
makeComplex = False
if self.type == "comment":
makeComplex = True
elif self.type == "block":
if self.children:
counter = 0
for child in self.children:
if child.type != "commentsAfter":
counter += 1
if counter > 1:
makeComplex = True
elif self.type == "loop":
if self.get("loopType") == "IF" and self.parent and self.parent.type == "elseStatement":
pass
else:
makeComplex = True
elif self.type == "function":
makeComplex = self.getChild("body").hasChild("block") and self.getChild("body").getChild("block").getChildrenLength() > 0
elif self.type in ["loop", "switch"]:
makeComplex = True
elif self.hasChild("commentsBefore"):
makeComplex = True
# Final test: Ask the children (slower)
if not makeComplex and not self.type in ["comment", "commentsBefore", "commentsAfter"]:
makeComplex = self.isComplex()
self.set("makeComplex", makeComplex)
# print "makeComplex: %s = %s" % (self.type, makeComplex)
return makeComplex
def isComplex(self):
isComplex = self.get("isComplex", ())
if isComplex != ():
return isComplex
else:
isComplex = False
if not self.children:
isComplex = False
elif self.type == "block":
counter = 0
if self.children:
for child in self.children:
if child.type != "commentsAfter":
counter += 1
if child.hasChild("commentsBefore"):
counter += 1
if counter > 1:
break
if counter > 1:
isComplex = True
else:
if self.getChildrenLength() == 0:
isComplex = False
# in else, try to find the mode of the previous if first
elif self.parent and self.parent.type == "elseStatement":
isComplex = self.parent.parent.getChild("statement").hasComplexBlock()
# in if, try to find the mode of the parent if (if existent)
elif self.parent and self.parent.type == "statement" and self.parent.parent.type == "loop" and self.parent.parent.get("loopType") == "IF":
if self.parent.parent.parent and self.parent.parent.parent.parent:
if self.parent.parent.parent.parent.type == "loop":
isComplex = self.parent.parent.parent.parent.getChild("statement").hasComplexBlock()
# in catch/finally, try to find the mode of the try statement
elif self.parent and self.parent.parent and self.parent.parent.type in ["catch", "finally"]:
isComplex = self.parent.parent.parent.getChild("statement").hasComplexBlock()
elif self.type == "elseStatement":
if self.hasComplexBlock():
isComplex = True
elif self.hasChild("loop") and self.getChild("loop").getChild("statement").hasComplexBlock():
isComplex = True
elif self.type == "array" :
if self.getChildrenLength(True) > 5:
isComplex = True
elif self.type == "map" :
ml = self.getChildrenLength(True)
if ml > 1:
isComplex = True
# Final test: Ask the children (slower)
if not (self.type == "elseStatement" and self.hasChild("loop")):
if not isComplex and self.hasComplexChildren():
isComplex = True
# print self.type + " :: %s" % isComplex
self.set("isComplex", isComplex)
# print "isComplex: %s = %s" % (self.type, isComplex)
return isComplex
def hasComplexChildren(self):
if self.children:
for child in self.children:
if child.makeComplex():
return True
return False
def hasComplexBlock(self):
if self.hasChild("block"):
return self.getChild("block").isComplex()
return False
def hasBlockChildren(self):
if self.hasChild("block"):
return self.getChild("block").hasChildren()
return False
def getChildPosition(self, searchedChild, ignoreComments = False):
if self.children and searchedChild in self.children:
if ignoreComments:
counter = 0
for child in self.children:
if child == searchedChild:
return counter
if not child.type in ["comment", "commentsBefore", "commentsAfter"]:
counter += 1
else:
return self.children.index(searchedChild)
return -1
def getChildByPosition(self, pos, mandatory = True, ignoreComments = False):
if self.children:
i = 0
for child in self.children:
if ignoreComments and child.type in ["comment", "commentsBefore", "commentsAfter"]:
continue
if i == pos:
return child
i += 1
if mandatory:
            raise NodeAccessException("Node " + self.type + " has no child at position %s" % pos, self)
##
# List-valued!
def getChildsByTypes(self, type_list):
return [c for c in self.children if c.type in type_list]
def getChildByAttribute(self, key, value, mandatory = True):
if self.children:
for child in self.children:
if child.get(key,mandatory) == value:
return child
if mandatory:
raise NodeAccessException("Node " + self.type + " has no child with attribute " + key + " = " + value, self)
def getChildByTypeAndAttribute(self, ntype, key, value, mandatory = True, recursive = False):
if self.children:
for child in self.children:
if child.type == ntype and child.get(key,mandatory) == value:
return child
elif recursive:
found = child.getChildByTypeAndAttribute(ntype, key, value, False, True)
if found:
return found
if mandatory:
raise NodeAccessException("Node " + self.type + " has no child with type " + ntype + " and attribute " + key + " = " + value, self)
def getFirstChild(self, mandatory = True, ignoreComments = False):
if self.children:
for child in self.children:
if ignoreComments and child.type in ["comment", "commentsBefore", "commentsAfter"]:
continue
return child
if mandatory:
raise NodeAccessException("Node " + self.type + " has no children", self)
def getLastChild(self, mandatory = True, ignoreComments = False):
if self.children:
if not ignoreComments:
return self.children[-1]
else:
pos = len(self.children) - 1
while pos >= 0:
child = self.children[pos]
if ignoreComments and child.type in ["comment", "commentsBefore", "commentsAfter"]:
pos -= 1
continue
return child
if mandatory:
raise NodeAccessException("Node " + self.type + " has no children", self)
def getPreviousSibling(self, mandatory = True, ignoreComments = False):
if self.parent:
prev = None
for child in self.parent.children:
if ignoreComments and child.type in ["comment", "commentsBefore", "commentsAfter"]:
continue
if child == self:
if prev != None:
return prev
else:
break
prev = child
if mandatory:
raise NodeAccessException("Node " + self.type + " has no previous sibling", self)
def getFollowingSibling(self, mandatory = True, ignoreComments = False):
if self.parent:
prev = None
for child in self.parent.children:
if ignoreComments and child.type in ["comment", "commentsBefore", "commentsAfter"]:
continue
if prev != None:
return child
if child == self:
prev = child
if mandatory:
raise NodeAccessException("Node " + self.type + " has no following sibling", self)
def isFirstChild(self, ignoreComments = False):
if not self.parent:
return False
return self.parent.getFirstChild(False, ignoreComments) == self
def isLastChild(self, ignoreComments = False):
if not self.parent:
return False
return self.parent.getLastChild(False, ignoreComments) == self
#def isVar(self):
# return self.type in NODE_VARIABLE_TYPES
def isStatement(self):
return self.parent and self.parent.type in NODE_STATEMENT_CONTAINERS
def addListChild(self, listName, childNode):
listNode = self.getChild(listName, False)
if not listNode:
listNode = Node(listName)
self.addChild(listNode)
listNode.addChild(childNode)
def getListChildByAttribute(self, listName, key, value, mandatory = True):
listNode = self.getChild(listName, False)
if listNode:
return listNode.getChildByAttribute(key, value, mandatory)
if mandatory:
raise NodeAccessException("Node " + self.type + " has no child " + listName, self)
def getFirstListChild(self, listName, mandatory = True):
listNode = self.getChild(listName, False)
if listNode:
return listNode.getFirstChild(mandatory)
if mandatory:
raise NodeAccessException("Node " + self.type + " has no child " + listName, self)
def getAllChildrenOfType(self, ntype):
return self._getAllChildrenOfType(ntype, [])
def _getAllChildrenOfType(self, ntype, found=[]):
if self.children:
for child in self.children:
if child.type == ntype:
found.append(child)
child._getAllChildrenOfType(ntype, found)
return found
def toXml(self, prefix = "", childPrefix = " ", newLine="\n", encoding="utf-8"):
return nodeToXmlString(self, prefix, childPrefix, newLine, encoding)
def toJson(self, prefix = "", childPrefix = " ", newLine="\n"):
return nodeToJsonString(self, prefix, childPrefix, newLine)
def toJavascript(self):
from ecmascript.backend import formatter
optns = formatter.defaultOptions()
result = formatter.formatNode(self, optns, [])
return u''.join(result)
def nodeIter(self):
"A generator/iterator method, to traverse a tree and 'yield' each node"
yield self
if self.children:
for child in self.children:
for node in child.nodeIter():
yield node
def nodeTreeMap(self, fn):
"""As an alternative, a pure recursion walk that applies a function fn to each node.
This allows to control the recursion through fn's return value.
Signature of fn: fn(node,isLeaf)."""
if not self.children:
rc = fn(self,True)
return
else:
rc = fn(self,False)
if rc == 0: # != 0 means prune this subtree
for child in self.children:
child.nodeTreeMap(fn)
return
def nodeToXmlStringNR(node, prefix="", encoding="utf-8"):
hasText = False
asString = prefix + "<" + node.type
#if node.hasAttributes():
if True:
for key in node.attributes:
asString += " " + key + "=\"" + escapeXmlChars(node.attributes[key], True, encoding) + "\""
asString += "/>"
return asString
def nodeToXmlString(node, prefix = "", childPrefix = " ", newLine="\n", encoding="utf-8"):
asString = u''
hasText = False
# comments
for attr in ('comments', 'commentsAfter'):
if hasattr(node, attr) and getattr(node, attr):
cmtStrings = []
for comment in getattr(node, attr):
cmtStrings.append(nodeToXmlString(comment, prefix, childPrefix, newLine, encoding))
asString += u''.join(cmtStrings)
# own str repr
asString += prefix + "<" + node.type
#if node.hasAttributes():
if True:
for key in node.attributes:
if key == "text":
hasText = True
else:
asString += " " + key + "=\"" + escapeXmlChars(node.attributes[key], True, encoding) + "\""
if not node.hasChildren() and not hasText:
asString += "/>" + newLine
else:
asString += ">"
if hasText:
asString += newLine + prefix + childPrefix
asString += "<text>" + escapeXmlChars(node.attributes["text"], False, encoding) + "</text>" + newLine
if node.hasChildren():
asString += newLine
for child in node.children:
asString += nodeToXmlString(child, prefix + childPrefix, childPrefix, newLine, encoding)
asString += prefix + "</" + node.type + ">" + newLine
return asString
def nodeToJsonString(node, prefix = "", childPrefix = " ", newLine="\n"):
asString = prefix + '{"type":"' + escapeJsonChars(node.type) + '"'
#if node.hasAttributes():
if True:
asString += ',"attributes":{'
firstAttribute = True
for key in node.attributes:
if not firstAttribute:
asString += ','
asString += '"' + key + '":"' + escapeJsonChars(node.attributes[key]) + '"'
firstAttribute = False
asString += '}'
if node.hasChildren():
asString += ',"children":[' + newLine
prefix = prefix + childPrefix
for child in node.children:
asString += nodeToJsonString(child, prefix, childPrefix, newLine) + ',' + newLine
# NOTE We remove the ',\n' of the last child
if newLine == "":
asString = asString[:-1] + prefix + ']'
else:
asString = asString[:-2] + newLine + prefix + ']'
asString += '}'
return asString
def getNodeData(node):
data = {
"type" : node.type
}
#if node.hasAttributes():
if True:
data["attributes"] = {}
for key in node.attributes:
data["attributes"][key] = node.attributes[key]
if node.hasChildren():
data["children"] = []
for child in node.children:
data["children"].append(getNodeData(child))
return data
def escapeXmlChars(text, inAttribute, encoding="utf-8"):
if isinstance(text, basestring):
# http://www.w3.org/TR/xml/#dt-escape
        text = text.replace("&", "&amp;").replace("\"", "&quot;").replace("'", "&apos;").replace("<", "&lt;").replace(">", "&gt;")
elif isinstance(text, bool):
text = str(text).lower()
else:
text = str(text)
return text
def escapeJsonChars(text):
if isinstance(text, basestring):
# http://tools.ietf.org/html/rfc4627#section-2.5
text = text.replace('\\', '\\\\').replace('"', '\\"').replace('\n', '\\n').replace('\r', '\\r').replace('\t', '\\t').replace('\b', '\\b').replace('\f', '\\f').replace('/', '\\/')
elif isinstance(text, bool):
text = str(text).lower()
else:
text = str(text)
return text
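# --- Illustrative example (added for clarity, not part of qooxdoo itself) ---
# Builds a two-node tree and serializes it with the helpers above; the helper
# name _tree_demo and the node types used are arbitrary choices.
def _tree_demo():
    root = Node("file")
    child = Node("identifier").set("name", "foo")
    root.addChild(child)
    return root.toXml(), root.toJson()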
|
lgpl-3.0
| -3,710,500,687,276,511,700
| 30.775641
| 186
| 0.553319
| false
| 4.414856
| false
| false
| false
|
emc-openstack/storops
|
storops_test/unity/jh_mock.py
|
1
|
1253
|
# coding=utf-8
# Copyright (c) 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from storops import exception as ex
from storops.unity.resource.job import UnityJob
class MockJobHelper(object):
def __init__(self, cli, interval=5):
self._cli = cli
self.started = True
def wait_job(self, job, async_timeout, async_interval):
if job.id == 'N-3078':
return UnityJob(_id=job.id, cli=self._cli)
if job.id == 'N-3079':
ret_job = UnityJob(_id=job.id, cli=self._cli)
raise ex.JobStateError(ret_job)
if job.id == 'N-3080':
raise ex.JobTimeoutException()
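# --- Illustrative usage (added for clarity, not part of the test helpers) ---
# Shows how the mock is typically exercised; the cli argument and the timeout
# values are placeholders, and 'N-3078' is the job id the mock treats as completed.
def _wait_for_completed_job(cli):
    helper = MockJobHelper(cli)
    job = UnityJob(_id='N-3078', cli=cli)
    return helper.wait_job(job, async_timeout=60, async_interval=5)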
|
apache-2.0
| -8,038,427,901,715,396,000
| 35.852941
| 78
| 0.667997
| false
| 3.642442
| false
| false
| false
|
mdehus/goose-IEC61850-scapy
|
goose.py
|
1
|
5622
|
import struct
import binascii
from scapy.all import *
import BER
class ASNType(object):
tag = ''
def __init__(self, data='', length=0):
pass
    def unpack(self, data):
        raise NotImplementedError()
    def pack(self, data):
        raise NotImplementedError()
def __str__(self):
return str(self.data)
def __repr__(self):
return str(self.data)
class Integer(ASNType):
def __init__(self, data='', length=0):
self.data = BER.unpack_varint(data, length)
def pack(self):
if isinstance(self.data, int):
if self.data <= 255:
return struct.pack('!B', self.data)
elif self.data <= 65535:
return struct.pack('!h', self.data)
else:
return struct.pack('!i', self.data)
if isinstance(self.data, long):
return struct.pack('!l', self.data)
class VisibleString(ASNType):
def __init__(self, data='', length=0):
self.data = data
def __repr__(self):
return "'" + self.data + "'"
def pack(self):
return self.data
class Boolean(ASNType):
ID = 3
def __init__(self, data='', length=0):
self.data = struct.unpack('!b', data)[0]
def __repr__(self):
if self.data:
return "True"
else:
return "False"
def pack(self):
return struct.pack('!b', self.data)
class UTCTime(ASNType):
def __init__(self, data='', length=0):
self.data = struct.unpack('!d', data)[0]
def pack(self):
return struct.pack('!d', self.data)
class UnsignedInteger(ASNType):
def __init__(self, data='', length=0):
        # The original left the format unspecified; a 4-byte big-endian
        # unsigned value is assumed here.
        self.data = struct.unpack('!I', data)[0]
class Float(ASNType):
def __init__(self, data='', length=0):
self.data = struct.unpack('!f', data)[0]
def pack(self):
        return struct.pack('!f', self.data)
class Real(Float):
pass
class OctetString(ASNType):
def __init__(self, data='', length=0):
self.data = struct.unpack('!d', data)[0]
class BitString(ASNType):
ID = 4
def __init__(self, data='', length=0):
c = {'0': '0000', '1': '0001', '2': '0010',
'3':'0011', '4':'0100', '5':'0101',
'6':'0110', '7':'0111', '8':'1000',
'9':'1001', 'a':'1010', 'b':'1011',
'c':'1100', 'd':'1101', 'e':'1110',
'f':'1111'}
self.padding = struct.unpack('!h', '\x00'+data[:1])[0]
h = binascii.b2a_hex(data[1:])
self.data = ''
for i in h:
self.data += c[i]
def pack(self):
packed_padding = struct.pack('!B', self.padding)
packed_data = struct.pack('!h', int(self.data, 2))
return packed_padding + packed_data
class ObjectID(ASNType):
pass
class BCD(ASNType):
pass
class BooleanArray(ASNType):
pass
class UTF8String(ASNType):
pass
class Data(object):
tag = ''
tagmap = {(128,0,3):('boolean', Boolean),
(128,0,4):('bitstring', BitString),
(128,0,5):('integer', Integer),
(129,0,6):('unsigned', UnsignedInteger),
(128,0,7):('float', Float),
(128,0,8):('real', Real),
(128,0,9):('octetstring', OctetString),
(129,0,10):('visiblestring', VisibleString),
(128,0,12):('binarytime', UTCTime),
(128,0,13):('bcd', BCD),
(129,0,14):('booleanarray', BooleanArray),
(128,0,15):('objID', ObjectID),
(128,0,16):('mMSString', UTF8String),
(128,0,17):('utcstring', UTCTime)}
def __init__(self, data=None, length=0):
self.tagmap[(128,32,1)] = ('array', Data)
self.tagmap[(128,32,2)] = ('structure', Data)
self.data = BER.decoder(data, self.tagmap, decode_as_list=True)
def __getitem__(self, index):
return self.data[index]
def __repr__(self):
return repr(self.data)
def pack(self):
""" This is a hack, and should probably be integrated in to
the BER encoder at some point.
"""
packed_data = ''
for i in self.data:
tag = i.tag[0] + i.tag[1] + i.tag[2]
tag = struct.pack('!B', tag)
package = i.pack()
if len(package) < 128:
length = struct.pack('!B', len(package))
else: # HACK.. this will only support lengths up to 254.
length = struct.pack('!BB', 129, len(package))
packed_data += tag + length + package
return packed_data
class GOOSEPDU(object):
ID = 97
tagmap = {(128,0,0):('gocbRef', VisibleString),
(128,0,1):('timeAllowedToLive', Integer),
(128,0,2):('datSet', VisibleString),
(128,0,3):('goID', VisibleString),
(128,0,4):('t', UTCTime),
(128,0,5):('stNum', Integer),
(128,0,6):('sqNum', Integer),
(128,0,7):('test',Boolean),
(128,0,8):('confRev', Integer),
(128,0,9):('ndsCom', Boolean),
(128,0,10):('numDataSetEntries', Integer),
(128,32,11):('allData', Data)}
def __init__(self, data=None, length=0):
self.__dict__ = BER.decoder(data, self.tagmap)
def pack(self):
return BER.encoder(self.__dict__, self.tagmap)
class GOOSE(Packet):
name = "GOOSE"
fields_desc = [ ShortField("APPID", 3),
ShortField("Length", None),
ShortField("Reserved1", 0),
ShortField("Reserved2", 0),
]
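# --- Illustrative sketch (added for clarity, not part of the original module) ---
# Decoding a captured GOOSE APDU body with GOOSEPDU; `raw_pdu` stands for the
# bytes that follow the 8-byte APPID/Length/Reserved header in a frame.
#
#   pdu = GOOSEPDU(raw_pdu)
#   print pdu.gocbRef, pdu.stNum, pdu.sqNum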
|
gpl-2.0
| -9,140,397,460,529,846,000
| 28.129534
| 71
| 0.505692
| false
| 3.421789
| false
| false
| false
|
denys-duchier/Scolar
|
ZopeProducts/exUserFolder/__init__.py
|
1
|
3400
|
#
# Extensible User Folder
#
# (C) Copyright 2000-2005 The Internet (Aust) Pty Ltd
# ACN: 082 081 472 ABN: 83 082 081 472
# All Rights Reserved
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# Author: Andrew Milton <akm@theinternet.com.au>
# $Id: __init__.py,v 1.18 2004/11/10 14:15:33 akm Exp $
import exUserFolder
import CryptoSources
import AuthSources
import PropSources
import MembershipSources
import GroupSources
from GroupSource import GroupSource
from App.ImageFile import ImageFile
import OFS
#
# Install a dummy ZBabel setup if we don't have ZBabel installed.
#
import dummyZBabelTag
# Methods we need access to from any ObjectManager context
legacy_methods = (
('manage_addexUserFolderForm', exUserFolder.manage_addexUserFolderForm),
('manage_addexUserFolder', exUserFolder.manage_addexUserFolder),
('getAuthSources', exUserFolder.getAuthSources),
#('getPropSources', exUserFolder.getPropSources),
('getCryptoSources', exUserFolder.getCryptoSources),
('getMembershipSources', exUserFolder.getMembershipSources),
('getGroupSources', exUserFolder.getGroupSources),
('doAuthSourceForm', exUserFolder.doAuthSourceForm),
#('doPropSourceForm', exUserFolder.doPropSourceForm),
('doMembershipSourceForm', exUserFolder.doMembershipSourceForm),
# ('doGroupSourceForm', exUserFolder.doGroupSourceForm),
('getVariableType', exUserFolder.getVariableType),
('DialogHeader', exUserFolder.exUserFolder.DialogHeader),
('DialogFooter', exUserFolder.exUserFolder.DialogFooter),
#('MailHostIDs', exUserFolder.MailHostIDs),
)
# Image files to place in the misc_ object so they are accessible from misc_/exUserFolder
misc_={'exUserFolder.gif': ImageFile('exUserFolder.gif', globals()),
'exUserFolderPlugin.gif': ImageFile('exUserFolderPlugin.gif', globals()),
'exUser.gif': ImageFile('exUser.gif', globals()),
}
def initialize(context):
"""
Register base classes
"""
context.registerClass(exUserFolder.exUserFolder,
meta_type="ex User Folder",
permission="Add exUser Folder",
constructors=(exUserFolder.manage_addexUserFolderForm,
exUserFolder.manage_addexUserFolder,),
legacy=legacy_methods,
icon="exUserFolder.gif")
context.registerClass(GroupSource.GroupSource,
meta_type="ex User Folder Group Source",
permission="Add exUser Folder",
constructors=(GroupSource.manage_addGroupSourceForm,
GroupSource.manage_addGroupSource,),
icon="exUserFolderPlugin.gif")
|
gpl-2.0
| -6,918,314,890,740,817,000
| 39
| 88
| 0.717353
| false
| 3.859251
| false
| false
| false
|
openworm/PyOpenWorm
|
post_install.py
|
1
|
1749
|
from __future__ import absolute_import
from __future__ import print_function
import os, shutil, sys
from sysconfig import get_path
from glob import glob
from pkgutil import get_loader
from subprocess import call
def get_library_location(package):
# get abs path of a package in the library, rather than locally
library_package_paths = glob(os.path.join(get_path('platlib'), '*'))
sys.path = library_package_paths + sys.path
package_path = os.path.dirname(get_loader(package).get_filename())
sys.path = sys.path[len(library_package_paths):]
return package_path
package_location = get_library_location('owmeta')
pwd = os.path.dirname(os.path.realpath(__file__))
user_id = os.stat(pwd).st_uid # this is the person that cloned the repo
script_location = os.path.join(pwd, 'OpenWormData', 'scripts')
user_script = 'insert_worm.py' # script(s) we want to be run as non-root
print('Running {} as UID {}'.format(user_script, user_id))
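# The fork below lets the privileged parent wait while the child drops to the
# repository owner's UID (os.seteuid) before running insert_worm.py; the child
# finishes with os._exit so it terminates without running the parent's
# atexit/stdio cleanup.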
pid = os.fork()
if pid == 0:
#child process
db_files = glob(os.path.join(script_location, 'worm.db*'))
for x in db_files:
os.unlink(x)
try:
os.seteuid(user_id)
call([sys.executable, user_script], cwd = script_location)
finally:
os._exit(0)
os.waitpid(pid, 0)
# move created database files to your library's package directory
db_files = glob(os.path.join(script_location, 'worm.db*'))
for db_file in db_files:
print(('copying {} to {}'.format(db_file, package_location)))
new_location = os.path.join(package_location, os.path.basename(db_file))
shutil.copy(db_file, package_location)
os.chmod(new_location, 0o777)
# change directory owner to allow writing and reading from db in that dir
os.chown(package_location, user_id, -1)
|
mit
| 6,506,810,623,881,900,000
| 38.75
| 76
| 0.699257
| false
| 3.293785
| false
| false
| false
|
2013Commons/HUE-SHARK
|
apps/beeswax/src/beeswax/urls.py
|
1
|
3280
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('beeswax.views',
url(r'^$', 'index', name='index'),
url(r'^execute/(?P<design_id>\d+)?$', 'execute_query', name='execute_query'),
url(r'^explain_parameterized/(?P<design_id>\d+)$', 'explain_parameterized_query', name='explain_parameterized_query'),
url(r'^execute_parameterized/(?P<design_id>\d+)$', 'execute_parameterized_query', name='execute_parameterized_query'),
url(r'^watch/(?P<id>\d+)$', 'watch_query', name='watch_query'),
url(r'^watch/json/(?P<id>\d+)$', 'watch_query_refresh_json', name='watch_query_refresh_json'),
url(r'^cancel_operation/(?P<query_id>\d+)?$', 'cancel_operation', name='cancel_operation'),
url(r'^results/(?P<id>\d+)/(?P<first_row>\d+)$', 'view_results', name='view_results'),
url(r'^download/(?P<id>\d+)/(?P<format>\w+)$', 'download', name='download'),
url(r'^save_results/(?P<id>\d+)$', 'save_results', name='save_results'),
url(r'^save_design_properties$', 'save_design_properties', name='save_design_properties'), # Ajax
url(r'^autocomplete/$', 'autocomplete', name='autocomplete'),
url(r'^autocomplete/(?P<database>\w+)/$', 'autocomplete', name='autocomplete'),
url(r'^autocomplete/(?P<database>\w+)/(?P<table>\w+)$', 'autocomplete', name='autocomplete'),
url(r'^my_queries$', 'my_queries', name='my_queries'),
url(r'^list_designs$', 'list_designs', name='list_designs'),
url(r'^list_trashed_designs$', 'list_trashed_designs', name='list_trashed_designs'),
url(r'^delete_designs$', 'delete_design', name='delete_design'),
url(r'^restore_designs$', 'restore_design', name='restore_design'),
url(r'^clone_design/(?P<design_id>\d+)$', 'clone_design', name='clone_design'),
url(r'^query_history$', 'list_query_history', name='list_query_history'),
url(r'^configuration$', 'configuration', name='configuration'),
url(r'^install_examples$', 'install_examples', name='install_examples'),
url(r'^query_cb/done/(?P<server_id>\S+)$', 'query_done_cb', name='query_done_cb'),
)
urlpatterns += patterns(
'beeswax.create_database',
url(r'^create/database$', 'create_database', name='create_database'),
)
urlpatterns += patterns(
'beeswax.create_table',
url(r'^create/create_table/(?P<database>\w+)$', 'create_table', name='create_table'),
url(r'^create/import_wizard/(?P<database>\w+)$', 'import_wizard', name='import_wizard'),
url(r'^create/auto_load/(?P<database>\w+)$', 'load_after_create', name='load_after_create'),
)
|
apache-2.0
| 5,420,627,907,366,661,000
| 50.25
| 120
| 0.685671
| false
| 3.340122
| false
| false
| false
|
cuckoo5/soap
|
Soap_know/handler/formula.py
|
1
|
1592
|
#coding=utf-8
import tornado.web
import util.config as config
import util.constants as constants
from db.manager import FormulaManager, OilManager
from handler.base import BaseHandler
class FormulaHandler(BaseHandler):
def initialize(self):
self.all_use = constants.use
self.all_difficult_degree = constants.difficult_degree
self.oil_manager = OilManager()
self.formula_manager = FormulaManager()
self.all_skin_types = self.formula_manager.get_all_skin_types()
@tornado.web.authenticated
def get(self, param):
print "param = %s" %param
url = 'formula/%s.html' %param
cur_user = self.get_current_user()
switch = {'index' : self.index, 'new' : self.new}
switch[param](url, cur_user)
# self.render(url, title=TITLE, cur_user=cur_user)
def index(self, url, cur_user):
skin_type = self.get_argument("skin_type", None)
use = self.get_argument("use", None)
difficult_degree = self.get_argument("difficult_degree", None)
print 'skin_type = ', skin_type
print 'use = ', use
print 'difficult_degree = ', difficult_degree
formulas = self.formula_manager.get_formulas()
self.render(url, title=config.title, cur_user=cur_user, use=self.all_use, skin_types=self.all_skin_types,
difficult_degree=self.all_difficult_degree, formulas=formulas)
def new(self, url, cur_user):
oils = self.oil_manager.get_oils()
self.render(url, title=config.title, cur_user=cur_user, oils=oils)
|
gpl-3.0
| 3,997,204,989,561,479,000
| 36.904762
| 114
| 0.645101
| false
| 3.423656
| false
| false
| false
|
taolei87/sru
|
DrQA/drqa/model.py
|
1
|
5679
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import torch
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import logging
from torch.autograd import Variable
from .utils import AverageMeter
from .rnn_reader import RnnDocReader
# Modification:
# - change the logger name
# - save & load optimizer state dict
# - change the dimension of inputs (for POS and NER features)
# Origin: https://github.com/facebookresearch/ParlAI/tree/master/parlai/agents/drqa
logger = logging.getLogger(__name__)
class DocReaderModel(object):
    """High level model that handles initializing the underlying network
architecture, saving, updating examples, and predicting examples.
"""
def __init__(self, opt, embedding=None, state_dict=None):
# Book-keeping.
self.opt = opt
self.updates = state_dict['updates'] if state_dict else 0
self.train_loss = AverageMeter()
# Building network.
self.network = RnnDocReader(opt, embedding=embedding)
if state_dict:
new_state = set(self.network.state_dict().keys())
for k in list(state_dict['network'].keys()):
if k not in new_state:
del state_dict['network'][k]
self.network.load_state_dict(state_dict['network'])
# Building optimizer.
parameters = [p for p in self.network.parameters() if p.requires_grad]
if opt['optimizer'] == 'sgd':
self.optimizer = optim.SGD(parameters, opt['learning_rate'],
momentum=opt['momentum'],
weight_decay=opt['weight_decay'])
elif opt['optimizer'] == 'adamax':
self.optimizer = optim.Adamax(parameters, opt['learning_rate'],
weight_decay=opt['weight_decay'])
else:
raise RuntimeError('Unsupported optimizer: %s' % opt['optimizer'])
if state_dict:
self.optimizer.load_state_dict(state_dict['optimizer'])
num_params = sum(p.data.numel() for p in parameters
if p.data.data_ptr() != self.network.embedding.weight.data.data_ptr())
print ("{} parameters".format(num_params))
def update(self, ex):
# Train mode
self.network.train()
# Transfer to GPU
if self.opt['cuda']:
inputs = [Variable(e.cuda(async=True)) for e in ex[:7]]
target_s = Variable(ex[7].cuda(async=True))
target_e = Variable(ex[8].cuda(async=True))
else:
inputs = [Variable(e) for e in ex[:7]]
target_s = Variable(ex[7])
target_e = Variable(ex[8])
# Run forward
score_s, score_e = self.network(*inputs)
# Compute loss and accuracies
loss = F.nll_loss(score_s, target_s) + F.nll_loss(score_e, target_e)
self.train_loss.update(loss.data[0], ex[0].size(0))
# Clear gradients and run backward
self.optimizer.zero_grad()
loss.backward()
# Clip gradients
torch.nn.utils.clip_grad_norm(self.network.parameters(),
self.opt['grad_clipping'])
# Update parameters
self.optimizer.step()
self.updates += 1
# Reset any partially fixed parameters (e.g. rare words)
self.reset_parameters()
def predict(self, ex):
# Eval mode
self.network.eval()
# Transfer to GPU
if self.opt['cuda']:
inputs = [Variable(e.cuda(async=True), volatile=True)
for e in ex[:7]]
else:
inputs = [Variable(e, volatile=True) for e in ex[:7]]
# Run forward
score_s, score_e = self.network(*inputs)
# Transfer to CPU/normal tensors for numpy ops
score_s = score_s.data.cpu()
score_e = score_e.data.cpu()
# Get argmax text spans
text = ex[-2]
spans = ex[-1]
predictions = []
max_len = self.opt['max_len'] or score_s.size(1)
for i in range(score_s.size(0)):
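            # torch.ger builds the outer product of start and end scores, so
            # scores[s, e] is the joint score of span (s, e); triu_/tril_ keep
            # only spans with s <= e and at most max_len tokens.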
scores = torch.ger(score_s[i], score_e[i])
scores.triu_().tril_(max_len - 1)
scores = scores.numpy()
s_idx, e_idx = np.unravel_index(np.argmax(scores), scores.shape)
s_offset, e_offset = spans[i][s_idx][0], spans[i][e_idx][1]
predictions.append(text[i][s_offset:e_offset])
return predictions
def reset_parameters(self):
# Reset fixed embeddings to original value
if self.opt['tune_partial'] > 0:
offset = self.opt['tune_partial'] + 2
if offset < self.network.embedding.weight.data.size(0):
self.network.embedding.weight.data[offset:] \
= self.network.fixed_embedding
def save(self, filename, epoch):
params = {
'state_dict': {
'network': self.network.state_dict(),
'optimizer': self.optimizer.state_dict(),
'updates': self.updates
},
'config': self.opt,
'epoch': epoch
}
try:
torch.save(params, filename)
logger.info('model saved to {}'.format(filename))
except BaseException:
logger.warn('[ WARN: Saving failed... continuing anyway. ]')
def cuda(self):
self.network.cuda()
|
mit
| -4,923,884,610,044,888,000
| 35.171975
| 83
| 0.573693
| false
| 3.916552
| false
| false
| false
|
anirudhSK/chromium
|
tools/perf/measurements/timeline_based_measurement.py
|
1
|
4448
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from metrics import timeline as timeline_module
from metrics import timeline_interaction_record as tir_module
from telemetry.page import page_measurement
from telemetry.core.timeline import model as model_module
# TimelineBasedMeasurement considers all instrumentation as producing a single
# timeline. But, depending on the amount of instrumentation that is enabled,
# overhead increases. The user of the measurement must therefore choose between
# a few levels of instrumentation.
NO_OVERHEAD_LEVEL = 'no-overhead'
MINIMAL_OVERHEAD_LEVEL = 'minimal-overhead'
DEBUG_OVERHEAD_LEVEL = 'debug-overhead'
ALL_OVERHEAD_LEVELS = [
NO_OVERHEAD_LEVEL,
MINIMAL_OVERHEAD_LEVEL,
DEBUG_OVERHEAD_LEVEL
]
class _TimelineBasedMetrics(object):
def __init__(self, model, renderer_thread):
self._model = model
self._renderer_thread = renderer_thread
def FindTimelineInteractionRecords(self):
# TODO(nduca): Add support for page-load interaction record.
return [tir_module.TimelineInteractionRecord(event) for
event in self._renderer_thread.IterAllAsyncSlices()
if tir_module.IsTimelineInteractionRecord(event.name)]
def CreateMetricsForTimelineInteractionRecord(self, interaction):
res = []
if interaction.is_smooth:
pass # TODO(nduca): res.append smoothness metric instance.
return res
def AddResults(self, results):
interactions = self.FindTimelineInteractionRecords()
if len(interactions) == 0:
raise Exception('Expected at least one Interaction on the page')
for interaction in interactions:
metrics = self.CreateMetricsForTimelineInteractionRecord(interaction)
for m in metrics:
m.AddResults(self._model, self._renderer_thread,
interaction, results)
class TimelineBasedMeasurement(page_measurement.PageMeasurement):
  """Collects multiple metrics for pages based on their interaction records.
A timeline measurement shifts the burden of what metrics to collect onto the
page under test, or the pageset running that page. Instead of the measurement
having a fixed set of values it collects about the page, the page being tested
  issues (via javascript) an Interaction record into the user timing API
  describing what the page is doing at that time, as well as a standardized set
of flags describing the semantics of the work being done. The
TimelineBasedMeasurement object collects a trace that includes both these
  interaction records, and a user-chosen amount of performance data using
Telemetry's various timeline-producing APIs, tracing especially.
It then passes the recorded timeline to different TimelineBasedMetrics based
on those flags. This allows a single run through a page to produce load timing
data, smoothness data, critical jank information and overall cpu usage
information.
For information on how to mark up a page to work with
TimelineBasedMeasurement, refer to the
perf.metrics.timeline_interaction_record module.
"""
def __init__(self):
super(TimelineBasedMeasurement, self).__init__('smoothness')
def AddCommandLineOptions(self, parser):
parser.add_option(
'--overhead-level', type='choice',
choices=ALL_OVERHEAD_LEVELS,
default=NO_OVERHEAD_LEVEL,
help='How much overhead to incur during the measurement.')
def CanRunForPage(self, page):
return hasattr(page, 'smoothness')
def WillNavigateToPage(self, page, tab):
if not tab.browser.supports_tracing:
raise Exception('Not supported')
assert self.options.overhead_level in ALL_OVERHEAD_LEVELS
if self.options.overhead_level == NO_OVERHEAD_LEVEL:
categories = timeline_module.MINIMAL_TRACE_CATEGORIES
elif self.options.overhead_level == \
MINIMAL_OVERHEAD_LEVEL:
categories = ''
else:
categories = '*,disabled-by-default-cc.debug'
tab.browser.StartTracing(categories)
  def MeasurePage(self, page, tab, results):
    """ Collect all possible metrics and add them to results. """
trace_result = tab.browser.StopTracing()
model = model_module.TimelineModel(trace_result)
renderer_thread = model.GetRendererThreadFromTab(tab)
meta_metrics = _TimelineBasedMetrics(model, renderer_thread)
meta_metrics.AddResults(results)
|
bsd-3-clause
| 5,216,105,921,857,188,000
| 39.436364
| 80
| 0.74955
| false
| 4.172608
| false
| false
| false
|
Sh4kE/fun-with-wiki-science
|
arxiv_import.py
|
1
|
1272
|
from bs4 import BeautifulSoup
import urllib
import config as c
from pymongo import MongoClient
client = MongoClient()
db = client.articles.entries
def gen_index(seed=db.count()):
i = seed
while True:
i +=1
yield i
index = gen_index()
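# gen_index yields ever-increasing ids starting just above the collection size
# captured when the function was defined (the db.count() default is evaluated
# once), so each newly fetched entry receives a unique 'index' value.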
def generate_filename(entry, directory = c.ARTICLE_DIR):
authors = [a.split()[-1] for a in entry['authors']]
authors = authors[0]+'_et.al' if len(authors) > 1 else authors[0]
title = entry['title'].replace(' ', '_')
return ''.join([directory, authors,'-',title , '.pdf'])
def fetch(url):
html_doc = urllib.urlopen(url).read()
s = BeautifulSoup(html_doc)
entries = [{
'pdf' : e.findAll('link',attrs={'type': 'application/pdf'})[0]['href'],
'url' : e.findAll('link',attrs={'type': 'text/html'})[0]['href'],
'authors': [a.text.strip() for a in e.findAll('author')],
'title': str(e.title.next),
'id': str.split(str(e.id.next),'/')[-1],
'index': next(index)
} for e in s.findAll('entry')]
entries = filter(lambda e: db.find_one({'id': e['id']}) is None, entries)
for entry in entries:
entry['path'] = generate_filename(entry)
map(lambda e: urllib.urlretrieve(e['pdf'], e['path']), entries)
if entries:
db.insert(entries)
return [e['index'] for e in entries]
|
mit
| 7,861,832,377,089,369,000
| 30.8
| 75
| 0.623428
| false
| 3.19598
| false
| false
| false
|
sophron/wifiphisher
|
wifiphisher/extensions/roguehostapdinfo.py
|
1
|
4097
|
"""
Extension that interacts with roguehostapd to print relevant information. For example,
information regarding automatic association attacks.
"""
from collections import defaultdict
import wifiphisher.common.constants as constants
class Roguehostapdinfo(object):
"""
    Handles printing KARMA attack information
"""
def __init__(self, data):
"""
Setup the class with all the given arguments.
:param self: A roguehostapdinfo object.
:param data: Shared data from main engine
:type self: roguehostapdinfo
:type data: dictionary
:return: None
:rtype: None
"""
self._data = data
self._packets_to_send = defaultdict(list)
self._mac2ssid_dict = defaultdict()
self._known_beacon_ssids = self._get_known_beacon_ssids()
def get_packet(self, packet):
"""
:param self: A roguehostapdinfo object
:param packet: A scapy.layers.RadioTap object
:type self: roguehostapdinfo
:type packet: scapy.layers.RadioTap
:return: empty list
:rtype: list
"""
return self._packets_to_send
def _get_known_beacon_ssids(self):
"""
:param self: A roguehostapdinfo object
:type self: roguehostapdinfo
:return: None
:rtype: None
"""
known_beacons_ssids = set()
# locate the known WLANS file
if self._data.args.known_beacons:
area_file = constants.KNOWN_WLANS_FILE
with open(area_file) as _file:
for line in _file:
if line.startswith("!"):
continue
essid = line.rstrip()
known_beacons_ssids.add(essid)
return known_beacons_ssids
def send_output(self):
"""
        Send the output to the extension manager
:param self: A roguehostapdinfo object.
:type self: roguehostapdinfo
:return: A list with the password checking information
:rtype: list
..note: In each packet we ask roguehostapd whether there are victims
associated to rogue AP
"""
info = []
ssid_mac_list = self._data.roguehostapd.get_karma_data()
try:
mac_list, ssid_list = zip(*ssid_mac_list)
except ValueError:
            # in case ssid_mac_list is still empty
mac_list = []
ssid_list = []
# remove the one not in the current associated list
pop_macs = []
for mac in self._mac2ssid_dict:
if mac not in mac_list:
pop_macs.append(mac)
for key in pop_macs:
self._mac2ssid_dict.pop(key)
# add new associated victims to the dictionary
for idx, mac in enumerate(mac_list):
if mac not in self._mac2ssid_dict:
self._mac2ssid_dict[mac] = ssid_list[idx]
macssid_pairs = self._mac2ssid_dict.items()
for mac, ssid in macssid_pairs:
if ssid == self._data.target_ap_essid:
outputstr = "Victim " + mac + " probed for WLAN with ESSID: '" + ssid + "' (Evil Twin)"
elif ssid not in self._known_beacon_ssids:
outputstr = "Victim " + mac + " probed for WLAN with ESSID: '" + ssid + "' (KARMA)"
else:
outputstr = "Victim " + mac + " probed for WLAN with ESSID: '" + ssid + "' (Known Beacons)"
info.append(outputstr)
return info
def send_channels(self):
"""
Send channels to subscribe
:param self: A roguehostapdinfo object.
:type self: roguehostapdinfo
:return: empty list
:rtype: list
..note: we don't need to send frames in this extension
"""
return [self._data.target_ap_channel]
def on_exit(self):
"""
Free all the resources regarding to this module
:param self: A roguehostapdinfo object.
:type self: roguehostapdinfo
:return: None
:rtype: None
"""
pass
|
gpl-3.0
| -7,268,679,541,904,218,000
| 32.040323
| 107
| 0.569929
| false
| 3.846948
| false
| false
| false
|
gdreid/job-salary-prediction
|
code/vectorization/text_extractor.py
|
1
|
6819
|
'''
Created on 2016
@author: Graham Reid
Builds a 2-gram vectorizer using scikit learn count vectorizer. Only really
interesting thing here is that I didn't concatenate all of the fields together.
This helps to preserve context.
'''
import random
import pickle
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from scipy.sparse import hstack
import csv
import string
trainPath = '../../data/data_raw.csv'
dataFile = open(trainPath)
reader = csv.reader(dataFile)
headers = reader.next()
target_index = headers.index('SalaryNormalized')
title_index = headers.index('Title')
description_index = headers.index('FullDescription')
location_index = headers.index('LocationRaw')
contract_type_index = headers.index('ContractType')
contract_time_index = headers.index('ContractTime')
company_index = headers.index('Company')
category_index = headers.index('Category')
source_index = headers.index('SourceName')
file_len = 0
for line in reader:
file_len = file_len +1
dataFile.seek(0)
reader.next()
salary_array = []
title_array = []
location_array = []
description_array = []
contract_type_array = []
contract_time_array = []
company_array = []
category_array = []
source_array = []
title_train_array = []
location_train_array = []
description_train_array = []
contract_type_train_array = []
contract_time_train_array = []
company_train_array = []
category_train_array = []
source_train_array = []
def format_string(field) :
return field.lower().translate(string.maketrans("",""), string.punctuation)
read_fraction = 1.0
training_indices = np.random.randint(0, file_len, int(file_len*read_fraction))
print 'reading data'
index = 0
for line in reader:
salary_array.append(np.log(float(line[target_index])))
title_array.append(format_string(line[title_index]))
description_array.append(format_string(line[description_index]))
location_array.append(format_string(line[location_index]))
contract_type_array.append(format_string(line[contract_type_index]))
contract_time_array.append(format_string(line[contract_time_index]))
company_array.append(format_string(line[company_index]))
category_array.append(format_string(line[category_index]))
source_array.append(format_string(line[source_index]))
index = index + 1
'''
for anything larger than unigrams, descriptions might be too large to be loaded
into memory all at once. Need to use some smaller read_fraction of documents
'''
for i in training_indices:
title_train_array.append(title_array[i])
description_train_array.append(description_array[i])
location_train_array.append(location_array[i])
contract_time_train_array.append(contract_time_array[i])
contract_type_train_array.append(contract_type_array[i])
company_train_array.append(company_array[i])
category_train_array.append(category_array[i])
source_train_array.append(source_array[i])
print 'creating vectorizers'
'''
word must be present in at least this fraction of the documents to be
vectorized (removes one-time misspellings, etc)
'''
fraction = 1.0/10000.0
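# Worked example with illustrative numbers: if the csv held 240,000 rows,
# min_df = int(240000 * 1.0/10000.0) = 24, i.e. a term must appear in at least
# 24 adverts before it enters the vocabulary.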
title_vectorizer = CountVectorizer(binary = True, strip_accents='ascii',
min_df = int(fraction*file_len), ngram_range = (1,2))
description_vectorizer = CountVectorizer(binary = True, strip_accents='ascii',
min_df = int(fraction*file_len*read_fraction), ngram_range = (1,2))
location_vectorizer = CountVectorizer(binary = True, strip_accents='ascii',
min_df = int(fraction*file_len), ngram_range = (1,2))
contract_time_vectorizer = CountVectorizer(binary = True, strip_accents='ascii',
min_df = int(fraction*file_len), ngram_range = (1,2))
contract_type_vectorizer = CountVectorizer(binary = True, strip_accents='ascii',
min_df = int(fraction*file_len), ngram_range = (1,2))
company_vectorizer = CountVectorizer(binary = True, strip_accents='ascii',
min_df = int(fraction*file_len), ngram_range = (1,2))
category_vectorizer = CountVectorizer(binary = True, strip_accents='ascii',
min_df = int(fraction*file_len), ngram_range = (1,2))
source_vectorizer = CountVectorizer(binary = True, strip_accents='ascii',
min_df = int(fraction*file_len))
title_vectorizer.fit(title_array)
title_count_array = title_vectorizer.transform(title_array)
print 'title fit, shape: ', title_count_array.shape
description_vectorizer.fit(description_train_array)
description_count_array = description_vectorizer.transform(description_array)
print 'description fit, shape: ', description_count_array.shape
location_vectorizer.fit(location_array)
location_count_array = location_vectorizer.transform(location_array)
print 'location fit, shape: ', location_count_array.shape
contract_time_vectorizer.fit(contract_time_array)
contract_time_count_array = contract_time_vectorizer.transform(contract_time_array)
print 'contract time fit, shape: ', contract_time_count_array.shape
contract_type_vectorizer.fit(contract_type_array)
contract_type_count_array = contract_type_vectorizer.transform(contract_type_array)
print 'contract type fit, shape: ', contract_type_count_array.shape
company_vectorizer.fit(company_array)
company_count_array = company_vectorizer.transform(company_array)
print 'company fit, shape: ', company_count_array.shape
category_vectorizer.fit(category_array)
category_count_array = category_vectorizer.transform(category_array)
print 'category fit, shape: ', category_count_array.shape
source_vectorizer.fit(source_array)
source_count_array = source_vectorizer.transform(source_array)
print 'source fit, shape: ', source_count_array.shape
data_array = hstack([title_count_array, description_count_array,
location_count_array, contract_time_count_array, contract_type_count_array,
company_count_array, category_count_array, source_count_array])
print 'data stacked'
with open('../../data/data_arrays/data_binary_bigram.pk', 'wb') as output:
pickle.dump(data_array, output, pickle.HIGHEST_PROTOCOL)
pickle.dump(salary_array, output, pickle.HIGHEST_PROTOCOL)
with open('../../data/vectorizers/vectorizers_binary_bigram.pk', 'wb') as output:
pickle.dump(title_vectorizer, output, pickle.HIGHEST_PROTOCOL)
pickle.dump(description_vectorizer, output, pickle.HIGHEST_PROTOCOL)
pickle.dump(location_vectorizer, output, pickle.HIGHEST_PROTOCOL)
pickle.dump(contract_time_vectorizer, output, pickle.HIGHEST_PROTOCOL)
pickle.dump(contract_type_vectorizer, output, pickle.HIGHEST_PROTOCOL)
pickle.dump(company_vectorizer, output, pickle.HIGHEST_PROTOCOL)
pickle.dump(category_vectorizer, output, pickle.HIGHEST_PROTOCOL)
pickle.dump(source_vectorizer, output, pickle.HIGHEST_PROTOCOL)
print 'data_array read and written'
print 'data_array shape: ', data_array.shape
|
gpl-3.0
| 5,507,033,565,940,186,000
| 35.859459
| 83
| 0.751723
| false
| 3.419759
| false
| false
| false
|
baseride/pyZabbixSender
|
pyZabbixSender/pyZabbixSenderBase.py
|
1
|
7074
|
# -*- coding: utf-8 -*-
# Copyright 2015 Kurt Momberg <kurtqm (at) yahoo(dot)com(dot)ar>
# > Based on work by Klimenko Artyem <aklim007(at)gmail(dot)com>
# >> Based on work by Rob Cherry <zsend(at)lxrb(dot)com>
# >>> Based on work by Enrico Tröger <enrico(dot)troeger(at)uvena(dot)de>
# License: GNU GPLv2
import struct
import time
import sys
import re
# If you're using an old version of python that doesn't have json available,
# you can use simplejson instead: https://simplejson.readthedocs.org/en/latest/
try:
import json
except ImportError:
import simplejson as json
class InvalidResponse(Exception):
pass
class pyZabbixSenderBase:
'''
This class creates network-agnostic data structures to send data to a Zabbix server
'''
ZABBIX_SERVER = "127.0.0.1"
ZABBIX_PORT = 10051
def __init__(self, server=ZABBIX_SERVER, port=ZABBIX_PORT, verbose=False):
'''
#####Description:
This is the constructor, to obtain an object of type pyZabbixSender, linked to work with a specific server/port.
#####Parameters:
* **server**: [in] [string] [optional] This is the server domain name or IP. *Default value: "127.0.0.1"*
* **port**: [in] [integer] [optional] This is the port open in the server to receive zabbix traps. *Default value: 10051*
        * **verbose**: [in] [boolean] [optional] This is to allow the library to write some output to stderr when it finds an error. *Default value: False*
**Note: The "verbose" parameter will be revisited and could be removed/replaced in the future**
#####Return:
It returns a pyZabbixSender object.
'''
self.zserver = server
self.zport = port
self.verbose = verbose
self.timeout = 5 # Socket connection timeout.
self._data = [] # This is to store data to be sent later.
def __str__(self):
'''
This allows you to obtain a string representation of the internal data
'''
return str(self._data)
def _createDataPoint(self, host, key, value, clock=None):
'''
Creates a dictionary using provided parameters, as needed for sending this data.
'''
obj = {
'host': host,
'key': key,
'value': value,
}
if clock:
obj['clock'] = clock
return obj
def addData(self, host, key, value, clock=None):
'''
#####Description:
Adds host, key, value and optionally clock to the internal list of data to be sent later, when calling one of the methods to actually send the data to the server.
#####Parameters:
* **host**: [in] [string] [mandatory] The host which the data is associated to.
* **key**: [in] [string] [mandatory] The name of the trap associated to the host in the Zabbix server.
* **value**: [in] [any] [mandatory] The value you want to send. Please note that you need to take care about the type, as it needs to match key definition in the Zabbix server. Numeric types can be specified as number (for example: 12) or text (for example: "12").
* **clock**: [in] [integer] [optional] Here you can specify the Unix timestamp associated to your measurement. For example, you can process a log or a data file produced an hour ago, and you want to send the data with the timestamp when the data was produced, not when it was processed by you. If you don't specify this parameter, zabbix server will assign a timestamp when it receives the data.
You can create a timestamp compatible with "clock" parameter using this code:
int(round(time.time()))
*Default value: None*
#####Return:
This method doesn't have a return.
'''
obj = self._createDataPoint(host, key, value, clock)
self._data.append(obj)
def clearData(self):
'''
#####Description:
        This method removes all data from internal storage. You need to call it explicitly, as the internal data is not cleared automatically after a send operation.
#####Parameters:
None
#####Return:
None
'''
self._data = []
def getData(self):
'''
#####Description:
This method is used to obtain a copy of the internal data stored in the object.
Please note you will **NOT** get the internal data object, but a copy of it, so no matter what you do with your copy, internal data will remain safe.
#####Parameters:
None
#####Return:
A copy of the internal data you added using the method *addData* (an array of dicts).
'''
copy_of_data = []
for data_point in self._data:
copy_of_data.append(data_point.copy())
return copy_of_data
def printData(self):
'''
#####Description:
Print stored data (to stdout), so you can see what will be sent if "sendData" is called. This is useful for debugging purposes.
#####Parameters:
None
#####Return:
None
'''
for elem in self._data:
print str(elem)
print 'Count: %d' % len(self._data)
def removeDataPoint(self, data_point):
'''
#####Description:
This method delete one data point from the internal stored data.
        Its main purpose is to narrow the internal data to keep only those failed data points (those that were not received/processed by the server) so you can identify/retry them. Data points can be obtained from *sendDataOneByOne* return, or from *getData* return.
#####Parameters:
* **data_point**: [in] [dict] [mandatory] This is a dictionary as returned by *sendDataOneByOne()* or *getData* methods.
#####Return:
It returns True if data_point was found and deleted, and False if not.
'''
if data_point in self._data:
self._data.remove(data_point)
return True
return False
def recognize_response_raw(response_raw):
return recognize_response(json.loads(response_raw))
FAILED_COUNTER = re.compile('^.*failed.+?(\d+).*$')
PROCESSED_COUNTER = re.compile('^.*processed.+?(\d+).*$')
SECONDS_SPENT = re.compile('^.*seconds spent.+?((-|\+|\d|\.|e|E)+).*$')
def recognize_response(response):
failed = FAILED_COUNTER.match(response['info'].lower() if 'info' in response else '')
processed = PROCESSED_COUNTER.match(response['info'].lower() if 'info' in response else '')
seconds_spent = SECONDS_SPENT.match(response['info'].lower() if 'info' in response else '')
if failed is None or processed is None:
        raise InvalidResponse('Unable to parse server response', response)
failed = int(failed.group(1))
processed = int(processed.group(1))
seconds_spent = float(seconds_spent.group(1)) if seconds_spent else None
response['info'] = {
'failed':failed,
'processed':processed,
'seconds spent':seconds_spent
}
return response
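# Minimal usage sketch (illustration only, not part of the original module); it
# exercises just the data-handling methods defined above, and no network I/O
# happens because pyZabbixSenderBase never opens a socket. The host/key names
# are made up for the example.
if __name__ == '__main__':
    sender = pyZabbixSenderBase(server='127.0.0.1', verbose=True)
    sender.addData('test_host', 'test[lala]', 1)
    sender.addData('test_host', 'test[lala]', 2, clock=int(round(time.time())))
    sender.printData()               # shows the two queued data points
    first = sender.getData()[0]      # copy of the first data point
    sender.removeDataPoint(first)    # drop it from the internal buffer
    sender.printData()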
|
gpl-2.0
| -1,269,972,157,071,972,000
| 37.237838
| 403
| 0.625954
| false
| 4.063182
| false
| false
| false
|
tanchao/algo
|
interviews/zenefits/three_sum.py
|
1
|
1745
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'tanchao'
class Solution:
# @param {integer[]} nums
# @return {integer[][]}
def threeSum(self, nums):
res = []
if len(nums) < 3:
return res
nums.sort() # sorted array for value judgement
for i in range(0, len(nums) - 2):
if i == 0 or nums[i] > nums[i - 1]: # note skip duplication on every step
left = i + 1
right = len(nums) - 1
while right > left:
if nums[i] + nums[left] + nums[right] == 0:
res.append([nums[i], nums[left], nums[right]])
''' this is for less than
tmp = []
tmp.append(nums[i])
tmp.append(nums[left])
while right > left:
tmp.append(right)
res.append(tmp)
tmp.pop()
right -= 1'''
right -= 1
left += 1
while right > left and nums[left] == nums[left - 1]: left += 1
while right > left and nums[right] == nums[right + 1]: right -= 1
elif nums[i] + nums[left] + nums[right] > 0:
while right > left:
right -= 1
if nums[right] < nums[right + 1]: break
else:
while right > left:
left += 1
if nums[left] > nums[left - 1]: break
return res
if __name__ == '__main__':
pass
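    # Hedged usage sketch (not part of the original solution): for the classic
    # LeetCode input below the method returns [[-1, -1, 2], [-1, 0, 1]].
    print(Solution().threeSum([-1, 0, 1, 2, -1, -4]))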
|
mit
| -540,101,211,688,594,900
| 36.148936
| 89
| 0.373066
| false
| 4.604222
| false
| false
| false
|
IPIDataLab/Mongo-IRD-load
|
python/excell_parse.py
|
1
|
9770
|
#!/usr/bin/python
from xlrd import cellname
import re
from datetime import datetime
from utils import na_check, split_str_array, get_cell, geocode
#############################
#############################
# This file parses incoming
# Excel workbooks and converts
# them to a JSON object array
# for loading into MongoDB.
#############################
#############################
def parse(sheet, data):
    # create object where key = header name and value = column number
labels = sheet.row(0)
lkey = { str(labels[i]).replace("text:u","").replace("'","").lower(): i for i in range(0, len(labels)) }
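    # e.g. lkey could end up as {'uid': 0, 'mainnameen': 1, 'acronym': 2, ...}
    # (illustrative; actual columns depend on the workbook's header row)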
# test by iterating over one sheet
nrows = sheet.nrows
for row_index in range(1, nrows):
# english names
a = get_cell(sheet,'MainNameEn',row_index,lkey)
if not a:
continue
# create document for each non-empty row
data.append({})
### ADD FIELDS
###
data[-1]['name_en'] = a
# UID
a = get_cell(sheet,'UID',row_index,lkey)
if a:
data[-1]['uid'] = a
# acronym
a = get_cell(sheet,'Acronym',row_index,lkey)
if a:
data[-1]['acronym'] = a
# main names
a = get_cell(sheet,'MainName',row_index,lkey,str_split=True)
if a:
data[-1]['main_names'] = a
# old names
a = get_cell(sheet,'MainOldNames',row_index,lkey,str_split=True)
if a:
data[-1]['old_alias'] = a
# parent organization
a = get_cell(sheet,'Organization',row_index,lkey,str_split=True)
if a:
data[-1]['parent_org'] = a
### ADD CONTACT DETAILS
###
# website
a = get_cell(sheet,'Web',row_index,lkey)
if a:
data[-1]['website'] = a
# email
a = get_cell(sheet,'Email',row_index,lkey)
if a:
data[-1]['email_gen'] = a
# contact established
a = get_cell(sheet,'ContactEstablished',row_index,lkey)
if a:
if a == 'Yes':
a = True
elif a == 'No':
a = False
data[-1]['contacted'] = a
# contact person
a = get_cell(sheet,'ContactPerson',row_index,lkey)
if a:
data[-1]['contact_person'] = (
a,
get_cell(sheet,'EmailContactPerson',row_index,lkey)
)
### ADD CHARACTERISTICS
###
# international
a = get_cell(sheet,'International',row_index,lkey)
if a:
if a == 'Yes':
a = True
elif a == 'No':
a = False
data[-1]['international'] = a
# type
org_type = get_cell(sheet,'Type',row_index,lkey)
org_type_array = []
if not org_type:
pass
elif org_type == 'No information':
pass
else:
for char in org_type:
org_type_array.append(char)
data[-1]["org_type"] = org_type_array
# thematic area of focus
a = get_cell(sheet,'Subject',row_index,lkey,str_split="([:;, ']|\. |\.$)")
if a:
# input control
b = []
for j in a:
i = j.lower()
if i == '3.':
i = '3'
if i == '1.h':
i = '1h'
# unspecified 2 and 6 are 2.9 and 6.9
i = re.sub(r'^([26])([a-m]?)$', r'\1.9\2', i)
i = re.sub(r'q', 'g', i)
## strict matrix check
if not re.match(r"^(1|2\.9|2\.1|2\.2|2\.3|3|4|5|6.9|6\.1|6\.2|6\.3|7|8|9|10|11|12)[a-m]?$", i):
print "please correct subject: '%s' in %s" % (i, get_cell(sheet,'MainNameEn',row_index,lkey))
else:
b.append(i)
if i != j :
print "autocorrect '%s' => '%s'" % (j,i)
data[-1]['subject'] = b
# structure
a = get_cell(sheet,'Structure',row_index,lkey)
if a:
data[-1]['structure'] = a
# to create array you would need to differentiate between delimiter and sub list in ()
# data[-1]["structure"] = split_str_array(structure, '; ',')
# finances
a = get_cell(sheet,'Finances',row_index,lkey)
if a:
data[-1]['finances'] = a
# Foundation
a = get_cell(sheet,'Foundation',row_index,lkey)
if a:
data[-1]['foundation'] = re.sub(r'\.0', '', a) ## get_cell gives... 1998.0
# history
a = get_cell(sheet,'History',row_index,lkey)
if a:
data[-1]['history'] = a
# aim
a = get_cell(sheet,'Aim',row_index,lkey)
if a:
data[-1]['aim'] = a
# aimURL
a = get_cell(sheet,'AimURL',row_index,lkey)
if a:
data[-1]['aim_URL'] = a
# IRD definition
a = get_cell(sheet,'IRDdefinition',row_index,lkey)
if a:
data[-1]['IRD_def'] = a
# IRD definition URL
a = get_cell(sheet,'IRDdefinitionURL',row_index,lkey)
if a:
data[-1]['IRD_def_URL'] = a
# religious affiliation
a = get_cell(sheet,'ReligiousAffiliation',row_index,lkey,str_split=True)
if a:
data[-1]['religious_affiliation'] = a
# languages
a = get_cell(sheet,'Languages',row_index,lkey,str_split=True)
if a:
data[-1]['languages'] = a
# Staff
a = get_cell(sheet,'Staff',row_index,lkey,str_split=True)
if a:
data[-1]['staff'] = a
# Region Focus
a = get_cell(sheet,'RegionFocus',row_index,lkey,str_split='[;,]')
if a:
data[-1]['regionfocus'] = a
### ADD ACTIVITIES
###
# General activities
a = get_cell(sheet,'Activities',row_index,lkey)
if a:
if a == 'No information':
data[-1]['general_activities'] =['No information']
elif a == 'See IRDActivities':
data[-1]['general_activities'] =['See IRDActivities']
else:
# regex to match pattern of <number>. <text>: to create true key values
activities = re.split('([0-9]{1,}\. [a-zA-Z \'\-!\0-9{1,}+,&]+:)',a)
activity_array = []
# activities = re.split('([0-9]{1,}\. [a-zA-Z ]+:)',a)
activity_name_array = []
activity_description_array = []
for activity in activities:
if activity == "":
pass
elif re.match('[0-9]\.',activity):
activity = re.sub('[0-9]\. ','',activity)
activity = re.sub(':','',activity)
activity_name_array.append(activity)
else:
activity = activity.strip()
activity_description_array.append(activity)
for x in xrange(1,len(activity_name_array)):
try:
activity_array.append({'activity_name':activity_name_array[x],'activity_description':activity_description_array[x]})
except IndexError:
print "!!ERROR: not as many activities descriptions as names in '%s' (%s)" % (get_cell(sheet,'Acronym',row_index,lkey), get_cell(sheet,'MainNameEn',row_index,lkey))
data[-1]['general_activities'] = activity_array
# IRD activities -- need to apply above model to separate activity name and activity description
a = get_cell(sheet,'IRDActivities',row_index,lkey)
if a:
if a == 'No information':
data[-1]['IRD_activities'] =['No information']
else:
IRD_activities_reg = re.split('[0-9]{1,2}\. ',get_cell(sheet,'IRDALocation',row_index,lkey))
IRD_activities = re.split('[0-9]{1,2}\. ',a)
IRD_activities_array = []
del IRD_activities[0]
del IRD_activities_reg[0]
## turn on to look for ragged array match
# if len(IRD_activities_reg) != len(IRD_activities):
# print name_en
# print IRD_activities_reg
# print IRD_activities
try:
for x in xrange(1,len(IRD_activities)):
region = re.sub('Lebanon, Syria, Egypt and Jordan', 'Lebanon; Syria; Egypt; Jordan', IRD_activities_reg[x])
region = re.sub('Bosnia and Herzegovina', 'Bosnia-Herzegovina', region)
region = re.sub('India, Pakistan, Bangladesh, Sri Lanka', 'India; Pakistan; Bangladesh; Sri Lanka', region)
region = re.sub('St. Jean Cap', 'St Jean Cap', region)
region = re.split('[;\.]| and ', region)
region = [ i.strip() for i in region if i.strip() ]
IRD_activity_obj = {
'activity' : IRD_activities[x],
'region' : region
}
IRD_activities_array.append(IRD_activity_obj)
except IndexError:
print "!!ERROR: non-matching number of activities and regions in '%s' (%s)" % (get_cell(sheet,'Acronym',row_index,lkey), get_cell(sheet,'MainNameEn',row_index,lkey))
data[-1]['IRD_activities'] = IRD_activities_array
# events
a = get_cell(sheet,'Events',row_index,lkey,str_split=True)
if a:
data[-1]['events'] = a
# publications
a = get_cell(sheet,'Publications',row_index,lkey,str_split=True)
if a:
data[-1]['publications'] = a
### RELATIONSHIPS
###
# IO relationships
a = get_cell(sheet,'RelationsIO',row_index,lkey,str_split=True)
if a:
data[-1]['IO_relations'] = a
# Other relationships
a = get_cell(sheet,'RelationsOther',row_index,lkey,str_split=True)
if a:
data[-1]['other_relations'] = a
# geocoding
addr = {}
geo = {}
for i in 'AddressMain/Address1/Address 1/Address2/Address 2/Address3/Address 3/Address4/Address 4/Address5/Address 5/Address3AndMore'.split('/'):
try:
a = get_cell(sheet, i, row_index,lkey)
#import ipdb; ipdb.set_trace()#
if a and a != 'No information':
geo = geocode(a)
geo['address'] = a
if i == 'AddressMain':
i = 'Address1'
if i == 'Address 1':
i = 'Address1'
if i == 'Address 2':
i = 'Address2'
if i == 'Address 3':
i = 'Address3'
if i == 'Address 4':
i = 'Address4'
if i == 'Address 5':
i = 'Address5'
addr[i] = geo
except KeyError:
pass
if addr:
data[-1]['adresses'] = addr
try:
country = re.sub(r', *\d+$', '', addr['Address1']['normalized'])
country = re.sub(r'^.*, ', '', country)
country = re.sub(r'(Al Jubail | *\d+ *)', '', country)
data[-1]['country'] = country
except KeyError:
pass
### ADD ENTRY STAMP DETAILs
###
a = get_cell(sheet,'Entry',row_index,lkey)
if not a:
pass
else:
try:
entry_value_array = split_str_array(a, ', ')
entry_date = datetime.strptime(entry_value_array[1], "%d.%m.%Y").date()
data[-1]["entry"] = {'author' : entry_value_array[0], 'date' : str(entry_date.year)+str(entry_date.month).zfill(2)+str(entry_date.day).zfill(2)}
except Exception:
# we don't care about this data format
#print "!!ERROR: bad format for entry date in '%s'" % a
data[-1]["entry"] = a;
return data
if __name__ == '__main__':
parse(sheet, data)
|
gpl-2.0
| 8,808,311,664,993,507,000
| 26.521127
| 170
| 0.59304
| false
| 2.71842
| false
| false
| false
|
mzhr/snakepig_engine
|
src/game.py
|
1
|
1551
|
import pyglet
from pyglet.window import key
from src import world
class GameStates:
MAIN_MENU = 0
GAME_LOAD = 1
GAME_PLAY = 2
GAME_MENU = 3
class Window(pyglet.window.Window):
def __init__(self, *args, **kwargs):
# Initialize window.
super(Window, self).__init__(800, 600, *args, **kwargs)
		# Initialize window icon.
self.icon = pyglet.image.load("data/icon.png")
self.set_icon(self.icon)
# Initialize initial game state.
		# Currently set to GAME_LOAD for testing purposes.
		# Should start on MAIN_MENU later on.
self.current_state = GameStates.GAME_LOAD
		# Initialize batch for image drawing.
self.batch = pyglet.graphics.Batch()
self.group_background = pyglet.graphics.OrderedGroup(0)
self.group_tile = pyglet.graphics.OrderedGroup(1)
self.group_character = pyglet.graphics.OrderedGroup(2)
self.group_text = pyglet.graphics.OrderedGroup(3)
self.backgrounds = []
self.tiles = []
self.characters = []
self.texts = []
		# Initialize input buffer.
self.keys = pyglet.window.key.KeyStateHandler()
self.push_handlers(self.keys)
		# Initialize fps and update functions.
self.fps_display = pyglet.clock.ClockDisplay()
pyglet.clock.schedule_interval(self.update, 1/60.0)
def on_draw(self):
self.clear()
self.batch.draw()
self.fps_display.draw()
def update(self, dt):
if self.current_state == GameStates.GAME_LOAD:
self.game_world = world.World(self, "data/world.txt")
self.current_state = GameStates.GAME_PLAY
if self.current_state == GameStates.GAME_PLAY:
self.game_world.update()
|
mit
| 319,767,813,804,100,160
| 26.210526
| 57
| 0.711154
| false
| 3.005814
| false
| false
| false
|
voidpp/vcp
|
vcp/system_package_manager_handlers.py
|
1
|
3348
|
import os
import re
import logging
from subprocess import check_call, check_output, CalledProcessError, PIPE
from abc import ABCMeta, abstractmethod, abstractproperty
import platform
from .exceptions import SystemPackageManagerHandlerException
logger = logging.getLogger(__name__)
def register(name, determiner):
def wrapper(cls):
cls.name = name
cls.determiner = determiner
SystemPackageManagerHandlerFactory.types[name] = cls
return cls
return wrapper
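# The register decorator above stamps each handler class with its package-manager
# name and determiner and records it in SystemPackageManagerHandlerFactory.types;
# create() then probes every determiner's test() and instantiates the first
# handler whose platform check succeeds.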
class SystemPackageManagerHandlerFactory(object):
types = {}
def create(self):
for name, cls in list(self.types.items()):
if cls.determiner.test():
return self.types[name]()
raise SystemPackageManagerHandlerException("Cannot determine the current system distro name.")
class SystemPackageManagerHandlerHandlerBase(object, metaclass=ABCMeta):
def get_system_dependencies(self, project):
if self.name not in project.system_dependencies:
return []
return project.system_dependencies[self.name]
def get_not_installed_packages(self, project):
names = self.get_system_dependencies(project)
return [name for name in names if not self.is_package_installed(name)]
@abstractmethod
def is_package_installed(self, name):
pass
class DeterminerBase(object, metaclass=ABCMeta):
@abstractmethod
def test(self):
pass
class LinuxDeterminer(DeterminerBase):
def __init__(self, *distro_names):
self._names = distro_names
def test(self):
distro_name = platform.linux_distribution()[0].lower()
return distro_name in self._names
class MaxOSDeterminer(DeterminerBase):
def __init__(self, pkg_mgr):
self._pkg_mgr = pkg_mgr
def test(self):
mac_ver = platform.mac_ver()
# not mac
if len(mac_ver[0]) < 2:
return False
try:
check_call(['which', self._pkg_mgr], stdout = PIPE, stderr = PIPE)
return True
except CalledProcessError:
return False
@register('brew', MaxOSDeterminer('brew'))
class BrewHandler(SystemPackageManagerHandlerHandlerBase):
def is_package_installed(self, name):
try:
check_call(['brew', 'ls', '--versions', name], stdout = PIPE, stderr = PIPE)
return True
except CalledProcessError as e:
if e.returncode == 1:
return False
else:
raise
@register('dpkg', LinuxDeterminer('debian', 'ubuntu', 'linuxmint'))
class DPKGHandler(SystemPackageManagerHandlerHandlerBase):
def is_package_installed(self, name):
try:
check_call(['dpkg', '-s', name], stdout = PIPE, stderr = PIPE)
return True
except CalledProcessError as e:
if e.returncode == 1:
return False
else:
raise
@register('pacman', LinuxDeterminer('arch'))
class PacManHandler(SystemPackageManagerHandlerHandlerBase):
def is_package_installed(self, name):
try:
check_call(['pacman', '-Qi', name], stdout = PIPE, stderr = PIPE)
return True
except CalledProcessError as e:
if e.returncode == 1:
return False
else:
raise
|
mit
| -888,727,097,514,618,100
| 28.368421
| 102
| 0.629331
| false
| 4.370757
| false
| false
| false
|
srusskih/SublimeBicycleRepair
|
bike/refactor/inlineVariable.py
|
1
|
3533
|
from bike.query.findDefinition import findAllPossibleDefinitionsByCoords
from bike.query.findReferences import findReferences
from bike.parsing.parserutils import maskStringsAndRemoveComments, linecontinueRE
from bike.transformer.undo import getUndoStack
from bike.transformer.save import queueFileToSave
from parser import ParserError
from bike.parsing.load import getSourceNode
import compiler
import re
def inlineLocalVariable(filename, lineno,col):
sourceobj = getSourceNode(filename)
return inlineLocalVariable_old(sourceobj, lineno,col)
def inlineLocalVariable_old(sourcenode,lineno,col):
definition, region, regionlinecount = getLocalVariableInfo(sourcenode, lineno, col)
addUndo(sourcenode)
replaceReferences(sourcenode, findReferences(sourcenode.filename, definition.lineno, definition.colno), region)
delLines(sourcenode, definition.lineno-1, regionlinecount)
updateSource(sourcenode)
def getLocalVariableInfo(sourcenode, lineno, col):
definition = findDefinition(sourcenode, lineno, col)
region, linecount = getRegionToInline(sourcenode, definition)
return definition, region, linecount
def findDefinition(sourcenode, lineno, col):
definition = findAllPossibleDefinitionsByCoords(sourcenode.filename,
lineno,col).next()
assert definition.confidence == 100
return definition
def getRegionToInline(sourcenode, defn):
line, linecount = getLineAndContinues(sourcenode, defn.lineno)
start, end = findRegionToInline(maskStringsAndRemoveComments(line))
return line[start:end], linecount
def findRegionToInline(maskedline):
match = re.compile("[^=]+=\s*(.+)$\n", re.DOTALL).match(maskedline)
assert match
return match.start(1), match.end(1)
# Possible refactoring: move to class of sourcenode
def getLineAndContinues(sourcenode, lineno):
line = sourcenode.getLine(lineno)
linecount = 1
while linecontinueRE.search(line):
line += sourcenode.getLine(lineno + linecount)
linecount += 1
return line, linecount
def addUndo(sourcenode):
getUndoStack().addSource(sourcenode.filename,sourcenode.getSource())
def replaceReferences(sourcenode, references, replacement):
for reference in safeReplaceOrder( references ):
replaceReference(sourcenode, reference, replacement)
def safeReplaceOrder( references ):
"""
When inlining a variable, if multiple instances occur on the line, then the
last reference must be replaced first. Otherwise the remaining intra-line
references will be incorrect.
"""
def safeReplaceOrderCmp(self, other):
return -cmp(self.colno, other.colno)
result = list(references)
result.sort(safeReplaceOrderCmp)
return result
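# Worked illustration (not in the original module): for the line "y = a + a",
# references to "a" sit at columns 4 and 8; replacing the column-8 occurrence
# first leaves the column-4 offset valid, whereas replacing column 4 first with
# a longer expression would shift the second reference and corrupt its columns.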
def replaceReference(sourcenode, ref, replacement):
""" sourcenode.getLines()[ref.lineno-1][ref.colno:ref.colend] = replacement
But strings don't support slice assignment as they are immutable. :(
"""
sourcenode.getLines()[ref.lineno-1] = \
replaceSubStr(sourcenode.getLines()[ref.lineno-1],
ref.colno, ref.colend, replacement)
def replaceSubStr(str, start, end, replacement):
return str[:start] + replacement + str[end:]
# Possible refactoring: move to class of sourcenode
def delLines(sourcenode, lineno, linecount=1):
del sourcenode.getLines()[lineno:lineno+linecount]
def updateSource(sourcenode):
queueFileToSave(sourcenode.filename,"".join(sourcenode.getLines()))
|
mit
| 3,738,141,442,008,807,000
| 36.585106
| 115
| 0.741296
| false
| 3.730729
| false
| false
| false
|
jkandasa/integration_tests
|
cfme/tests/cli/test_appliance_console.py
|
1
|
14121
|
import pytest
from collections import namedtuple
from wait_for import wait_for
from cfme.utils import os
from cfme.utils.log_validator import LogValidator
from cfme.utils.log import logger
from cfme.utils.conf import hidden
import tempfile
import lxml.etree
import yaml
TimedCommand = namedtuple('TimedCommand', ['command', 'timeout'])
LoginOption = namedtuple('LoginOption', ['name', 'option', 'index'])
TZ = namedtuple('TimeZone', ['name', 'option'])
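# TimedCommand pairs a console input with a per-step timeout in seconds, used
# below for long-running steps such as database creation (e.g.
# TimedCommand(pwd, 360)); plain strings presumably fall back to the default
# console timeout.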
tzs = [
TZ('Africa/Abidjan', ('1', '1')),
TZ('America/Argentina/Buenos_Aires', ('2', '6', '1')),
TZ('Antarctica/Casey', ('3', 'q', '1')),
TZ('Arctic/Longyearbyen', ('4', 'q', '1')),
TZ('Asia/Aden', ('5', '1')),
TZ('Atlantic/Azores', ('6', 'q', '1')),
TZ('Australia/Adelaide', ('7', 'q', '1')),
TZ('Europe/Amsterdam', ('8', '1')),
TZ('Indian/Antananarivo', ('9', 'q', '1')),
TZ('Pacific/Apia', ('10', '1')),
TZ('UTC', ('11',))
]
@pytest.mark.smoke
def test_black_console(appliance):
    """'ap | tee /tmp/opt.txt' saves stdout to a file, 'ap' launches appliance_console."""
command_set = ('ap | tee -a /tmp/opt.txt', 'ap')
appliance.appliance_console.run_commands(command_set)
assert appliance.ssh_client.run_command("cat /tmp/opt.txt | grep '{} Virtual Appliance'"
.format(appliance.product_name))
assert appliance.ssh_client.run_command("cat /tmp/opt.txt | grep '{} Database:'"
.format(appliance.product_name))
assert appliance.ssh_client.run_command("cat /tmp/opt.txt | grep '{} Version:'"
.format(appliance.product_name))
def test_black_console_set_hostname(appliance):
"""'ap' launch appliance_console, '' clear info screen, '1' loads network settings, '4/5' gives
access to set hostname, 'hostname' sets new hostname."""
hostname = 'test.example.com'
opt = ('1', '5') if appliance.version >= "5.8" else ('4',)
command_set = ('ap', '',) + opt + (hostname,)
appliance.appliance_console.run_commands(command_set)
def is_hostname_set(appliance):
assert appliance.ssh_client.run_command("hostname -f | grep {hostname}"
.format(hostname=hostname))
wait_for(is_hostname_set, func_args=[appliance])
return_code, output = appliance.ssh_client.run_command("hostname -f")
assert output.strip() == hostname
assert return_code == 0
@pytest.mark.parametrize('timezone', tzs, ids=[tz.name for tz in tzs])
def test_black_console_set_timezone(timezone, temp_appliance_preconfig_modscope):
"""'ap' launch appliance_console, '' clear info screen, '2/5' set timezone, 'opt' select
    region, 'timezone' selects zone, 'y' confirm selection, '' finish."""
opt = '2' if temp_appliance_preconfig_modscope.version >= "5.8" else '5'
command_set = ('ap', '', opt) + timezone[1] + ('y', '')
temp_appliance_preconfig_modscope.appliance_console.run_commands(command_set)
temp_appliance_preconfig_modscope.appliance_console.timezone_check(timezone)
def test_black_console_internal_db(app_creds, temp_appliance_unconfig_funcscope):
"""'ap' launch appliance_console, '' clear info screen, '5/8' setup db, '1' Creates v2_key,
'1' selects internal db, 'y' continue, '1' use partition, 'n' don't create dedicated db, '0'
db region number, 'pwd' db password, 'pwd' confirm db password + wait 360 secs and '' finish."""
pwd = app_creds['password']
opt = '5' if temp_appliance_unconfig_funcscope.version >= "5.8" else '8'
command_set = ('ap', '', opt, '1', '1', 'y', '1', 'n', '0', pwd, TimedCommand(pwd, 360), '')
temp_appliance_unconfig_funcscope.appliance_console.run_commands(command_set)
temp_appliance_unconfig_funcscope.wait_for_evm_service()
temp_appliance_unconfig_funcscope.wait_for_web_ui()
def test_black_console_internal_db_reset(temp_appliance_preconfig_funcscope):
"""'ap' launch appliance_console, '' clear info screen, '5/8' setup db, '4' reset db, 'y'
confirm db reset, '1' db region number + wait 360 secs, '' continue"""
opt = '5' if temp_appliance_preconfig_funcscope.version >= "5.8" else '8'
temp_appliance_preconfig_funcscope.ssh_client.run_command('systemctl stop evmserverd')
command_set = ('ap', '', opt, '4', 'y', TimedCommand('1', 360), '')
temp_appliance_preconfig_funcscope.appliance_console.run_commands(command_set)
temp_appliance_preconfig_funcscope.ssh_client.run_command('systemctl start evmserverd')
temp_appliance_preconfig_funcscope.wait_for_evm_service()
temp_appliance_preconfig_funcscope.wait_for_web_ui()
def test_black_console_dedicated_db(temp_appliance_unconfig_funcscope, app_creds):
"""'ap' launch appliance_console, '' clear info screen, '5/8' setup db, '1' Creates v2_key,
'1' selects internal db, 'y' continue, '1' use partition, 'y' create dedicated db, 'pwd'
db password, 'pwd' confirm db password + wait 360 secs and '' finish."""
pwd = app_creds['password']
opt = '5' if temp_appliance_unconfig_funcscope.version >= "5.8" else '8'
command_set = ('ap', '', opt, '1', '1', 'y', '1', 'y', pwd, TimedCommand(pwd, 360), '')
temp_appliance_unconfig_funcscope.appliance_console.run_commands(command_set)
wait_for(lambda: temp_appliance_unconfig_funcscope.db.is_dedicated_active)
def test_black_console_external_db(temp_appliance_unconfig_funcscope, app_creds, appliance):
"""'ap' launch appliance_console, '' clear info screen, '5/8' setup db, '2' fetch v2_key,
'ip' address to fetch from, '' default username, 'pwd' db password, '' default v2_key location,
    '3' join external region, 'port' ip and port of joining region, '' use default db name, ''
default username, 'pwd' db password, 'pwd' confirm db password + wait 360 secs and '' finish."""
ip = appliance.hostname
pwd = app_creds['password']
opt = '5' if temp_appliance_unconfig_funcscope.version >= "5.8" else '8'
port = (ip, '') if temp_appliance_unconfig_funcscope.version >= "5.8" else (ip,)
command_set = ('ap', '', opt, '2', ip, '', pwd, '', '3') + port + ('', '',
pwd, TimedCommand(pwd, 360), '')
temp_appliance_unconfig_funcscope.appliance_console.run_commands(command_set)
temp_appliance_unconfig_funcscope.wait_for_evm_service()
temp_appliance_unconfig_funcscope.wait_for_web_ui()
def test_black_console_external_db_create(app_creds, dedicated_db_appliance,
temp_appliance_unconfig_funcscope):
"""'ap' launch appliance_console, '' clear info screen, '5/8' setup db, '1' create v2_key,
'2' create region in external db, '0' db region number, 'y' confirm create region in external db
    'port' ip and port for dedicated db, '' use default db name, '' default username, 'pwd' db
password, 'pwd' confirm db password + wait 360 secs and '' finish."""
ip = dedicated_db_appliance.hostname
pwd = app_creds['password']
opt = '5' if temp_appliance_unconfig_funcscope.version >= "5.8" else '8'
port = (ip, '') if temp_appliance_unconfig_funcscope.version >= "5.8" else (ip,)
command_set = ('ap', '', opt, '1', '2', '0', 'y') + port + ('', '', pwd,
TimedCommand(pwd, 360), '')
temp_appliance_unconfig_funcscope.appliance_console.run_commands(command_set)
temp_appliance_unconfig_funcscope.wait_for_evm_service()
temp_appliance_unconfig_funcscope.wait_for_web_ui()
def test_black_console_extend_storage(unconfigured_appliance):
"""'ap' launches appliance_console, '' clears info screen, '10/13' extend storage, '1' select
disk, 'y' confirm configuration and '' complete."""
opt = '10' if unconfigured_appliance.version >= "5.8" else '13'
command_set = ('ap', '', opt, '1', 'y', '')
unconfigured_appliance.appliance_console.run_commands(command_set)
def is_storage_extended(unconfigured_appliance):
assert unconfigured_appliance.ssh_client.run_command("df -h | grep /var/www/miq_tmp")
wait_for(is_storage_extended, func_args=[unconfigured_appliance])
@pytest.mark.skip('No IPA servers currently available')
def test_black_console_ipa(ipa_creds, configured_appliance):
"""'ap' launches appliance_console, '' clears info screen, '11/14' setup IPA, 'y' confirm setup
+ wait 40 secs and '' finish."""
opt = '11' if configured_appliance.version >= "5.8" else '14'
command_set = ('ap', '', opt, ipa_creds['hostname'], ipa_creds['domain'], '',
ipa_creds['username'], ipa_creds['password'], TimedCommand('y', 40), '')
configured_appliance.appliance_console.run_commands(command_set)
def is_sssd_running(configured_appliance):
assert configured_appliance.ssh_client.run_command("systemctl status sssd | grep running")
wait_for(is_sssd_running, func_args=[configured_appliance])
return_code, output = configured_appliance.ssh_client.run_command(
"cat /etc/ipa/default.conf | grep 'enable_ra = True'")
assert return_code == 0
@pytest.mark.skip('No IPA servers currently available')
@pytest.mark.parametrize('auth_type', [
LoginOption('sso', 'sso_enabled', '1'),
LoginOption('saml', 'saml_enabled', '2'),
LoginOption('local_login', 'local_login_disabled', '3')
], ids=['sso', 'saml', 'local_login'])
def test_black_console_external_auth(auth_type, app_creds, ipa_crud):
"""'ap' launches appliance_console, '' clears info screen, '12/15' change ext auth options,
'auth_type' auth type to change, '4' apply changes."""
evm_tail = LogValidator('/var/www/miq/vmdb/log/evm.log',
matched_patterns=['.*{} to true.*'.format(auth_type.option)],
hostname=ipa_crud.hostname,
username=app_creds['sshlogin'],
password=app_creds['password'])
evm_tail.fix_before_start()
opt = '12' if ipa_crud.version >= "5.8" else '15'
command_set = ('ap', '', opt, auth_type.index, '4')
ipa_crud.appliance_console.run_commands(command_set)
evm_tail.validate_logs()
evm_tail = LogValidator('/var/www/miq/vmdb/log/evm.log',
matched_patterns=['.*{} to false.*'.format(auth_type.option)],
hostname=ipa_crud.hostname,
username=app_creds['sshlogin'],
password=app_creds['password'])
evm_tail.fix_before_start()
opt = '12' if ipa_crud.version >= "5.8" else '15'
command_set = ('ap', '', opt, auth_type.index, '4')
ipa_crud.appliance_console.run_commands(command_set)
evm_tail.validate_logs()
@pytest.mark.skip('No IPA servers currently available')
def test_black_console_external_auth_all(app_creds, ipa_crud):
"""'ap' launches appliance_console, '' clears info screen, '12/15' change ext auth options,
'auth_type' auth type to change, '4' apply changes."""
evm_tail = LogValidator('/var/www/miq/vmdb/log/evm.log',
matched_patterns=['.*sso_enabled to true.*', '.*saml_enabled to true.*',
'.*local_login_disabled to true.*'],
hostname=ipa_crud.hostname,
username=app_creds['sshlogin'],
password=app_creds['password'])
evm_tail.fix_before_start()
opt = '12' if ipa_crud.version >= "5.8" else '15'
command_set = ('ap', '', opt, '1', '2', '3', '4')
ipa_crud.appliance_console.run_commands(command_set)
evm_tail.validate_logs()
evm_tail = LogValidator('/var/www/miq/vmdb/log/evm.log',
matched_patterns=['.*sso_enabled to false.*',
'.*saml_enabled to false.*', '.*local_login_disabled to false.*'],
hostname=ipa_crud.hostname,
username=app_creds['sshlogin'],
password=app_creds['password'])
evm_tail.fix_before_start()
opt = '12' if ipa_crud.version >= "5.8" else '15'
command_set = ('ap', '', opt, '1', '2', '3', '4')
ipa_crud.appliance_console.run_commands(command_set)
evm_tail.validate_logs()
def test_black_console_scap(temp_appliance_preconfig, soft_assert):
"""'ap' launches appliance_console, '' clears info screen, '14/17' Hardens appliance using SCAP
configuration, '' complete."""
opt = '14' if temp_appliance_preconfig.version >= "5.8" else '17'
command_set = ('ap', '', opt, '')
temp_appliance_preconfig.appliance_console.run_commands(command_set)
with tempfile.NamedTemporaryFile('w') as f:
f.write(hidden['scap.rb'])
f.flush()
os.fsync(f.fileno())
temp_appliance_preconfig.ssh_client.put_file(
f.name, '/tmp/scap.rb')
if temp_appliance_preconfig.version >= "5.8":
rules = '/var/www/miq/vmdb/productization/appliance_console/config/scap_rules.yml'
else:
rules = '/var/www/miq/vmdb/gems/pending/appliance_console/config/scap_rules.yml'
temp_appliance_preconfig.ssh_client.run_command('cd /tmp/ && ruby scap.rb '
'--rulesfile={rules}'.format(rules=rules))
temp_appliance_preconfig.ssh_client.get_file(
'/tmp/scap-results.xccdf.xml', '/tmp/scap-results.xccdf.xml')
temp_appliance_preconfig.ssh_client.get_file(
'{rules}'.format(rules=rules), '/tmp/scap_rules.yml') # Get the scap rules
with open('/tmp/scap_rules.yml') as f:
yml = yaml.load(f.read())
rules = yml['rules']
tree = lxml.etree.parse('/tmp/scap-results.xccdf.xml')
root = tree.getroot()
for rule in rules:
elements = root.findall(
'.//{{http://checklists.nist.gov/xccdf/1.1}}rule-result[@idref="{}"]'.format(rule))
if elements:
result = elements[0].findall('./{http://checklists.nist.gov/xccdf/1.1}result')
if result:
soft_assert(result[0].text == 'pass')
logger.info("{}: {}".format(rule, result[0].text))
else:
logger.info("{}: no result".format(rule))
else:
logger.info("{}: rule not found".format(rule))
|
gpl-2.0
| -3,463,984,314,061,797,400
| 48.547368
| 100
| 0.629913
| false
| 3.364546
| true
| false
| false
|
MLR-au/esrc-cnex
|
service/app/config.py
|
1
|
3159
|
import os
import sys
import os.path
import ConfigParser
import collections
import traceback
import ast
from pyramid.httpexceptions import HTTPBadRequest
import logging
log = logging.getLogger(__name__)
class ConfigBase:
def __init__(self):
pass
def get(self, section, param, aslist=False):
data = self.cfg.get(section, param) if (self.cfg.has_section(section) and self.cfg.has_option(section, param)) else None
if data == None:
log.error("Missing parameter %s in section %s" % (param, section))
if aslist:
return [ d.strip() for d in data.split(',') ]
return data
class Config(ConfigBase):
def __init__(self, conf):
"""
Expects to be called with a pyramid request object.
The path to the configs will be extracted from the pyramid
configuration and a config object will be returned.
The params from the config will be available as instance
variables.
@params:
request: a pyramid request object
"""
self.cfg = ConfigParser.SafeConfigParser()
try:
self.cfg.read(conf)
except ConfigParser.ParsingError:
log.error('Config file parsing errors')
log.error(sys.exc_info()[1])
sys.exit()
self.app_config = {
'general': {
'token': self.get('GENERAL', 'token'),
'data_age': self.get('GENERAL', 'data_age'),
'sites': self.get('GENERAL', 'sites'),
'disable_auth': ast.literal_eval(self.get('GENERAL', 'disable_auth')),
'share_path': self.get('GENERAL', 'share_path'),
'share_url': self.get('GENERAL', 'share_url'),
},
'mongodb': {
'nodes': self.get('MONGODB', 'nodes', aslist=True),
'user': self.get('MONGODB', 'user'),
'pass': self.get('MONGODB', 'pass'),
'db': self.get('MONGODB', 'db'),
'replica_set': self.get('MONGODB', 'replica.set'),
'write_concern': self.get('MONGODB', 'write.concern')
}
}
class SiteConfig(ConfigBase):
def __init__(self, conf):
self.cfg = ConfigParser.SafeConfigParser()
try:
self.cfg.read(conf)
except ConfigParser.ParsingError:
log.error('Config file parsing errors')
log.error(sys.exc_info()[1])
sys.exit()
def load(self, site):
conf = {}
conf['code'] = self.get('GENERAL', 'code')
conf['name'] = self.get('GENERAL', 'name')
conf['url'] = self.get('GENERAL', 'url')
conf['eac'] = self.get('GENERAL', 'eac')
datamap = self.get('GENERAL', 'map', aslist=True)
conf['map'] = {}
conf['map']['source'] = datamap[0]
conf['map']['localpath'] = datamap[1]
conf['public'] = ast.literal_eval(self.get('GENERAL', 'public'))
conf['allow_groups'] = self.get('GENERAL', 'allow_groups', aslist=True)
conf['allow_users'] = self.get('GENERAL', 'allow_users', aslist=True)
return conf
|
bsd-3-clause
| -5,075,897,506,453,778,000
| 32.606383
| 128
| 0.552073
| false
| 3.919355
| true
| false
| false
|
wagnerpeer/gitexplorer
|
gitexplorer/visualizations/punchcard.py
|
1
|
3117
|
'''
Created on 28.08.2017
@author: Peer
'''
from collections import defaultdict
import datetime
from itertools import chain
import matplotlib.pyplot as plt
from gitexplorer.basics import GitExplorerBase
def draw_punchcard(infos,
xaxis_range=24,
yaxis_range=7,
xaxis_ticks=range(24),
yaxis_ticks=['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'],
xaxis_label='Hour',
yaxis_label='Day'):
# build the array which contains the values
data = [[0.0] * xaxis_range for _ in range(yaxis_range)]
for key, value in infos.items():
data[key[0]][key[1]] = value
max_value = float(max(chain.from_iterable(data)))
# Draw the punchcard (create one circle per element)
    # Ugly normalisation allows us to obtain perfect circles instead of ovals.
for x in range(xaxis_range):
for y in range(yaxis_range):
circle = plt.Circle((x, y),
data[y][x] / 2 / max_value)
plt.gca().add_artist(circle)
plt.xlim(0, xaxis_range)
plt.ylim(0, yaxis_range)
plt.xticks(range(xaxis_range), xaxis_ticks)
plt.yticks(range(yaxis_range), yaxis_ticks)
plt.xlabel(xaxis_label)
plt.ylabel(yaxis_label)
plt.gca().invert_yaxis()
# make sure the axes are equal, and resize the canvas to fit the plot
plt.axis('scaled')
margin = 0.7
plt.axis([-margin, 23 + margin, 6 + margin, -margin])
scale = 0.5
plt.gcf().set_size_inches(xaxis_range * scale, yaxis_range * scale, forward=True)
plt.tight_layout()
def collect_data(commits):
'''
'''
information = defaultdict(int)
for commit in commits:
information[(commit['date'].isoweekday() - 1, commit['date'].hour)] += 1
return information
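# Illustrative note (not part of the original module): collect_data() keys its counts by
# (weekday, hour); a commit made on a Tuesday at 14:00 increments information[(1, 14)],
# and draw_punchcard() later reads that key as (row, column) of the punchcard grid.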
def find_commits(reference_day=datetime.datetime.today(),
days_before_reference=30,
number_of_commits=None):
'''Load commits from database meeting certain conditions.
Parameters
----------
days_before_reference: int (>=0), optional
Limit commits to number of days before reference_day
number_of_commits: int (>=0), optional
Limit the number of commits. If given it takes precedence before days_before_today.
Returns
-------
Documents meeting criteria defined through parameters
'''
criteria = {}
if(number_of_commits is None):
datetime_limit = reference_day - datetime.timedelta(days=days_before_reference)
criteria = {'date': {'$lte': reference_day, '$gte': datetime_limit}}
gitexplorer_database = GitExplorerBase.get_gitexplorer_database()
cursor = gitexplorer_database['commit_collection'].find(criteria)
if(number_of_commits is not None):
cursor = cursor.limit(number_of_commits)
return cursor
if(__name__ == '__main__'):
infos = collect_data(find_commits(days_before_reference=90,
number_of_commits=None))
draw_punchcard(infos)
plt.show()
|
mit
| 8,640,303,030,174,708,000
| 27.861111
| 110
| 0.616619
| false
| 3.769045
| false
| false
| false
|
jaked122/MUF-highlighting
|
MufLoad.py
|
1
|
17718
|
#!/bin/python3
from telnetlib import Telnet
import re
from hashlib import sha512
from typing import *
from time import sleep
from os import stat, path
import yaml
import argparse
import datetime
prognameMatch = re.compile("\(\(\( filename: (.+) \)\)\)")
progDependencyMatch = re.compile("\(\(\( dependsOn: (.+) \)\)\)")
progIncludeMatch = re.compile(
'\(\(\( includes: (.+) as (\.\.[a-zA-Z0-9-]+) \)\)\)')
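# Illustrative sketch (not part of the original source): the header comments that the
# regexes above look for inside a MUF source file would look roughly like this
# (the file names and the library alias are made up):
#     ((( filename: channel.muf )))
#     ((( dependsOn: lib/strings.muf )))
#     ((( includes: lib/strings.muf as ..lib-strings )))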
programFinder = "@find {}\n"
programFinderRegex = re.compile(b"(.+)([0-9]+):.+")
programFinderTerminator = re.compile(b'\*\*\*End of List\*\*\*')
ProgramId = re.compile(b"Program .+ created with number ([0-9]+)")
ProgramId2 = re.compile(
b'Entering editor for .+\(#([0-9]+).+\)\.')
# Command to list content of a program, showing line numbers
programListCommand = "@dlist {}\n"
programListMatch = re.compile(b"\s*([0-9]+):(.+)\r\n")
programListTerminator = re.compile(b"[0-9]+ lines displayed\.")
editorInsertExitMatch = [re.compile(b"Exiting insert mode\.")]
editorCompilerStringMatch = [re.compile(
b"Compiler done\."), re.compile(b"^Error in line")]
editorExitStringMatch = [re.compile(b"Editor exited\.")]
objectModifiedStringFieldMatch = \
[
re.compile(b"Modified: (.+) by (.+)$"),
re.compile(b"I don't see that there\.$")
]
objectModificationCommand = "ex {}\n"
functionListCommand = "@listfunc {}\n"
functionListRegex = re.compile("\x1b\[[^m]*m")
# Goals:
# Manage Dependencies:
# Upload changed files in necessary order
# Replacing special tokens with the correct program reference
# Send minimal line-by-line diff in format accepted by @edit
# and @prog
# Provide Server-Client code synchronization when requested
# (This will be hard to do properly in a very noisy server)
# Provide cleanup functionality for things that totally fuck up the system
# Current stuff that needs doing
# [x] Determine if file needs to be updated due to the program being modified
# since it was last retrieved.
# [ ] Better latency handling for the editor commands.
# a. expect/error loop until match is found
# b. Maybe the telnet class could do with a wrapper class
# for handling this automatically.
# 3.
class SyncException(Exception):
def __init__(self, filename, remoteid):
super(Exception, self).__init__(filename, remoteid)
self.message = "The object with id {} associated with {}".\
format(remoteid, filename) + \
" could not be found"
def because_I_cant_understand_strptime(s: str):
months = {
"Jan": 1,
"Feb": 2,
"Mar": 3,
"Apr": 4,
"May": 5,
"Jun": 6,
"Jul": 7,
"Aug": 8,
"Sep": 9,
"Oct": 10,
"Nov": 11,
"Dec": 12
}
m = re.compile("(Sat|Sun|Mon|Tue|Wed|Thu|Fri) " +
"(Jan|Feb|Mar|Apr|May|Jun|Jul" +
"|Aug|Sep|Oct|Nov|Dec) " +
"([123 ][0-9]) " +
"([012 ][0-9]):" +
"([0-5][0-9]):" +
"([0-5][0-9]) " +
"(CST|CDT) " +
"([0-9]+)").match(s)
month = months[m.group(2)]
monthday = int(m.group(3))
hour = int(m.group(4))
minute = int(m.group(5))
second = int(m.group(6))
year = int(m.group(7))
    dt = datetime.datetime(year, month, monthday, hour, minute, second)
return dt
class MufFile():
def __init__(self, filename, depth=0, parent=None, send_method="name", id=None,
regname=None):
self.dependencies = []
self.transformedname = ""
self.filename = filename
self.hash = sha512()
self.length = 0
self.parent = parent
self.includes = {}
self.id = id
self.regname = regname
self.send_method = send_method
with open(filename) as file:
for z in file.readlines():
pnmatch = prognameMatch.match(z)
if pnmatch is not None:
self.transformedname = pnmatch.group(1)
continue
pdepmatch = progDependencyMatch.match(z)
if pdepmatch is not None:
self.dependencies.append(pdepmatch.group(1))
continue
pincMatch = progIncludeMatch.match(z)
if pincMatch is not None:
self.includes[pincMatch.group(2)] = pincMatch.group(1)
self.hash.update(z.encode())
self.length += 1
self.hash = self.hash.hexdigest()
def send(self, tc: Telnet):
let_be = False
while True:
if self.send_method == "name":
tc.write("@prog {}\n".format(self.transformedname).encode())
elif self.send_method == "id":
tc.write("@prog {}\n".format(self.id).encode())
elif self.send_method == "regname":
print("Using regname:{0}".format(self.regname))
tc.write("@prog {}\n".format(self.regname).encode())
mindex, match, _ = tc.expect([ProgramId, ProgramId2], timeout=3)
if match is not None:
self.id = int(match.group(1))
break
tc.write("1 {} delete\n".format(self.length * 10).encode())
tc.write("i\n".encode())
counter = 0
with open(self.filename) as fi:
lines = fi.readlines()
if len(lines[-1]) > 0:
lines.append('')
for i in lines:
tc.write("{}".format(i).encode())
# sleep(0.05)
counter += 1
print("{: =4.2}%".format(100 * counter / len(lines)),
end='\r', flush=True)
print("\n", end="", flush=True)
print("finished sending")
while True:
tc.write('.\n'.encode())
index, m, _ = tc.expect(editorInsertExitMatch,
timeout=5)
if m is not None:
break
print("compiling program")
while True:
tc.write("c\n".encode())
index, m, line = tc.expect(editorCompilerStringMatch,
timeout=7)
if index != None and index != 1:
                print("Message Received")
print(line.decode("ascii"))
let_be = True
if m is not None:
break
print("quitting")
while True:
if let_be:
tc.write("q\n".encode())
else:
tc.write("x\n".encode())
index, m, _ = tc.expect(editorExitStringMatch,
timeout=7)
if m is not None:
break
@staticmethod
def check_last_modified(filename, remoteid, tc: Telnet):
        tc.write(objectModificationCommand.format(remoteid).encode())
idx, match, _ = tc.expect(objectModifiedStringFieldMatch)
if idx == 1:
raise SyncException(filename, remoteid)
# mod_date = datetime.datetime.strptime(match.group(1),
# "%a %b %d %H:%M:%S %Z %Y")
mod_date = because_I_cant_understand_strptime(match.group(1))
local_stuff = path.getmtime(filename)
return mod_date >= local_stuff
@staticmethod
def sync(filename, remoteid, tc: Telnet):
tc.read_very_eager()
# tc.write(b"@set me=H\n")
# tc.write(b"pub #alloff\n")
sleep(2)
tc.read_very_eager()
tc.write(programListCommand.format(remoteid).encode())
print(programListCommand.format(remoteid))
with open(filename, 'w') as output:
lines = tc.read_until(b" lines displayed.").decode().split('\r\n')
for i in lines[:-1]:
if i[0:4] == "LOG>":
continue
if i[0:5] == "PROG>":
i = i[5:]
else:
continue
output.write(i + '\n')
# tc.write(b"@set me=!H\n")
# tc.write(b"pub #allon\n")
tc.read_very_eager()
# mindex = 0
# while mindex < 1:
# mindex, match, _ = tc.expect([programListMatch,
# programListTerminator])
# if mindex >= 1 \
# or match is None:
# break
# output.write(match.group(2).decode()+'\n')
# Keep track of whether or not files are up to date on the server.
class Cache():
def __init__(self, path):
import pickle
self.newfiles = {}
self.oldfiles = {}
        try:
            # pickle.load() needs an open file object, and rebinding 'self' would not
            # update this instance; restore the cached dictionaries instead.
            with open(path + ".cache", 'rb') as cachefile:
                cached = pickle.load(cachefile)
                self.newfiles = cached.newfiles
                self.oldfiles = cached.oldfiles
        except IOError:
            # probably doesn't exist
            pass
def addFile(self, file: MufFile):
fname = file.filename
if fname in self.oldfiles.keys():
if self.newfiles[fname].hash != file.hash:
self.oldfiles[fname] = self.newfiles[fname]
self.newfiles[fname] = file
def syncOld(self, file: MufFile, tc: Telnet):
        tc.write(programFinder.format(file.filename).encode())
mindex, match, _ = tc.expect([programFinderRegex,
programFinderTerminator])
fn = None
while match is not None and mindex != 1:
if match.group(1) == file.transformedname:
fn = match.group(1)
break
else:
mindex, match, _ = tc.expect([programFinderRegex,
programFinderTerminator])
        tc.write(programListCommand.format(fn).encode())
mindex = 0
lines = []
lastindex = 0
while mindex != 1:
mindex, match, _ = tc.expect([programListMatch,
programListTerminator])
if mindex != 1:
                if int(match.group(1)) != lastindex + 1:
print("Hmm. There might be a problem.")
else:
lastindex = int(match.group(1))
lines.append(match.group(2))
class DepGraph():
def __init__(self):
self.nodes = {}
self.edges = {}
self.depths = {}
self.validstarts = set()
def addFile(self, file: MufFile, depth=0):
self.nodes[file.filename] = file
if file.filename not in self.edges.keys():
self.edges[file.filename] = set()
self.depths[file.filename] = depth
if depth == 0:
self.validstarts.add(file.filename)
for fn in file.dependencies:
self.edges[file.filename].add(fn)
if fn not in self.nodes.keys():
self.addFile(MufFile(fn, depth=depth + 1), depth + 1)
def send(self, tc: Telnet):
stack = list()
path = []
sent = set()
for i in self.validstarts:
stack.append(i)
while len(stack) > 0:
cn = stack.pop()
if cn not in path:
path.append(cn)
else:
continue
for n in self.edges[cn]:
path.append(n)
stack.append(n)
for n in reversed(path):
print("Updating program {}".format(n))
self.nodes[n].send(tc)
# TODO: Use a cache to check if the file needs to be uploaded again,
# I.E. it's hash has changed.
# TODO: define a macro on the copy sent to the server such that each
# program refers to the correct id at runtime.
# argInterpret = argparse.ArgumentParser()
# argInterpret.add_argument()
# tc = Telnet(host="localhost", port=2001)
# tc.write(b"connect one potrzebie\n")
# dg = DepGraph()
# dg.addFile(MufFile("Channel/Channel.muf"))
# dg.send(tc)
parser = argparse.ArgumentParser("Manage files on the MUCK")
parser.add_argument("--send", dest='files', action='append',
help='Files to send', default=[])
parser.add_argument('--sync', dest='sync', action='store_const',
help='Sync files?', const=True, default=False)
parser.add_argument('--force-sync', default=[],
dest='needsync',
action='append', help='Force a file to be synced')
parser.add_argument('--send-all', dest='send_all', action='store_const',
help='send all files', const=True, default=False)
parser.add_argument('--spaz', const=True,default=False,action='store_const')
parser.add_argument('--primary',const=True,default=False,action='store_const')
parser.add_argument('--host',default=[],action='append',dest='host')
args = parser.parse_args()
with open('project.yaml') as projfile:
project = yaml.load(projfile)
print(project)
project = project['project']
for conn in project['connections']:
conn=conn['connect']
if args.primary and \
(not 'primary' in conn.keys()):
continue
if len(args.host)>0\
and conn['host'] not in args.host:
continue
print(conn)
tc = Telnet(host=conn['host'],
port=int(conn['port']))
tc.read_some()
tc.write("connect {} {}\n".format(conn['username'],
conn['password']).encode())
print("connect {} {}".format(conn['username'],
conn['password']))
sleep(2)
if args.spaz:
while True:
tc.close()
tc = Telnet(host=project['connect']['host'],
port=int(project['connect']['port']))
tc.read_some()
tc.write("connect {} {}\n".format(project['connect']['username'],
project['connect']['password']).encode())
sleep(0.1)
tc.read_some()
if args.sync and conn['sync']:
for i in project['sync']:
if 'no_exist' in i['file'].keys() and i['file']['no_exist']:
try:
stat(i['file']['name'])
print('skipping {}'.format(i['file']['name']))
continue
except FileNotFoundError:
print('need to get {}'.format(i['file']['name']))
MufFile.sync(i['file']['name'], i['file']['id'], tc)
for i in project['sync']:
if i['file']['name'] in args.needsync \
and 'sync' in conn.keys()\
and (not args.primary or\
args.primary and 'primary' in conn.keys()):
MufFile.sync(i['file']['name'], i['file']['id'], tc)
if args.send_all:
for i in project['send']:
f = None
should_send=True
if 'send_method' in i['file'].keys():
id = None
regname = None
print("Send method:" + i['file']['send_method'])
if 'id' in i['file'].keys():
id = i['file']['id']
if '#' in id and 'primary' not in conn.keys():
should_send=False
if 'regname' in i['file'].keys():
regname = i['file']['regname']
f = MufFile(i['file']['name'],
send_method=i['file']['send_method'],
id=id, regname=regname)
else:
print("No send method found")
f = MufFile(i['file']['name'])
f.transformedname = i['file']['gamename']
if '#' in i['file']['gamename'] and 'primary' not in conn.keys():
should_send=False
if not should_send:
                print('File', f.transformedname, 'is not encoded in a cross-game manner; skipping')
continue
print("Sending " + f.transformedname)
f.send(tc)
sleep(1)
print("\a")
else:
for i in project['send']:
if i['file']['name'] not in args.files:
continue
send_with_id = False
should_send = True
f = None
if 'send_method' in i['file'].keys():
id = None
regname = None
print("Send method:" + i['file']['send_method'])
if 'id' in i['file'].keys():
id = i['file']['id']
if '#' in id and 'primary' not in conn.keys():
should_send=False
if 'regname' in i['file'].keys():
regname = i['file']['regname']
f = MufFile(i['file']['name'],
send_method=i['file']['send_method'],
id=id, regname=regname)
else:
f = MufFile(i['file']['name'])
f.transformedname = i['file']['gamename']
if '#' in f.transformedname and 'primary' not in conn.keys():
should_send=False
if not should_send:
                print(f.transformedname, "is not kept with game-independent identification. Skipping")
continue
print("Sending " + f.transformedname)
f.send(tc)
sleep(1)
tc.close()
|
mit
| -9,212,332,200,114,590,000
| 38.112583
| 122
| 0.4943
| false
| 4.041515
| false
| false
| false
|
ablifedev/ABLIRC
|
ABLIRC/install/external_lib/HTSeq/__init__.py
|
1
|
39255
|
"""HTSeq is a package to process high-throughput sequencing data.
See http://www-huber.embl.de/users/anders/HTSeq for documentation.
"""
import itertools, warnings, os, shlex
try:
from _HTSeq import *
except ImportError:
if os.path.isfile( "setup.py" ):
raise ImportError( "Cannot import 'HTSeq' when working directory is HTSeq's own build directory.")
else:
raise
from _version import __version__
#from vcf_reader import *
#########################
## Utils
#########################
class FileOrSequence( object ):
   """ The constructor takes one argument, which may either be a string,
which is interpreted as a file name (possibly with path), or a
connection, by which we mean a text file opened for reading, or
any other object that can provide an iterator over strings
(lines of the file).
The advantage of passing a file name instead of an already opened file
is that if an iterator is requested several times, the file will be
re-opened each time. If the file is already open, its lines can be read
only once, and then, the iterator stays exhausted.
Furthermore, if a file name is passed that end in ".gz" or ".gzip"
(case insensitive), it is transparently gunzipped.
"""
def __init__( self, filename_or_sequence ):
self.fos = filename_or_sequence
self.line_no = None
def __iter__( self ):
self.line_no = 1
if isinstance( self.fos, str ):
if self.fos.lower().endswith( ( ".gz" , ".gzip" ) ):
lines = gzip.open( self.fos )
else:
lines = open( self.fos )
else:
lines = self.fos
for line in lines:
yield line
self.line_no += 1
if isinstance( self.fos, str ):
lines.close()
self.line_no = None
def __repr__( self ):
if isinstance( self.fos, str ):
return "<%s object, connected to file name '%s'>" % (
self.__class__.__name__, self.fos )
else:
return "<%s object, connected to %s >" % (
self.__class__.__name__, repr( self.fos ) )
def get_line_number_string( self ):
if self.line_no is None:
if isinstance( self.fos, str ):
return "file %s closed" % self.fos
else:
return "file closed"
if isinstance( self.fos, str ):
return "line %d of file %s" % ( self.line_no, self.fos )
else:
return "line %d" % self.line_no
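# Illustrative sketch (not part of the original module): a FileOrSequence built from
# a file name can be iterated over more than once, because the file is re-opened for
# every pass; "reads.txt" is a hypothetical file name.
def _example_file_or_sequence():
   fos = FileOrSequence( "reads.txt" )
   first_pass = sum( 1 for _ in fos )    # opens, reads and closes the file
   second_pass = sum( 1 for _ in fos )   # the file is opened again for this pass
   return first_pass, second_pass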
#########################
## Features
#########################
class GenomicFeature( object ):
"""A genomic feature, i.e., an interval on a genome with metadata.
At minimum, the following information should be provided by slots:
name: a string identifying the feature (e.g., a gene symbol)
type: a string giving the feature type (e.g., "gene", "exon")
iv: a GenomicInterval object specifying the feature locus
"""
def __init__( self, name, type_, interval ):
self.name = name
self.type = intern( type_ )
self.iv = interval
def __repr__( self ):
return "<%s: %s '%s' at %s: %d -> %d (strand '%s')>" % \
( self.__class__.__name__, self.type, self.name,
self.iv.chrom, self.iv.start_d, self.iv.end_d, self.iv.strand )
def __eq__( self, other ):
if not isinstance( other, GenomicFeature ):
return False
return self.name == other.name and self.type == other.type and \
self.iv == other.iv
   def __ne__( self, other ):
if not isinstance( other, GenomicFeature ):
return True
return not self.__eq__( other )
def get_gff_line( self, with_equal_sign=False ):
try:
source = self.source
except AttributeError:
source = "."
try:
score = self.score
except AttributeError:
score = "."
try:
frame = self.frame
except AttributeError:
frame = "."
try:
attr = self.attr
except AttributeError:
attr = { 'ID': self.name }
if with_equal_sign:
sep = "="
else:
sep = " "
attr_str = '; '.join( [ '%s%s\"%s\"' % ( ak, sep, attr[ak] ) for ak in attr ] )
return "\t".join( str(a) for a in ( self.iv.chrom, source,
self.type, self.iv.start+1, self.iv.end, score,
self.iv.strand, frame, attr_str ) ) + "\n"
_re_attr_main = re.compile( "\s*([^\s\=]+)[\s=]+(.*)" )
_re_attr_empty = re.compile( "^\s*$" )
def parse_GFF_attribute_string( attrStr, extra_return_first_value=False ):
"""Parses a GFF attribute string and returns it as a dictionary.
If 'extra_return_first_value' is set, a pair is returned: the dictionary
and the value of the first attribute. This might be useful if this is the ID.
"""
if attrStr.endswith( "\n" ):
attrStr = attrStr[:-1]
d = {}
first_val = "_unnamed_"
for (i, attr) in itertools.izip( itertools.count(), _HTSeq.quotesafe_split( attrStr ) ):
if _re_attr_empty.match( attr ):
continue
if attr.count( '"' ) not in ( 0, 2 ):
raise ValueError, "The attribute string seems to contain mismatched quotes."
mo = _re_attr_main.match( attr )
if not mo:
raise ValueError, "Failure parsing GFF attribute line"
val = mo.group(2)
if val.startswith( '"' ) and val.endswith( '"' ):
val = val[1:-1]
#val = urllib.unquote( val )
d[ intern(mo.group(1)) ] = intern(val)
if extra_return_first_value and i == 0:
first_val = val
if extra_return_first_value:
return ( d, first_val )
else:
return d
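# Illustrative sketch (not part of the original module): parsing a GTF-style attribute
# string; the attribute text below is a made-up example.
def _example_parse_gff_attributes():
   attr_str = 'gene_id "F08E10.4"; transcript_id "F08E10.4.1"'
   attr_dict, first_value = parse_GFF_attribute_string( attr_str, extra_return_first_value=True )
   # attr_dict should map 'gene_id' -> 'F08E10.4' and 'transcript_id' -> 'F08E10.4.1',
   # and first_value should be 'F08E10.4' (the value of the first attribute).
   return attr_dict, first_value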
_re_gff_meta_comment = re.compile( "##\s*(\S+)\s+(\S*)" )
class GFF_Reader( FileOrSequence ):
"""Parse a GFF file
Pass the constructor either a file name or an iterator of lines of a
GFF files. If a file name is specified, it may refer to a gzip compressed
file.
Iterating over the object then yields GenomicFeature objects.
"""
def __init__( self, filename_or_sequence, end_included=True ):
FileOrSequence.__init__( self, filename_or_sequence )
self.end_included = end_included
self.metadata = {}
def __iter__( self ):
for line in FileOrSequence.__iter__( self ):
if line == "\n":
continue
if line.startswith( '#' ):
if line.startswith( "##" ):
mo = _re_gff_meta_comment.match( line )
if mo:
self.metadata[ mo.group(1) ] = mo.group(2)
continue
( seqname, source, feature, start, end, score,
strand, frame, attributeStr ) = line.split( "\t", 8 )
( attr, name ) = parse_GFF_attribute_string( attributeStr, True )
if self.end_included:
iv = GenomicInterval( seqname, int(start)-1, int(end), strand )
else:
iv = GenomicInterval( seqname, int(start)-1, int(end)-1, strand )
f = GenomicFeature( name, feature, iv )
if score != ".":
score = float( score )
if frame != ".":
frame = int( frame )
f.source = source
f.score = score
f.frame = frame
f.attr = attr
yield f
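# Illustrative sketch (not part of the original module): counting exon features in a
# GTF file with GFF_Reader; "annotation.gtf" is a hypothetical file name.
def _example_gff_reader():
   exon_count = 0
   for feature in GFF_Reader( "annotation.gtf", end_included=True ):
      if feature.type == "exon":
         exon_count += 1
   return exon_count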
def make_feature_dict( feature_sequence ):
"""A feature dict is a convenient way to organize a sequence of Feature
object (which you have got, e.g., from parse_GFF).
The function returns a dict with all the feature types as keys. Each value
of this dict is again a dict, now of feature names. The values of this dict
is a list of feature.
An example makes this clear. Let's say you load the C. elegans GTF file
   from Ensembl and make a feature dict:
>>> worm_features_dict = HTSeq.make_feature_dict( HTSeq.parse_GFF(
... "test_data/Caenorhabditis_elegans.WS200.55.gtf.gz" ) )
(This command may take a few minutes to deal with the 430,000 features
in the GTF file. Note that you may need a lot of RAM if you have millions
of features.)
Then, you can simply access, say, exon 0 of gene "F08E10.4" as follows:
>>> worm_features_dict[ 'exon' ][ 'F08E10.4' ][ 0 ]
<GenomicFeature: exon 'F08E10.4' at V: 17479353 -> 17479001 (strand '-')>
"""
res = {}
for f in feature_sequence:
if f.type not in res:
res[ f.type ] = {}
res_ftype = res[ f.type ]
if f.name not in res_ftype:
res_ftype[ f.name ] = [ f ]
else:
res_ftype[ f.name ].append( f )
return res
#########################
## GenomicArray
#########################
def read_chrom_lens( filename, delimiter="\t" ):
return dict( ( ( chrom, int(len) )
for chrom, len in csv.reader( open(filename), delimiter=delimiter ) ) )
#########################
## Sequence readers
#########################
_re_fasta_header_line = re.compile( r'>\s*(\S+)\s*(.*)' )
class FastaReader( FileOrSequence ):
"""A Fasta_Reader is associated with a FASTA file or an open connection
to a file-like object with content in FASTA format.
It can generate an iterator over the sequences.
"""
def __iter__( self ):
seq = None
for line in FileOrSequence.__iter__( self ):
if line.startswith( ">" ):
if seq:
s = Sequence( seq, name )
s.descr = descr
yield s
mo = _re_fasta_header_line.match( line )
name = mo.group(1)
descr = mo.group(2)
seq = ""
else:
assert seq is not None, "FASTA file does not start with '>'."
seq += line[:-1]
if seq is not None:
s = Sequence( seq, name )
s.descr = descr
yield s
def get_sequence_lengths( self ):
seqname = None
seqlengths = {}
for line in FileOrSequence.__iter__( self ):
if line.startswith( ">" ):
if seqname is not None:
seqlengths[ seqname ] = length
mo = _re_fasta_header_line.match( line )
seqname = mo.group(1)
length = 0
else:
assert seqname is not None, "FASTA file does not start with '>'."
length += len( line.rstrip() )
if seqname is not None:
seqlengths[ seqname ] = length
return seqlengths
@staticmethod
def _import_pysam():
global pysam
try:
import pysam
except ImportError:
sys.stderr.write( "Please install the 'pysam' package to be able to use the Fasta indexing functionality." )
raise
def build_index( self, force = False ):
self._import_pysam()
if not isinstance( self.fos, str ):
raise TypeError, "This function only works with FastaReader objects " + \
"connected to a fasta file via file name"
index_filename = self.fos + ".fai"
if os.access( index_filename, os.R_OK ):
         if (not force) and os.stat( self.fos ).st_mtime <= \
os.stat( index_filename ).st_mtime:
# index is up to date
return
pysam.faidx( self.fos )
if not os.access( index_filename, os.R_OK ):
raise SystemError, "Building of Fasta index failed due to unknown error."
def __getitem__( self, iv ):
if not isinstance( iv, GenomicInterval ):
raise TypeError, "GenomicInterval expected as key."
if not isinstance( self.fos, str ):
raise TypeError, "This function only works with FastaReader objects " + \
"connected to a fasta file via file name"
self._import_pysam()
fasta = pysam.faidx( self.fos, "%s:%d-%d" % ( iv.chrom, iv.start, iv.end-1 ) )
ans = list( FastaReader( fasta ) )
assert len( ans ) == 1
ans[0].name = str(iv)
if iv.strand != "-":
return ans[0]
else:
return ans[0].get_reverse_complement()
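# Illustrative sketch (not part of the original module): reading FASTA records and
# collecting sequence lengths; "genome.fa" is a hypothetical file name.
def _example_fasta_reader():
   names = [ seq.name for seq in FastaReader( "genome.fa" ) ]
   lengths = FastaReader( "genome.fa" ).get_sequence_lengths()
   return names, lengths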
class FastqReader( FileOrSequence ):
"""A Fastq object is associated with a FASTQ self.file. When an iterator
is requested from the object, the FASTQ file is read.
qual_scale is one of "phred", "solexa", "solexa-old".
"""
def __init__( self, file_, qual_scale = "phred" ):
FileOrSequence.__init__( self, file_ )
self.qual_scale = qual_scale
if qual_scale not in ( "phred", "solexa", "solexa-old" ):
raise ValueError, "Illegal quality scale."
def __iter__( self ):
fin = FileOrSequence.__iter__( self )
while True:
id1 = fin.next()
seq = fin.next()
id2 = fin.next()
qual = fin.next()
if qual == "":
if id1 != "":
warnings.warn( "Number of lines in FASTQ file is not "
"a multiple of 4. Discarding the last, "
"incomplete record" )
break
if not qual.endswith( "\n" ):
qual += "\n"
if not id1.startswith( "@" ):
            raise ValueError( "Primary ID line in FASTQ file does "
               "not start with '@'. Either this is not FASTQ data or the parser got out of sync." )
if not id2.startswith( "+" ):
            raise ValueError( "Secondary ID line in FASTQ file does "
               "not start with '+'. Maybe got out of sync." )
if len( id2 ) > 2 and id1[1:] != id2[1:]:
            raise ValueError( "Primary and secondary ID line in FASTQ "
               "disagree." )
yield SequenceWithQualities( seq[:-1], id1[1:-1], qual[:-1],
self.qual_scale )
class BowtieReader( FileOrSequence ):
"""A BowtieFile object is associated with a Bowtie output file that
contains short read alignments. It can generate an iterator of Alignment
objects."""
def __iter__( self ):
for line in FileOrSequence.__iter__( self ):
try:
algnt = BowtieAlignment( line )
except ValueError:
if line.startswith( "Reported " ):
continue
warnings.warn( "BowtieReader: Ignoring the following line, which could not be parsed:\n%s\n" % line,
RuntimeWarning )
yield algnt
def bundle_multiple_alignments( sequence_of_alignments ):
"""Some alignment programs, e.g., Bowtie, can output multiple alignments,
i.e., the same read is reported consecutively with different alignments.
This function takes an iterator over alignments and bundles consecutive
alignments regarding the same read to a list of Alignment objects and
returns an iterator over these.
"""
alignment_iter = iter( sequence_of_alignments )
algnt = alignment_iter.next()
ma = [ algnt ]
for algnt in alignment_iter:
if algnt.read.name != ma[0].read.name:
yield ma
ma = [ algnt ]
else:
ma.append( algnt )
yield ma
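# Illustrative sketch (not part of the original module): bundling consecutive Bowtie
# alignments of the same read; "hits.bwtout" is a hypothetical file name.
def _example_bundle_alignments():
   bundle_sizes = []
   for bundle in bundle_multiple_alignments( BowtieReader( "hits.bwtout" ) ):
      bundle_sizes.append( ( bundle[0].read.name, len( bundle ) ) )
   return bundle_sizes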
class SolexaExportAlignment( Alignment ):
   """Iterating over SolexaExportReader objects will yield SolexaExportRecord
   objects. These have the following fields:
read - a SequenceWithQualities object
aligned - a boolean, indicating whether the object was aligned
iv - a GenomicInterval giving the alignment (or None, if not aligned)
passed_filter - a boolean, indicating whether the object passed the filter
nomatch_code - a code indicating why no match was found (or None, if the
read was aligned)
As long as 'aligned' is True, a SolexaExportRecord can be treated as an
Alignment object.
"""
def __init__( self ):
# Data is filled in by SolexaExportRecord
pass
def __repr__( self ):
if self.aligned:
return "< %s object: Read '%s', aligned to %s >" % (
self.__class__.__name__, self.read.name, self.iv )
else:
return "< %s object: Non-aligned read '%s' >" % (
self.__class__.__name__, self.read.name )
class SolexaExportReader( FileOrSequence ):
"""Parser for *_export.txt files from the SolexaPipeline software.
Iterating over a SolexaExportReader yields SolexaExportRecord objects.
"""
def __init__( self, filename_or_sequence, solexa_old = False ):
FileOrSequence.__init__( self, filename_or_sequence)
if solexa_old:
self.qualscale = "solexa-old"
else:
self.qualscale = "solexa"
@classmethod
def parse_line_bare( dummy, line ):
if line[-1] == "\n":
line = line[:-1]
res = {}
( res['machine'], res['run_number'], res['lane'], res['tile'], res['x_coord'],
res['y_coord'], res['index_string'], res['read_nbr'], res['read_seq'],
res['qual_str'], res['chrom'], res['contig'], res['pos'], res['strand'],
res['match_descr'], res['single_read_algnt_score'],
res['paired_read_algnt_score'], res['partner_chrom'], res['partner_contig'],
res['partner_offset'], res['partner_strand'], res['passed_filtering'] ) \
= line.split( "\t" )
return res
def __iter__( self ):
for line in FileOrSequence.__iter__( self ):
record = SolexaExportAlignment()
fields = SolexaExportReader.parse_line_bare( line )
if fields['read_nbr'] != "1":
warnings.warn( "Paired-end read encountered. PE is so far supported only for " +
"SAM files, not yet for SolexaExport. All PE-related fields are ignored. " )
record.read = SequenceWithQualities(
fields['read_seq'],
"%s:%s:%s:%s:%s#0" % (fields['machine'], fields['lane'], fields['tile'],
fields['x_coord'], fields['y_coord'] ),
fields['qual_str'], self.qualscale )
if fields['passed_filtering'] == 'Y':
record.passed_filter = True
elif fields['passed_filtering'] == 'N':
record.passed_filter = False
else:
raise ValueError, "Illegal 'passed filter' value in Solexa export data: '%s'." % fields['passed_filtering']
record.index_string = fields['index_string']
if fields['pos'] == '':
record.iv = None
record.nomatch_code = fields['chrom']
else:
if fields['strand'] == 'F':
strand = '+'
elif fields['strand'] == 'R':
strand = '-'
else:
raise ValueError, "Illegal strand value in Solexa export data."
start = int( fields['pos'] )
chrom = fields['chrom']
if fields['chrom'] == "":
chrom = fields['contig']
record.iv = GenomicInterval( chrom, start,
start + len( fields['read_seq'] ), strand )
yield record
class SAM_Reader( FileOrSequence ):
"""A SAM_Reader object is associated with a SAM file that
contains short read alignments. It can generate an iterator of Alignment
objects."""
def __iter__( self ):
for line in FileOrSequence.__iter__( self ):
if line.startswith( "@" ):
# do something with the header line
continue
try:
algnt = SAM_Alignment.from_SAM_line( line )
except ValueError, e:
e.args = e.args + ( self.get_line_number_string(), )
raise
yield algnt
class GenomicArrayOfSets( GenomicArray ):
   """A GenomicArrayOfSets is a specialization of GenomicArray that allows one to store
   sets of objects. On construction, the step vectors are initialized with empty sets.
   By using the 'add_value' method, objects can be added to intervals. If an object
   is already present in the set(s) at this interval, the new object is added to
   the existing set, and the set is split if necessary.
"""
def __init__( self, chroms, stranded=True, storage='step', memmap_dir = "" ):
GenomicArray.__init__( self, chroms, stranded, 'O', storage, memmap_dir )
def add_chrom( self, chrom, length = sys.maxint, start_index = 0 ):
GenomicArray.add_chrom( self, chrom, length, start_index )
for cv in self.chrom_vectors[ chrom ].values():
cv[:] = set()
cv.is_vector_of_sets = True
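# Illustrative sketch (not part of the original module): recording which gene names
# cover which genomic steps. The chromosome name and coordinates are made up, and the
# step-vector "+=" behaviour is assumed to be provided by the compiled _HTSeq helpers.
def _example_genomic_array_of_sets():
   gas = GenomicArrayOfSets( [ "chr1" ], stranded=False )
   gas[ GenomicInterval( "chr1", 100, 300, "." ) ] += "geneA"
   gas[ GenomicInterval( "chr1", 200, 400, "." ) ] += "geneB"
   # steps between positions 200 and 300 should now carry the set {"geneA", "geneB"}
   return gas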
###########################
## paired-end handling
###########################
def pair_SAM_alignments( alignments, bundle=False ):
mate_missing_count = [0]
def process_list( almnt_list ):
while len( almnt_list ) > 0:
a1 = almnt_list.pop( 0 )
# Find its mate
for a2 in almnt_list:
if a1.pe_which == a2.pe_which:
continue
if a1.aligned != a2.mate_aligned or a1.mate_aligned != a2.aligned:
continue
if not (a1.aligned and a2.aligned):
break
if a1.iv.chrom == a2.mate_start.chrom and a1.iv.start == a2.mate_start.pos and \
a2.iv.chrom == a1.mate_start.chrom and a2.iv.start == a1.mate_start.pos:
break
else:
if a1.mate_aligned:
mate_missing_count[0] += 1
if mate_missing_count[0] == 1:
warnings.warn( "Read " + a1.read.name + " claims to have an aligned mate " +
"which could not be found in an adjacent line." )
a2 = None
if a2 is not None:
almnt_list.remove( a2 )
if a1.pe_which == "first":
yield ( a1, a2 )
else:
assert a1.pe_which == "second"
yield ( a2, a1 )
almnt_list = []
current_name = None
for almnt in alignments:
if not almnt.paired_end:
raise ValueError, "'pair_alignments' needs a sequence of paired-end alignments"
if almnt.pe_which == "unknown":
raise ValueError, "Paired-end read found with 'unknown' 'pe_which' status."
if almnt.read.name == current_name:
almnt_list.append( almnt )
else:
if bundle:
yield list( process_list( almnt_list ) )
else:
for p in process_list( almnt_list ):
yield p
current_name = almnt.read.name
almnt_list = [ almnt ]
if bundle:
yield list( process_list( almnt_list ) )
else:
for p in process_list( almnt_list ):
yield p
if mate_missing_count[0] > 1:
warnings.warn( "%d reads with missing mate encountered." % mate_missing_count[0] )
def pair_SAM_alignments_with_buffer( alignments, max_buffer_size=3000000 ):
almnt_buffer = {}
ambiguous_pairing_counter = 0
for almnt in alignments:
if not almnt.paired_end:
raise ValueError, "Sequence of paired-end alignments expected, but got single-end alignment."
if almnt.pe_which == "unknown":
raise ValueError, "Cannot process paired-end alignment found with 'unknown' 'pe_which' status."
matekey = (
almnt.read.name,
"second" if almnt.pe_which == "first" else "first",
almnt.mate_start.chrom if almnt.mate_aligned else None,
almnt.mate_start.pos if almnt.mate_aligned else None,
almnt.iv.chrom if almnt.aligned else None,
almnt.iv.start if almnt.aligned else None,
-almnt.inferred_insert_size if almnt.aligned and almnt.mate_aligned else None )
if matekey in almnt_buffer:
if len( almnt_buffer[ matekey ] ) == 1:
mate = almnt_buffer[ matekey ][ 0 ]
del almnt_buffer[ matekey ]
else:
mate = almnt_buffer[ matekey ].pop( 0 )
if ambiguous_pairing_counter == 0:
ambiguous_pairing_first_occurance = matekey
ambiguous_pairing_counter += 1
if almnt.pe_which == "first":
yield ( almnt, mate )
else:
yield ( mate, almnt )
else:
almntkey = (
almnt.read.name, almnt.pe_which,
almnt.iv.chrom if almnt.aligned else None,
almnt.iv.start if almnt.aligned else None,
almnt.mate_start.chrom if almnt.mate_aligned else None,
almnt.mate_start.pos if almnt.mate_aligned else None,
almnt.inferred_insert_size if almnt.aligned and almnt.mate_aligned else None )
if almntkey not in almnt_buffer:
almnt_buffer[ almntkey ] = [ almnt ]
else:
almnt_buffer[ almntkey ].append( almnt )
if len(almnt_buffer) > max_buffer_size:
raise ValueError, "Maximum alignment buffer size exceeded while pairing SAM alignments."
if len(almnt_buffer) > 0:
warnings.warn( "Mate records missing for %d records; first such record: %s." %
( len(almnt_buffer), str( almnt_buffer.values()[0][0] ) ) )
for almnt_list in almnt_buffer.values():
for almnt in almnt_list:
if almnt.pe_which == "first":
yield ( almnt, None )
else:
yield ( None, almnt )
if ambiguous_pairing_counter > 0:
warnings.warn( "Mate pairing was ambiguous for %d records; mate key for first such record: %s." %
( ambiguous_pairing_counter, str( ambiguous_pairing_first_occurance ) ) )
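# Illustrative sketch (not part of the original module): pairing the mates of a
# paired-end SAM file; "paired.sam" is a hypothetical file name.
def _example_pair_sam_alignments():
   proper_pairs = 0
   for first, second in pair_SAM_alignments_with_buffer( SAM_Reader( "paired.sam" ) ):
      if first is not None and second is not None:
         proper_pairs += 1
   return proper_pairs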
###########################
## variant calls
###########################
_re_vcf_meta_comment = re.compile( "^##([a-zA-Z]+)\=(.*)$" )
_re_vcf_meta_descr = re.compile('ID=[^,]+,?|Number=[^,]+,?|Type=[^,]+,?|Description="[^"]+",?')
_re_vcf_meta_types = re.compile( "[INFO|FILTER|FORMAT]" )
_vcf_typemap = {
"Integer":int,
"Float":float,
"String":str,
"Flag":bool
}
class VariantCall( object ):
def __init__( self, chrom = None, pos = None, identifier = None, ref = None, alt = None, qual = None, filtr = None, info = None ):
self.chrom = chrom
self.pos = pos
self.id = identifier
self.ref = ref
self.alt = alt
self.qual = qual
self.filter = filtr
self.info = info
self._original_line = None
@classmethod
def fromdict( cls, dictionary ):
ret = cls()
ret.chrom = dictionary["chrom"]
ret.pos = dictionary["pos"]
ret.id = dictionary["id"]
ret.ref = dictionary["ref"]
ret.alt = dictionary["alt"]
ret.qual = dictionary["qual"]
ret.filter = dictionary["filter"]
ret.info = dictionary["info"]
      ret._original_line = None
      return ret
@classmethod
def fromline( cls, line, nsamples = 0, sampleids = [] ):
ret = cls()
if nsamples == 0:
ret.format = None
ret.chrom, ret.pos, ret.id, ret.ref, ret.alt, ret.qual, ret.filter, ret.info = line.rstrip("\n").split("\t", 7)
else:
lsplit = line.rstrip("\n").split("\t")
ret.chrom, ret.pos, ret.id, ret.ref, ret.alt, ret.qual, ret.filter, ret.info = lsplit[:8]
ret.format = lsplit[8].split(":")
ret.samples = {}
spos=9
for sid in sampleids:
ret.samples[ sid ] = dict( ( name, value ) for (name, value) in itertools.izip( ret.format, lsplit[spos].split(":") ) )
spos += 1
ret.pos = GenomicPosition( ret.chrom, int(ret.pos) )
ret.alt = ret.alt.split(",")
ret._original_line = line
return ret
def infoline( self ):
if self.info.__class__ == dict:
return ";".join(map((lambda key: str(key) + "=" + str(self.info[key])), self.info ))
else:
return self.info
   def get_original_line( self ):
      if self._original_line is None:
         warnings.warn( "Original line is empty, probably this object was created from scratch and not from a line in a .vcf file!" )
      return self._original_line
def sampleline( self ):
if self.format == None:
print >> sys.stderr, "No samples in this variant call!"
return ""
keys = self.format
ret = [ ":".join( keys ) ]
for sid in self.samples:
tmp = []
for k in keys:
if k in self.samples[sid]:
tmp.append( self.samples[sid][k] )
ret.append( ":".join(tmp) )
return "\t".join( ret )
def to_line( self ):
if self.format == None:
return "\t".join( map( str, [ self.pos.chrom, self.pos.pos, self.id, self.ref, ",".join( self.alt ), self.qual, self.filter, self.infoline() ] ) ) + "\n"
else:
return "\t".join( map( str, [ self.pos.chrom, self.pos.pos, self.id, self.ref, ",".join( self.alt ), self.qual, self.filter, self.infoline(), self.sampleline() ] ) ) + "\n"
def __descr__( self ):
return "<VariantCall at %s, ref '%s', alt %s >" % (str(self.pos).rstrip("/."), self.ref, str(self.alt).strip("[]"))
def __str__( self ):
return "%s:'%s'->%s" % (str(self.pos).rstrip("/."), self.ref, str(self.alt).strip("[]"))
def unpack_info( self, infodict ):
tmp = {}
for token in self.info.strip(";").split(";"):
if re.compile("=").search(token):
token = token.split("=")
if infodict.has_key( token[0] ):
tmp[token[0]] = map( infodict[token[0]], token[1].split(",") )
else:
tmp[token[0]] = token[1].split(",")
if len( tmp[ token[0] ] ) == 1:
tmp[token[0]] = tmp[token[0]][0]
else: #Flag attribute found
tmp[token] = True
diff = set( infodict.keys() ).difference( set( tmp.keys() ) )
for key in diff:
if infodict[key] == bool:
tmp[key] = False
self.info = tmp
class VCF_Reader( FileOrSequence ):
def __init__( self, filename_or_sequence ):
FileOrSequence.__init__( self, filename_or_sequence )
self.metadata = {}
self.info = {}
self.filters = {}
self.formats = {}
self.nsamples = 0
self.sampleids = []
def make_info_dict( self ):
self.infodict = dict( ( key, _vcf_typemap[self.info[key]["Type"]] ) for key in self.info.keys() )
def parse_meta( self, header_filename = None ):
if header_filename == None:
the_iter = FileOrSequence.__iter__( self )
else:
the_iter = open( header_filename, "r" )
for line in the_iter:
if line.startswith( '#' ):
if line.startswith( "##" ):
mo = _re_vcf_meta_comment.match( line )
if mo:
value = mo.group(2)
if mo.group(1) == "INFO":
value = dict( e.rstrip(",").split("=",1) for e in _re_vcf_meta_descr.findall(value) )
key = value["ID"]
del value["ID"]
self.info[ key ] = value
elif mo.group(1) == "FILTER":
value = dict( e.rstrip(",").split("=",1) for e in _re_vcf_meta_descr.findall(value) )
key = value["ID"]
del value["ID"]
self.filters[ key ] = value
elif mo.group(1) == "FORMAT":
value = dict( e.rstrip(",").split("=",1) for e in _re_vcf_meta_descr.findall(value) )
key = value["ID"]
del value["ID"]
self.formats[ key ] = value
else:
self.metadata[ mo.group(1) ] = mo.group(2)
else:
self.sampleids = line.rstrip("\t\n").split("\t")[9:]
self.nsamples = len( self.sampleids )
continue
else:
break
def meta_info( self, header_filename = None ):
ret = []
if header_filename == None:
the_iter = FileOrSequence.__iter__( self )
else:
the_iter = open( header_filename, "r" )
for line in the_iter:
if line.startswith( '#' ):
ret.append( line )
else:
break
return ret
def __iter__( self ):
for line in FileOrSequence.__iter__( self ):
if line == "\n" or line.startswith( '#' ):
continue
vc = VariantCall.fromline( line, self.nsamples, self.sampleids )
yield vc
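# Illustrative sketch (not part of the original module): reading variant calls and
# decoding their INFO fields; "calls.vcf" is a hypothetical file name.
def _example_vcf_reader():
   vcfr = VCF_Reader( "calls.vcf" )
   vcfr.parse_meta()        # fills in the INFO/FILTER/FORMAT metadata
   vcfr.make_info_dict()    # maps INFO keys to Python types
   variants = []
   for vc in vcfr:
      vc.unpack_info( vcfr.infodict )
      variants.append( vc )
   return variants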
class WiggleReader( FileOrSequence ):
def __init__( self, filename_or_sequence, verbose = True ):
FileOrSequence.__init__( self, filename_or_sequence )
self.attributes = {}
self.stepType = 'none'
self.verbose = verbose
def __iter__( self ):
span = 1
pos = None
step = None
chrom = None
for line in FileOrSequence.__iter__( self ):
if line.startswith( 'track' ):
fields = shlex.split(line)[1:]
self.attributes = dict([(p[0], p[1].strip('"')) for p in [x.split("=") for x in fields]])
elif line.startswith( 'fixedStep' ): # do fixed step stuff
self.stepType = 'fixed'
fields = shlex.split(line)[1:]
declarations = dict([(p[0], p[1].strip('"')) for p in [x.split("=") for x in fields]])
pos = int(declarations['start'])
step = int(declarations['step'])
chrom = declarations['chrom']
if 'span' in declarations:
span = int(declarations['span'])
else:
span = 1
elif line.startswith( 'variableStep' ): # do variable step stuff
self.stepType = 'variable'
fields = shlex.split(line)[1:]
declarations = dict([(p[0], p[1].strip('"')) for p in [x.split("=") for x in fields]])
chrom = declarations['chrom']
if 'span' in declarations:
span = int(declarations['span'])
else:
span = 1
elif line.startswith( 'browser' ) or line.startswith( '#' ): #Comment or ignored
if self.verbose:
print "Ignored line:", line
continue
else:
if self.stepType == 'fixed':
yield ( GenomicInterval( chrom, pos, pos + span, '.' ), float(line.strip()) )
pos += step
elif self.stepType == 'variable':
tmp = line.strip().split(" ")
pos = int(tmp[0])
yield ( GenomicInterval( chrom, pos, pos + span, '.' ), float(tmp[1]) )
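# Illustrative sketch (not part of the original module): summing wiggle track values
# per chromosome; "coverage.wig" is a hypothetical file name.
def _example_wiggle_reader():
   totals = {}
   for iv, value in WiggleReader( "coverage.wig", verbose=False ):
      totals[ iv.chrom ] = totals.get( iv.chrom, 0.0 ) + value
   return totals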
class BAM_Reader( object ):
def __init__( self, filename ):
global pysam
self.filename = filename
self.sf = None # This one is only used by __getitem__
self.record_no = -1
try:
import pysam
except ImportError:
sys.stderr.write( "Please Install PySam to use the BAM_Reader Class (http://code.google.com/p/pysam/)" )
raise
def __iter__( self ):
sf = pysam.Samfile(self.filename, "rb")
self.record_no = 0
for pa in sf:
yield SAM_Alignment.from_pysam_AlignedRead( pa, sf )
self.record_no += 1
def fetch( self, reference = None, start = None, end = None, region = None ):
sf = pysam.Samfile(self.filename, "rb")
self.record_no = 0
try:
for pa in sf.fetch( reference, start, end, region ):
yield SAM_Alignment.from_pysam_AlignedRead( pa, sf )
self.record_no += 1
except ValueError as e:
if e.message == "fetch called on bamfile without index":
print "Error: ", e.message
print "Your bam index file is missing or wrongly named, convention is that file 'x.bam' has index file 'x.bam.bai'!"
else:
raise
except:
raise
def get_line_number_string( self ):
if self.record_no == -1:
return "unopened file %s" % ( self.filename )
else:
return "record #%d in file %s" % ( self.record_no, self.filename )
def __getitem__( self, iv ):
if not isinstance( iv, GenomicInterval ):
raise TypeError, "Use a HTSeq.GenomicInterval to access regions within .bam-file!"
if self.sf is None:
self.sf = pysam.Samfile( self.filename, "rb" )
if not self.sf._hasIndex():
raise ValueError, "The .bam-file has no index, random-access is disabled!"
for pa in self.sf.fetch( iv.chrom, iv.start+1, iv.end ):
yield SAM_Alignment.from_pysam_AlignedRead( pa, self.sf )
def get_header_dict( self ):
sf = pysam.Samfile(self.filename, "rb")
return sf.header
class BAM_Writer( object ):
def __init__( self, filename, template = None, referencenames = None, referencelengths = None, text = None, header = None ):
try:
import pysam
except ImportError:
sys.stderr.write( "Please Install PySam to use the BAM_Writer Class (http://code.google.com/p/pysam/)" )
raise
self.filename = filename
self.template = template
self.referencenames = referencenames
self.referencelengths = referencelengths
self.text = text
self.header = header
self.sf = pysam.Samfile( self.filename, mode="wb", template = self.template, referencenames = self.referencenames, referencelengths = self.referencelengths, text = self.text, header = self.header )
@classmethod
def from_BAM_Reader( cls, fn, br ):
return BAM_Writer( filename = fn, header = br.get_header_dict() )
def write( self, alnmt):
self.sf.write( alnmt.to_pysam_AlignedRead( self.sf ) )
def close( self ):
self.sf.close()
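# Minimal usage sketch: copying a BAM file by pairing BAM_Reader with
# BAM_Writer.from_BAM_Reader(), which reuses the reader's header dictionary.
# The file names are hypothetical.
def _example_copy_bam( infile = "in.bam", outfile = "out.bam" ):
    reader = BAM_Reader( infile )
    writer = BAM_Writer.from_BAM_Reader( outfile, reader )
    for almnt in reader:
        writer.write( almnt )
    writer.close()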
class BED_Reader( FileOrSequence ):
def __init__( self, filename_or_sequence ):
FileOrSequence.__init__( self, filename_or_sequence )
def __iter__( self ):
for line in FileOrSequence.__iter__( self ):
if line.startswith( "track" ) or line.startswith( "#" ):
continue
fields = line.split()
if len(fields) < 3:
raise ValueError, "BED file line contains less than 3 fields"
#2015-7-28 modified by ChengChao
#if len(fields) > 9:
# raise ValueError, "BED file line contains more than 9 fields"
iv = GenomicInterval( fields[0], int(fields[1]), int(fields[2]), fields[5] if len(fields) > 5 else "." )
f = GenomicFeature( fields[3] if len(fields) > 3 else "unnamed", "BED line", iv )
f.score = float( fields[4] ) if len(fields) > 4 else None
#f.thick = GenomicInterval( iv.chrom, int( fields[6] ), int( fields[7] ), iv.strand ) if len(fields) > 7 else None
#f.itemRgb = [ int(a) for a in fields[8].split(",") ] if len(fields) > 8 else None
f.line = line.strip()
            yield f
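# Minimal usage sketch for BED_Reader above; "regions.bed" is a hypothetical
# path. Each yielded GenomicFeature carries the parsed score (field 5, or None)
# and the original line, as set in the loop above.
def _example_read_bed( filename = "regions.bed" ):
    for feature in BED_Reader( filename ):
        print feature.score, feature.line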
|
mit
| 5,902,803,634,480,403,000
| 36.890927
| 203
| 0.543472
| false
| 3.792387
| false
| false
| false
|
jceipek/Mind-Rush
|
biofeedback.py
|
1
|
7491
|
#
# biofeedback.py
#
# Copyright (C)2011 Julian Ceipek and Patrick Varin
#
# Redistribution is permitted under the BSD license. See LICENSE for details.
#
try:
import multiprocessing
except:
raise Exception("Unable to load multiprocessing Python module.")
try:
import serial
except:
raise Exception("""Unable to load serial Python module.
Do you have pyserial installed?""")
import time
from engine.trueProcess import TrueProcess
from engine.altInput import AltInput
class Arduino:
def __init__(self):
self.active = multiprocessing.Value('i',1)
self.eventReader = None
self.proc = None
def listen(self, deviceID, mindFlexActive=True, eyeCircuitActive=True):
self.mindflexQueue = multiprocessing.Queue(11)
self.eyeCircuitQueue = multiprocessing.Queue(5)
self.proc = TrueProcess(self.mindflexReader, deviceID,
mindFlexActive, eyeCircuitActive)
def mindflexReader(self, deviceID,
mindFlexActive=True, eyeCircuitActive=True):
self.quality = -1
self.attention = -1
self.meditation = -1
self.delta = -1
self.theta = -1
self.lowAlpha = -1
self.highAlpha = -1
self.lowBeta = -1
self.highBeta = -1
self.lowGamma = -1
self.highGamma = -1
self.eyeSignal = -1
connected = False
count = 0
while not connected and count <= 5:
try:
ser = serial.Serial(deviceID, 9600)
connected = True
except Exception as e:
count += 1
print e
if count >= 5:
raise Exception("Unable to communicate with Arduino")
while self.active.value == 1 and (mindFlexActive or eyeCircuitActive):
try:
line = ser.readline().strip()
except Exception as e:
line = ""
print "Reading from Arduino Failed: ",e
if mindFlexActive and ('EEG:' in line):
line = line.split(':')
line = line[1].split(',')
try:
if not len(line) == 11:
raise ValueError
newQuality = (200.0-int(line[0]))/200.0
newAttention = int(line[1])/100.0
newMeditation = int(line[2])/100.0
newDelta = int(line[3])
newTheta = int(line[4])
newLowAlpha = int(line[5])
newHighAlpha = int(line[6])
newLowBeta = int(line[7])
newHighBeta = int(line[8])
newLowGamma = int(line[9])
newHighGamma = int(line[10])
if self.quality != newQuality:
self.quality = newQuality
self.putMindflexMessage(('Arduino_quality',self.quality))
if self.attention != newAttention:
self.attention = newAttention
self.putMindflexMessage(('Arduino_attention',self.attention))
if self.meditation != newMeditation:
self.meditation = newMeditation
                        self.putMindflexMessage(('Arduino_meditation',self.meditation))
if self.delta != newDelta:
self.delta = newDelta
self.putMindflexMessage(('Arduino_delta',self.delta))
if self.theta != newTheta:
self.theta = newTheta
self.putMindflexMessage(('Arduino_theta',self.theta))
if self.lowAlpha != newLowAlpha:
self.lowAlpha = newLowAlpha
self.putMindflexMessage(('Arduino_lowAlpha',self.lowAlpha))
if self.highAlpha != newHighAlpha:
self.highAlpha = newHighAlpha
self.putMindflexMessage(('Arduino_highAlpha',self.highAlpha))
if self.lowBeta != newLowBeta:
self.lowBeta = newLowBeta
self.putMindflexMessage(('Arduino_lowBeta',self.lowBeta))
if self.highBeta != newHighBeta:
self.highBeta = newHighBeta
self.putMindflexMessage(('Arduino_highBeta',self.highBeta))
if self.lowGamma != newLowGamma:
self.lowGamma = newLowGamma
self.putMindflexMessage(('Arduino_lowGamma',self.lowGamma))
if self.highGamma != newHighGamma:
self.highGamma = newHighGamma
self.putMindflexMessage(('Arduino_highGamma',self.highGamma))
except:
print line
print "Caught Mindflex serial error!"
elif eyeCircuitActive and ('EMG:' in line):
line = line.split(':')
line = line[1].split(',')
try:
if not len(line) == 1:
raise ValueError
newEyeSignal = int(line[0])
if self.eyeSignal != newEyeSignal:
self.eyeSignal = newEyeSignal
self.putEyeCircuitMessage(('Arduino_eyeValue',
self.eyeSignal))
except Exception as e:
print e
print "Caught EMG circuit serial error!",line
try:
ser.close()
print "Arduino Serial Connection Closed"
except:
print "Unable to close serial connection to Arduino!"
def putEyeCircuitMessage(self, message):
while self.eyeCircuitQueue.full():
self.eyeCircuitQueue.get()
self.eyeCircuitQueue.put(message)
def putMindflexMessage(self, message):
while self.mindflexQueue.full():
self.mindflexQueue.get()
self.mindflexQueue.put(message)
def deactivate(self):
self.active.value = 0
print("Closed Arduino Process")
class Biofeedback(AltInput):
def __init__(self, deviceID, mindFlexActive=True, eyeCircuitActive=True):
self.arduino = Arduino()
self.arduino.listen(deviceID, mindFlexActive, eyeCircuitActive)
def poll(self):
return (not self.arduino.mindflexQueue.empty() or
not self.arduino.eyeCircuitQueue.empty())
def getEvents(self):
events = []
if not self.arduino.mindflexQueue.empty():
reading = self.arduino.mindflexQueue.get()
identifier = reading[0]
value = reading[1]
discrete = False #All of the bio-feedback events we use are continuous values
mindflexReading = self.makeEvent(identifier, value, discrete)
events.append(mindflexReading)
if not self.arduino.eyeCircuitQueue.empty():
reading = self.arduino.eyeCircuitQueue.get()
identifier = reading[0]
value = reading[1]
discrete = False #All of the bio-feedback events we use are continuous values
eyeCircuitReading = self.makeEvent(identifier, value, discrete)
events.append(eyeCircuitReading)
return events
def stop(self):
self.arduino.deactivate()
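# Minimal usage sketch for the Biofeedback input above. The serial device path
# is hypothetical and platform dependent (e.g. "/dev/ttyUSB0" on Linux or
# "COM3" on Windows); events come from the queues filled by the Arduino process.
def _example_poll_biofeedback( deviceID = "/dev/ttyUSB0" ):
    feedback = Biofeedback( deviceID )
    try:
        while True:
            if feedback.poll():
                for event in feedback.getEvents():
                    print event
            time.sleep( 0.01 )
    except KeyboardInterrupt:
        feedback.stop()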
|
bsd-3-clause
| -5,254,349,475,841,835,000
| 37.813472
| 89
| 0.536243
| false
| 4.427305
| false
| false
| false
|