| code (string, 2–1.05M chars) | repo_name (string, 5–104 chars) | path (string, 4–251 chars) | language (1 class) | license (15 values) | size (int32, 2–1.05M) |
|---|---|---|---|---|---|
import logging
class HeaderReorganization(object):
def __init__(self, project_layout, project_services):
self.project_layout = project_layout
self.project_services = project_services
def reorganizeHeaders(self):
logger = logging.getLogger('Archie')
logger.debug('Reorganizing source headers into tier folders')
expected_installed_files = dict()
build_folder_stack = []
max_tier = 0
has_prescient_module = self.project_layout.hasPrescientModule()
build_folder_stack.append(self.project_layout.getSourceFolders())
while len(build_folder_stack) > 0:
source_folder_list = build_folder_stack.pop()
for source_folder in source_folder_list:
tier = self.project_layout.tierForModule(source_folder)
max_tier = max(max_tier, tier)
logger.debug('Source folder %s has default tier %d', source_folder, tier)
source_files = self.project_services.listFiles(source_folder)
for file_name in source_files:
source_file = source_folder + '/' + file_name
if self.project_layout.isIncludeFile(source_file):
file_tier = self.project_layout.tierForModule(source_file, tier)
if file_tier == 0 and not has_prescient_module:
logger.debug('Include file %s is private', source_file)
continue
logger.debug('Include file %s has tier %d', source_file, file_tier)
max_tier = max(max_tier, file_tier)
tier_folder = self.project_layout.getIncludeFolder(file_tier)
if not self.project_services.folderExists(tier_folder):
self.project_services.createFolder(tier_folder)
if tier_folder not in expected_installed_files:
expected_installed_files[tier_folder] = set()
dest_file = tier_folder + '/' + file_name
expected_installed_files[tier_folder].add(dest_file)
file_exists = self.project_services.fileExists(dest_file)
file_different = not file_exists
if file_exists:
source_mtime = self.project_services.statFile(source_file)
dest_mtime = self.project_services.statFile(dest_file)
file_different = source_mtime != dest_mtime
if file_different:
if file_exists:
logger.info('Updating header file %s in tier folder %s as it has changed.', source_file, tier_folder)
self.project_services.removeFile(dest_file)
else:
logger.info('Installing header file %s into tier folder %s as it was missing.', source_file, tier_folder)
self.project_services.createLinkedFile(source_file, tier_folder)
else:
logger.debug('File %s is already installed into tier folder %s', source_file, tier_folder)
else:
logger.debug('Source file %s is not an include file', source_file)
build_folder_stack.append(self.project_services.listFolders(source_folder))
# Add any in-between tier folders to the header map which were not mentioned by any source directory
for tier in range(1, max_tier + 1):
tier_folder = self.project_layout.getIncludeFolder(tier)
if not self.project_services.folderExists(tier_folder):
logger.info('Adding empty tier folder %s.', tier_folder)
self.project_services.createFolder(tier_folder)
if tier_folder not in expected_installed_files:
expected_installed_files[tier_folder] = set()
# Remove all the header files we discover in the include folders but not in the source directories
for tier_folder, header_file_set in expected_installed_files.items():
for file_name in self.project_services.listFiles(tier_folder):
header_file = tier_folder + '/' + file_name
if not header_file in header_file_set:
logger.info('Removing header file %s which has changed tiers or been removed from the source tree.', header_file)
self.project_services.removeFile(header_file)
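# Illustrative usage (a minimal sketch; `MyProjectLayout` and `MyProjectServices`
# are hypothetical implementations of the two injected collaborators, not part
# of this module):
#
#   layout = MyProjectLayout()
#   services = MyProjectServices()
#   HeaderReorganization(layout, services).reorganizeHeaders()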
| niccroad/Archie | archie/headertiers/businessrules/HeaderReorganization.py | Python | mit | 4,774 |
from rest_framework.routers import DefaultRouter
from retail.views import CategoryViewSet, AssetViewSet, ProductViewSet, get_auth_token, UserViewSet, \
CustomerViewSet
from django.conf.urls import url
from django.conf import settings
from django.conf.urls.static import static
from rest_framework.authtoken import views as rest_framework_views
urlpatterns = [
# Session Login
url(r'^login/$', get_auth_token, name='login'),
url(r'^get_auth_token/$', rest_framework_views.obtain_auth_token, name='get_auth_token'),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
router = DefaultRouter()
router.register(prefix='categories', viewset=CategoryViewSet, base_name="my_categories")
router.register(prefix='assets', viewset=AssetViewSet, base_name="my_assets")
router.register(prefix='products', viewset=ProductViewSet, base_name="my_products")
router.register(prefix='users', viewset=UserViewSet, base_name="my_users")
router.register(prefix='customers', viewset=CustomerViewSet, base_name="my_customers")
urlpatterns += router.urls
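# For reference (behaviour of DRF's DefaultRouter, not extra code in this file):
# each registered prefix gets list and detail routes, e.g. /categories/ and
# /categories/{pk}/ for CategoryViewSet, plus an API root view at the router's base URL.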
| Nobay/SampleStore | restful-api/urls.py | Python | mit | 1,066 |
class Config:
ServerUrl = 'http://homeserver.spdns.org/blot.php'
UseGetRequests = True
NotificationServiceUrl = 3333
IFTTTUrlTemplate = "https://maker.ifttt.com/trigger/tag_%s_pressed/with/key/cV2tU0tD8V2UWOjPb4H7SO"
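# The template is filled with plain %-formatting; illustrative sketch with a
# hypothetical tag name 'blue':
#   Config.IFTTTUrlTemplate % 'blue'
#   -> 'https://maker.ifttt.com/trigger/tag_blue_pressed/with/key/...'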
| fablab-ka/labtags | blot-gateway/config.py | Python | mit | 234 |
"""Unit tests for top_k_accuracy.py
Written by Grant Van Horn.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import random
import unittest
import numpy as np
import top_k_accuracy
def make_accuracy_data(num_classes, num_test_samples, num_correct_predictions, k):
"""Convenience method to create the labels and predictions array.
Args:
num_classes (int): The number of classes in the dataset.
num_test_samples (int): The number of labels and predictions to generate.
num_correct_predictions (int): The number of predictions that are correct.
k (int): The number of class labels in a prediction.
Returns:
list : The labels.
list : The predictions.
"""
assert k <= num_classes, "k too big"
assert num_correct_predictions <= num_test_samples, ""\
"`num_correct_predictions` is larger than `num_test_samples`"
if k == num_classes:
assert num_test_samples == num_correct_predictions, ""\
"`num_correct_predictions` should be equal to `num_test_samples` "\
"when `k` equals `num_classes`"
labels = []
predictions = []
class_labels = range(num_classes)
# Determine which indexes will be correct
if num_correct_predictions > 0:
correct_prediction_idxs = set(random.sample(range(num_test_samples),
num_correct_predictions))
else:
correct_prediction_idxs = set()
# Fill in the labels and prediction lists
for i in range(num_test_samples):
gt_label = random.choice(class_labels)
labels.append(gt_label)
preds = random.sample(class_labels, k)
if i in correct_prediction_idxs:
if gt_label not in preds:
preds[0] = gt_label
else:
if gt_label in preds:
preds = [p for p in preds if p != gt_label]
preds.append((gt_label + 1) % num_classes)
predictions.append(preds)
return labels, predictions
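# Illustrative sketch (a hypothetical helper, not part of the original test
# suite): build 100 samples over 10 classes where 90 of the top-5 prediction
# lists contain the true label.
def _example_make_accuracy_data():
    labels, predictions = make_accuracy_data(
        num_classes=10,
        num_test_samples=100,
        num_correct_predictions=90,
        k=5)
    # `labels` is a list of 100 ints; `predictions` is a list of 100 lists of 5 class ids.
    return labels, predictions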
class TestAccuracy(unittest.TestCase):
def test_top_1_perfect(self):
labels, predictions = make_accuracy_data(
num_classes=10,
num_test_samples=100,
num_correct_predictions=100,
k=1)
accuracy = top_k_accuracy.compute_top_k_accuracy(labels, predictions)
self.assertEqual(accuracy, 1.)
# Try with strings
labels = map(str, labels)
predictions = [map(str, preds) for preds in predictions]
accuracy = top_k_accuracy.compute_top_k_accuracy(labels, predictions)
self.assertEqual(accuracy, 1.)
# Try with numpy
labels = np.array(labels)
predictions = np.array(predictions)
accuracy = top_k_accuracy.compute_top_k_accuracy(labels, predictions)
self.assertEqual(accuracy, 1.)
def test_top_3_perfect(self):
labels, predictions = make_accuracy_data(
num_classes=10,
num_test_samples=100,
num_correct_predictions=100,
k=3)
accuracy = top_k_accuracy.compute_top_k_accuracy(labels, predictions)
self.assertEqual(accuracy, 1.)
# Try with strings
labels = map(str, labels)
predictions = [map(str, preds) for preds in predictions]
accuracy = top_k_accuracy.compute_top_k_accuracy(labels, predictions)
self.assertEqual(accuracy, 1.)
# Try with numpy
labels = np.array(labels)
predictions = np.array(predictions)
accuracy = top_k_accuracy.compute_top_k_accuracy(labels, predictions)
self.assertEqual(accuracy, 1.)
def test_top_5_half_right(self):
labels, predictions = make_accuracy_data(
num_classes=10,
num_test_samples=10,
num_correct_predictions=5,
k=5)
accuracy = top_k_accuracy.compute_top_k_accuracy(labels, predictions)
self.assertEqual(accuracy, 0.5)
# Try with strings
labels = map(str, labels)
predictions = [map(str, preds) for preds in predictions]
accuracy = top_k_accuracy.compute_top_k_accuracy(labels, predictions)
self.assertEqual(accuracy, 0.5)
# Try with numpy
labels = np.array(labels)
predictions = np.array(predictions)
accuracy = top_k_accuracy.compute_top_k_accuracy(labels, predictions)
self.assertEqual(accuracy, 0.5)
def test_top_5_none_correct(self):
labels, predictions = make_accuracy_data(
num_classes=10,
num_test_samples=100,
num_correct_predictions=0,
k=5)
accuracy = top_k_accuracy.compute_top_k_accuracy(labels, predictions)
self.assertEqual(accuracy, 0.)
# Try with strings
labels = map(str, labels)
predictions = [map(str, preds) for preds in predictions]
accuracy = top_k_accuracy.compute_top_k_accuracy(labels, predictions)
self.assertEqual(accuracy, 0.)
# Try with numpy
labels = np.array(labels)
predictions = np.array(predictions)
accuracy = top_k_accuracy.compute_top_k_accuracy(labels, predictions)
self.assertEqual(accuracy, 0.)
def test_top_5_with_5_classes(self):
labels, predictions = make_accuracy_data(
num_classes=5,
num_test_samples=100,
num_correct_predictions=100,
k=5)
accuracy = top_k_accuracy.compute_top_k_accuracy(labels, predictions)
self.assertEqual(accuracy, 1.)
# Try with strings
labels = map(str, labels)
predictions = [map(str, preds) for preds in predictions]
accuracy = top_k_accuracy.compute_top_k_accuracy(labels, predictions)
self.assertEqual(accuracy, 1.)
# Try with numpy
labels = np.array(labels)
predictions = np.array(predictions)
accuracy = top_k_accuracy.compute_top_k_accuracy(labels, predictions)
self.assertEqual(accuracy, 1.)
def test_empty_labels(self):
labels = []
predictions = []
accuracy = top_k_accuracy.compute_top_k_accuracy(labels, predictions)
self.assertEqual(accuracy, 0)
# Try with numpy
labels = np.array(labels)
predictions = np.array(predictions)
accuracy = top_k_accuracy.compute_top_k_accuracy(labels, predictions)
self.assertEqual(accuracy, 0)
@unittest.expectedFailure
def test_unmatched_lengths(self):
labels, predictions = make_accuracy_data(
num_classes=10,
num_test_samples=100,
num_correct_predictions=0,
k=3)
# add one extra prediction to the prediction matrix
predictions.append([0, 1, 2])
# Should throw an Exception
top_k_accuracy.compute_top_k_accuracy(labels, predictions)
def make_csv_file(output_path, field_names, row_data):
"""Write the data to a csv file.
Args:
output_path (str) : File path to write the csv file.
field_names [str] : The column field names.
row_data ([{}]) : A list of dicts containing values for the field names.
"""
with open(output_path, 'w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=field_names)
writer.writeheader()
for data in row_data:
writer.writerow(data)
def make_submission_file(output_path, submission_data):
"""Create a submission csv file.
Args:
output_path (str) : File path to write the csv file.
submission_data [{}] : A list of dicts containing values for the keys
"image_id" and "preditions".
"""
make_csv_file(output_path, field_names=["image_id", "predictions"],
row_data=submission_data)
def make_solution_file(output_path, solution_data):
"""Create a solution csv file.
Args:
output_path (str) : File path to write the csv file.
solution_data [{}] : A list of dicts containing values for the keys
"image_id", "truth" and "usage".
"""
make_csv_file(output_path, field_names=["image_id", "truth", "usage"],
row_data=solution_data)
def make_submission_entry(image_id, predictions):
"""Convenience function to create a prediction dictionary that can be
written to a csv file.
Args:
image_id : The image id.
predictions [] : A list of predictions.
Returns:
{} : A dict that can be used by a csv.DictWriter to write the data.
"""
predictions_str = " ".join(map(str, predictions))
return {
"image_id" : image_id,
"predictions" : predictions_str
}
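# Illustrative example: make_submission_entry(7, [3, 1, 4]) returns
#   {"image_id": 7, "predictions": "3 1 4"}
# which csv.DictWriter can write as one row of the submission file.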
def make_submission_files(num_classes, k, num_public_samples,
num_correct_public_predictions, num_private_samples,
num_correct_private_samples):
"""Convenience method to create the submission and solution csv files.
Args:
num_classes (int): The number of classes in the dataset.
k (int): The number of predictions to consider.
num_public_samples (int): The number of "Public" samples.
num_correct_public_predictions (int): The number of "Public" samples
the user gets correct.
num_private_samples (int): The number of "Private" samples.
num_correct_private_samples (int): The number of "Private" samples the
user gets correct.
Returns:
str: A file path to a submission file.
str: A file path to a solution file.
"""
public_labels, public_predictions = make_accuracy_data(
num_classes=num_classes,
num_test_samples=num_public_samples,
num_correct_predictions=num_correct_public_predictions,
k=k)
private_labels, private_predictions = make_accuracy_data(
num_classes=num_classes,
num_test_samples=num_private_samples,
num_correct_predictions=num_correct_private_samples,
k=k)
solution_data = []
for i, label in enumerate(public_labels):
solution_data.append({
"image_id" : i,
"truth" : label,
"usage" : "Public"
})
for i, label in enumerate(private_labels):
solution_data.append({
"image_id" : i + num_public_samples,
"truth" : label,
"usage" : "Private"
})
submission_data = []
for i, preds in enumerate(public_predictions + private_predictions):
image_id = i
submission_data.append(make_submission_entry(image_id, preds))
submission_fp = '/tmp/submission_file.txt'
make_submission_file(submission_fp, submission_data)
solution_fp = '/tmp/solution_file.txt'
make_solution_file(solution_fp, solution_data)
return submission_fp, solution_fp
class TestSubmissions(unittest.TestCase):
def test_top_1_perfect(self):
k = 1
submission_fp, solution_fp = make_submission_files(
num_classes=10,
k=k,
num_public_samples=100,
num_correct_public_predictions=100,
num_private_samples=100,
num_correct_private_samples=100)
public_acc, private_acc = top_k_accuracy.evaluate(submission_fp,
solution_fp, k=k)
self.assertEqual(public_acc, 1.)
self.assertEqual(private_acc, 1.)
def test_top_3_perfect(self):
k = 3
submission_fp, solution_fp = make_submission_files(
num_classes=10,
k=k,
num_public_samples=100,
num_correct_public_predictions=100,
num_private_samples=100,
num_correct_private_samples=100)
public_acc, private_acc = top_k_accuracy.evaluate(submission_fp,
solution_fp, k=k)
self.assertEqual(public_acc, 1.)
self.assertEqual(private_acc, 1.)
def test_top_5_half_right(self):
k = 5
submission_fp, solution_fp = make_submission_files(
num_classes=10,
k=k,
num_public_samples=100,
num_correct_public_predictions=50,
num_private_samples=100,
num_correct_private_samples=50)
public_acc, private_acc = top_k_accuracy.evaluate(submission_fp,
solution_fp, k=k)
self.assertEqual(public_acc, 0.5)
self.assertEqual(private_acc, 0.5)
def test_top_5_none_right(self):
k = 5
submission_fp, solution_fp = make_submission_files(
num_classes=10,
k=k,
num_public_samples=100,
num_correct_public_predictions=0,
num_private_samples=100,
num_correct_private_samples=0)
public_acc, private_acc = top_k_accuracy.evaluate(submission_fp,
solution_fp, k=k)
self.assertEqual(public_acc, 0.)
self.assertEqual(private_acc, 0.)
if __name__ == '__main__':
unittest.main()
| visipedia/inat_comp | eval/test_top_k_accuracy.py | Python | mit | 13,223 |
import os, sys, traceback
import github3
import gspread
import io
import json
import logging
from logging.config import dictConfig
from oauth2client.client import SignedJwtAssertionCredentials
GITHUB_CONFIG = {
'TOKEN': os.environ['GITHUB_TOKEN'],
'REPO_OWNER': 'opennews',
'REPO_NAME': 'srccon-2021',
'TARGET_FILE': 'schedule/sessions.json',
'TARGET_BRANCHES': ['master','staging',],# choose one or more branches
}
GITHUB_SRCCON_YAML_CONFIG = {
'TOKEN': os.environ['GITHUB_TOKEN'],
'REPO_OWNER': 'opennews',
'REPO_NAME': 'srccon-2021',
'TARGET_FILE': '_data/schedule.yaml',
'TARGET_BRANCHES': ['master',],
}
GOOGLE_API_CONFIG = {
'CLIENT_EMAIL': os.environ['GOOGLE_API_CLIENT_EMAIL'],
'PRIVATE_KEY': os.environ['GOOGLE_API_PRIVATE_KEY'].decode('unicode_escape'),
'SCOPE': ['https://spreadsheets.google.com/feeds'],
}
# the unique ID of the spreadsheet with your data can be stored
# as an environment variable or simply added here as a string
GOOGLE_SPREADSHEET_KEY = '1oYob00DLW09BoYUt-ZvPza5xnwNiFVQkQbcg6Zn2VWE'
#GOOGLE_SPREADSHEET_KEY = os.environ['GOOGLE_SPREADSHEET_KEY']
# pull data from a named worksheet, or leave blank to assume first worksheet
GOOGLE_SPREADSHEET_SHEETNAME = 'schedule data'
# if data is spread across multiple worksheets, set to True
FETCH_MULTIPLE_WORKSHEETS = False
# if fetching multiple worksheets, name sheets to skip here
# EXAMPLE: WORKSHEETS_TO_SKIP = ['Sheet1', 'Sheet4',]
WORKSHEETS_TO_SKIP = []
# set to True to store local version of JSON
MAKE_LOCAL_JSON = False
# set to False for dry runs
COMMIT_JSON_TO_GITHUB = True
# TODO: Add method for storing JSON output in S3 bucket
# S3_CONFIG = {}
# SEND_JSON_TO_S3 = False
def authenticate_with_google():
'''
Connect to Google Spreadsheet with gspread library.
'''
credentials = SignedJwtAssertionCredentials(
GOOGLE_API_CONFIG['CLIENT_EMAIL'], GOOGLE_API_CONFIG['PRIVATE_KEY'], GOOGLE_API_CONFIG['SCOPE']
)
google_api_conn = gspread.authorize(credentials)
return google_api_conn
def open_google_spreadsheet():
'''
Authenticate and return spreadsheet by `GOOGLE_SPREADSHEET_KEY`.
'''
google_api_conn = authenticate_with_google()
spreadsheet = google_api_conn.open_by_key(GOOGLE_SPREADSHEET_KEY)
return spreadsheet
def fetch_data(multiple_sheets=False, worksheets_to_skip=[]):
spreadsheet = open_google_spreadsheet()
if not multiple_sheets:
# Return data from named worksheet if specified ...
if GOOGLE_SPREADSHEET_SHEETNAME:
worksheet = spreadsheet.worksheet(GOOGLE_SPREADSHEET_SHEETNAME)
# .. otherwise return data from the first worksheet
else:
worksheet = spreadsheet.get_worksheet(0)
data = worksheet.get_all_records(empty2zero=False)
else:
# Return data from all worksheets in Google spreadsheet, optionally
# skipping sheets identified by title in `WORKSHEETS_TO_SKIP`
data = []
worksheet_list = [
sheet for sheet in spreadsheet.worksheets() if sheet.title not in WORKSHEETS_TO_SKIP
]
for worksheet in worksheet_list:
worksheet.title
data.extend(worksheet.get_all_records(empty2zero=False))
return data
def transform_data(data):
'''
Transforms data and filters/validates individual spreadsheet rows
for fields we want in the JSON output. Currently, this:
* ensures that all variables going into the JSON are strings
Additional filters should be added to _transform_response_item.
'''
def _transform_response_item(item, skip=False):
# make sure vars are strings
_transformed_item = {k: unicode(v) for k, v in item.iteritems() if k}
# EXAMPLE: get rid of data from column `rowNumber`
# if 'rowNumber' in _transformed_item:
# del _transformed_item['rowNumber']
# EXAMPLE: rename spreadsheet column `name` into JSON key `title`
# if 'name' in _transformed_item:
# _transformed_item['title'] = _transformed_item.pop('name', '')
# EXAMPLE: use `skip` flag to ignore rows without valid id
# if 'id' in _transformed_item:
# try:
# int(_transformed_item['id'])
# except:
# skip = True
# if we've triggered the skip flag anywhere, drop this record
if skip:
_transformed_item = None
return _transformed_item
# pass spreadsheet rows through the transformer
transformed_data = filter(None, [_transform_response_item(item) for item in data])
return transformed_data
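# Illustrative example (hypothetical spreadsheet row): a record such as
#   {'title': 'Opening session', 'room': 101, '': 'ignored'}
# comes back with every value coerced to unicode and keyless columns dropped:
#   {u'title': u'Opening session', u'room': u'101'}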
def make_json(data, store_locally=False, filename=GITHUB_CONFIG['TARGET_FILE']):
'''
Turns data into nice JSON, and optionally stores to a local file.
'''
json_out = json.dumps(data, sort_keys=True, indent=4, ensure_ascii=False)
if store_locally:
with io.open(filename, 'w', encoding='utf8') as outfile:
outfile.write(unicode(json_out))
return json_out.encode('utf-8')
def commit_json(data, target_config=GITHUB_CONFIG, commit=COMMIT_JSON_TO_GITHUB):
'''
Uses token to log into GitHub, then gets the appropriate repo based
on owner/name defined in GITHUB_CONFIG.
Creates data file if it does not exist in the repo, otherwise updates
existing data file.
If `COMMIT_JSON_TO_GITHUB` is False, this will operate in "dry run" mode,
authenticating against GitHub but not changing any files.
'''
# authenticate with GitHub
gh = github3.login(token=target_config['TOKEN'])
# get the right repo
repo = gh.repository(target_config['REPO_OWNER'], target_config['REPO_NAME'])
for branch in target_config['TARGET_BRANCHES']:
# check to see whether data file exists
try:
contents = repo.file_contents(
path=target_config['TARGET_FILE'],
ref=branch
)
except:
contents = None
if commit:
if not contents:
# create file that doesn't exist
repo.create_file(
path=target_config['TARGET_FILE'],
message='adding session data for schedule',
content=data,
branch=branch
)
logger.info('Created new data file in repo')
else:
# if data has changed, update existing file
if data.decode('utf-8') == contents.decoded.decode('utf-8'):
logger.info('Data has not changed, no commit created')
else:
contents.update(
message='updating schedule data',
content=data,
branch=branch
)
logger.info('Data updated, new commit to repo')
def update_srccon_schedule():
data = fetch_data(multiple_sheets=FETCH_MULTIPLE_WORKSHEETS, worksheets_to_skip=WORKSHEETS_TO_SKIP)
#print 'Fetched the data ...'
data = transform_data(data)
#print 'Prepped the data ...'
json_data = make_json(data, store_locally=MAKE_LOCAL_JSON)
#print 'Made some JSON!'
#commit_json(json_data, target_config=GITHUB_SRCCON_YAML_CONFIG)
#print 'SENT THE DATA TO GITHUB!'
commit_json(json_data)
#print 'Sent the data to GitHub!'
'''
Set up logging.
'''
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': 'log.txt',
'formatter': 'verbose'
},
'console':{
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
},
'loggers': {
'schedule_loader': {
'handlers':['file','console'],
'propagate': False,
'level':'DEBUG',
}
}
}
dictConfig(LOGGING)
logger = logging.getLogger('schedule_loader')
if __name__ == "__main__":
try:
update_srccon_schedule()
except Exception, e:
sys.stderr.write('\n')
traceback.print_exc(file=sys.stderr)
sys.stderr.write('\n')
sys.exit(1)
| ryanpitts/membot | membot/apps/membot/commands/update_srccon_schedule.py | Python | mit | 8,643 |
"""
Author: Justin Cappos
Start Date: 29 June 2008
Description:
Timer functions for the sandbox. This does sleep as well as setting and
cancelling timers.
"""
import threading
import thread # Armon: this is to catch thread.error
import nanny
import idhelper
# for printing exceptions
import tracebackrepy
# for harshexit
import harshexit
# For getruntime()
import nonportable
# For sleep
import time
# Import the exception hierarchy
from exception_hierarchy import *
##### Constants
# Armon: Prefix for use with event handles
EVENT_PREFIX = "_EVENT:"
# Store callable
safe_callable = callable
##### Public Functions
def sleep(seconds):
"""
<Purpose>
Allow the current event to pause execution (similar to time.sleep()).
This function will not return early for any reason
<Arguments>
seconds:
The number of seconds to sleep. This can be a floating point value
<Exceptions>
RepyArgumentError if seconds is not an int/long/float.
<Side Effects>
None.
<Returns>
None.
"""
# Check seconds to ensure it is a valid type.
if type(seconds) not in [long, float, int]:
raise RepyArgumentError("Invalid type " + str(type(seconds)))
# Using getruntime() in lieu of time.time() because we want elapsed time
# regardless of the oddities of NTP
start = nonportable.getruntime()
sleeptime = seconds
# Return no earlier than the finish time
finish = start + seconds
while sleeptime > 0.0:
time.sleep(sleeptime)
# If sleeptime > 0.0 then I woke up early...
sleeptime = finish - nonportable.getruntime()
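# Illustrative call (sketch): pause the current event for 1.5 seconds. Unlike a
# bare time.sleep(), the loop above re-sleeps if the OS wakes us early, so the
# call never returns before the full duration has elapsed.
#
#   sleep(1.5)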
def createthread(function):
"""
<Purpose>
Creates a new thread of execution.
<Arguments>
function:
The function to invoke on entering the new thread.
<Exceptions>
RepyArgumentError is raised if the function is not callable.
ResourceExhaustedError is raised if there are no available events.
<Side Effects>
Launches a new thread.
<Resource Consumption>
Consumes an event.
<Returns>
None
"""
# Check if the function is callable
if not safe_callable(function):
raise RepyArgumentError("Provided function is not callable!")
# Generate a unique handle and see if there are resources available
eventhandle = EVENT_PREFIX + idhelper.getuniqueid()
nanny.tattle_add_item('events', eventhandle)
# Wrap the provided function
def wrapped_func():
try:
function()
except:
# Exit if they throw an uncaught exception
tracebackrepy.handle_exception()
harshexit.harshexit(30)
finally:
# Remove the event before I exit
nanny.tattle_remove_item('events',eventhandle)
# Create a thread object
tobj = threading.Thread(target=wrapped_func, name=idhelper.get_new_thread_name(EVENT_PREFIX))
# Check if we get an exception trying to create a new thread
try:
tobj.start()
except thread.error:
# Set exit code 56, which stands for a Threading Error
# The Node manager will detect this and handle it
harshexit.harshexit(56)
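# Illustrative usage (sketch; assumes the sandbox's nanny resource accounting is
# already initialized, which this module does not do by itself):
#
#   def hello():
#       print "hello from a sandbox event"
#
#   createthread(hello)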
| SeattleTestbed/repy_v2 | emultimer.py | Python | mit | 3,081 |
from django.conf.urls.defaults import *
urlpatterns = patterns('django_fbi.views',
url(r'^channel/$', 'channel', name='channel'),
url(r'^connect/$', 'connect', name='connect'),
url(r'^deauthorize/$', 'deauthorize', name='deauthorize'),
url(r'^app/(?P<slug>[-\w]+)/$', 'view_app', {'page': 'canvas'}, name='canvas'),
url(r'^app/(?P<slug>[-\w]+)/tab/$', 'view_app', {'page': 'tab'}, name='tab'),
)
| dmpayton/django-fbi | django_fbi/urls.py | Python | mit | 426 |
import hashlib
import re
def class_name(obj):
class_name = str(type(obj))
class_name = re.search(".*'(.+?)'.*", class_name).group(1)
return class_name
def _can_iterate(obj):
import types
import collections
is_string = isinstance(obj, types.StringTypes)
is_iterable = isinstance(obj, collections.Iterable)
return is_iterable and not is_string
# based on:
# http://stackoverflow.com/questions/5386694/fast-way-to-hash-numpy-objects-for-caching
# http://stackoverflow.com/questions/806151/how-to-hash-a-large-object-dataset-in-python
def hash_sha1_numpy_array(a):
'''
Hash a numpy array using sha1.
'''
import numpy as np
# convert to contiguous in case the array has a different
# representation
a = np.ascontiguousarray(a)
# get a view from the array, this will help produce different hashes
# for arrays with same data but different shapes
a = a.view(np.uint8)
return hashlib.sha1(a).hexdigest()
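# Illustrative sketch (a hypothetical helper, not part of the original module):
# equal arrays (same dtype, shape and values) hash to the same digest.
def _example_hash_sha1_numpy_array():
    import numpy as np
    a = np.arange(12, dtype=np.int64).reshape(3, 4)
    b = np.arange(12, dtype=np.int64).reshape(3, 4)
    assert hash_sha1_numpy_array(a) == hash_sha1_numpy_array(b)
    return hash_sha1_numpy_array(a)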
| edublancas/pipeline | pipeline/util.py | Python | mit | 986 |
# -*- coding: utf-8 -*-
from factory import Sequence, LazyAttribute
from factory.alchemy import SQLAlchemyModelFactory
from fogspoon.core import db
from fogspoon.films import Film
from fogspoon.locations import Location, GeoLoc
class BaseFactory(SQLAlchemyModelFactory):
class Meta:
abstract = True
sqlalchemy_session = db.session
class FilmFactory(BaseFactory):
class Meta:
model = Film
id = Sequence(lambda n: n)
title = Sequence(lambda n: u'Gamma Film {0}'.format(n))
release_year = Sequence(lambda n: 1950 + n)
locations = LazyAttribute(lambda _: [LocationFactory()])
class LocationFactory(BaseFactory):
class Meta:
model = Location
id = Sequence(lambda n: n)
place_name = Sequence(lambda n: u'{0} Market St.'.format(n))
fun_fact = Sequence(lambda n: u'{0} Market St. was the place where Beta Film {1} was also shot'.format(n, n))
geo_locs = LazyAttribute(lambda n: [GeoLocFactory()])
class GeoLocFactory(BaseFactory):
class Meta:
model = GeoLoc
id = Sequence(lambda n: n)
service = Sequence(lambda n: u'Space! Deep Space {0}'.format(n))
altitude = Sequence(lambda n: 0.0 + float(n))
latitude = Sequence(lambda n: float(n*0.01) + 37.77928)
longitude = Sequence(lambda n: float(n*0.01) + -122.41922)
raw = Sequence(lambda n: u'Some raw payload from geothingy {0}'.format(n))
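# Illustrative usage (sketch; assumes an active SQLAlchemy session bound to
# db.session, as configured in BaseFactory above):
#
#   film = FilmFactory()
#   film.title            # e.g. u'Gamma Film 0'
#   film.locations[0]     # a Location built by LocationFactory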
| tkalus/fogspoon | tests/factories.py | Python | mit | 1,416 |
"""
This tutorial shows how to use cleverhans.picklable_model
to create models that can be saved for evaluation later.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import numpy as np
import tensorflow as tf
from cleverhans.attacks import FastGradientMethod
from cleverhans.compat import flags
from cleverhans.dataset import MNIST
from cleverhans.loss import CrossEntropy
from cleverhans.serial import save
from cleverhans.utils_tf import model_eval, silence
from cleverhans.train import train
from cleverhans.utils import AccuracyReport, set_log_level
from cleverhans_tutorials.tutorial_models import make_basic_picklable_cnn
silence()
FLAGS = flags.FLAGS
NB_EPOCHS = 6
BATCH_SIZE = 128
LEARNING_RATE = 0.001
NB_FILTERS = 64
CLEAN_TRAIN = True
BACKPROP_THROUGH_ATTACK = False
def mnist_tutorial(
train_start=0,
train_end=60000,
test_start=0,
test_end=10000,
nb_epochs=NB_EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LEARNING_RATE,
clean_train=CLEAN_TRAIN,
testing=False,
backprop_through_attack=BACKPROP_THROUGH_ATTACK,
nb_filters=NB_FILTERS,
num_threads=None,
label_smoothing=0.1,
):
"""
MNIST cleverhans tutorial
:param train_start: index of first training set example
:param train_end: index of last training set example
:param test_start: index of first test set example
:param test_end: index of last test set example
:param nb_epochs: number of epochs to train model
:param batch_size: size of training batches
:param learning_rate: learning rate for training
:param clean_train: perform normal training on clean examples only
before performing adversarial training.
:param testing: if true, complete an AccuracyReport for unit tests
to verify that performance is adequate
:param backprop_through_attack: If True, backprop through adversarial
example construction process during
adversarial training.
:param label_smoothing: float, amount of label smoothing for cross entropy
:return: an AccuracyReport object
"""
# Object used to keep track of (and return) key accuracies
report = AccuracyReport()
# Set TF random seed to improve reproducibility
tf.set_random_seed(1234)
# Set logging level to see debug information
set_log_level(logging.DEBUG)
# Create TF session
if num_threads:
config_args = dict(intra_op_parallelism_threads=1)
else:
config_args = {}
sess = tf.Session(config=tf.ConfigProto(**config_args))
# Get MNIST test data
mnist = MNIST(
train_start=train_start,
train_end=train_end,
test_start=test_start,
test_end=test_end,
)
x_train, y_train = mnist.get_set("train")
x_test, y_test = mnist.get_set("test")
# Use Image Parameters
img_rows, img_cols, nchannels = x_train.shape[1:4]
nb_classes = y_train.shape[1]
# Define input TF placeholder
x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols, nchannels))
y = tf.placeholder(tf.float32, shape=(None, nb_classes))
# Train an MNIST model
train_params = {
"nb_epochs": nb_epochs,
"batch_size": batch_size,
"learning_rate": learning_rate,
}
eval_params = {"batch_size": batch_size}
fgsm_params = {"eps": 0.3, "clip_min": 0.0, "clip_max": 1.0}
rng = np.random.RandomState([2017, 8, 30])
def do_eval(preds, x_set, y_set, report_key, is_adv=None):
"""
Run the evaluation and print the results.
"""
acc = model_eval(sess, x, y, preds, x_set, y_set, args=eval_params)
setattr(report, report_key, acc)
if is_adv is None:
report_text = None
elif is_adv:
report_text = "adversarial"
else:
report_text = "legitimate"
if report_text:
print("Test accuracy on %s examples: %0.4f" % (report_text, acc))
if clean_train:
model = make_basic_picklable_cnn()
# Tag the model so that when it is saved to disk, future scripts will
# be able to tell what data it was trained on
model.dataset_factory = mnist.get_factory()
preds = model.get_logits(x)
assert len(model.get_params()) > 0
loss = CrossEntropy(model, smoothing=label_smoothing)
def evaluate():
"""
Run evaluation for the naively trained model on clean examples.
"""
do_eval(preds, x_test, y_test, "clean_train_clean_eval", False)
train(
sess,
loss,
x_train,
y_train,
evaluate=evaluate,
args=train_params,
rng=rng,
var_list=model.get_params(),
)
with sess.as_default():
save("clean_model.joblib", model)
print(
"Now that the model has been saved, you can evaluate it in a"
" separate process using `evaluate_pickled_model.py`. "
"You should get exactly the same result for both clean and "
"adversarial accuracy as you get within this program."
)
# Calculate training error
if testing:
do_eval(preds, x_train, y_train, "train_clean_train_clean_eval")
# Initialize the Fast Gradient Sign Method (FGSM) attack object and
# graph
fgsm = FastGradientMethod(model, sess=sess)
adv_x = fgsm.generate(x, **fgsm_params)
preds_adv = model.get_logits(adv_x)
# Evaluate the accuracy of the MNIST model on adversarial examples
do_eval(preds_adv, x_test, y_test, "clean_train_adv_eval", True)
# Calculate training error
if testing:
do_eval(preds_adv, x_train, y_train, "train_clean_train_adv_eval")
print("Repeating the process, using adversarial training")
# Create a new model and train it to be robust to FastGradientMethod
model2 = make_basic_picklable_cnn()
# Tag the model so that when it is saved to disk, future scripts will
# be able to tell what data it was trained on
model2.dataset_factory = mnist.get_factory()
fgsm2 = FastGradientMethod(model2, sess=sess)
def attack(x):
"""Return an adversarial example near clean example `x`"""
return fgsm2.generate(x, **fgsm_params)
loss2 = CrossEntropy(model2, smoothing=label_smoothing, attack=attack)
preds2 = model2.get_logits(x)
adv_x2 = attack(x)
if not backprop_through_attack:
# For the fgsm attack used in this tutorial, the attack has zero
# gradient so enabling this flag does not change the gradient.
# For some other attacks, enabling this flag increases the cost of
# training, but gives the defender the ability to anticipate how
# the attacker will change their strategy in response to updates to
# the defender's parameters.
adv_x2 = tf.stop_gradient(adv_x2)
preds2_adv = model2.get_logits(adv_x2)
def evaluate_adv():
"""
Evaluate the adversarially trained model.
"""
# Accuracy of adversarially trained model on legitimate test inputs
do_eval(preds2, x_test, y_test, "adv_train_clean_eval", False)
# Accuracy of the adversarially trained model on adversarial examples
do_eval(preds2_adv, x_test, y_test, "adv_train_adv_eval", True)
# Perform and evaluate adversarial training
train(
sess,
loss2,
x_train,
y_train,
evaluate=evaluate_adv,
args=train_params,
rng=rng,
var_list=model2.get_params(),
)
with sess.as_default():
save("adv_model.joblib", model2)
print(
"Now that the model has been saved, you can evaluate it in a "
"separate process using "
"`python evaluate_pickled_model.py adv_model.joblib`. "
"You should get exactly the same result for both clean and "
"adversarial accuracy as you get within this program."
" You can also move beyond the tutorials directory and run the "
" real `compute_accuracy.py` script (make sure cleverhans/scripts "
"is in your PATH) to see that this FGSM-trained "
"model is actually not very robust---it's just a model that trains "
" quickly so the tutorial does not take a long time"
)
# Calculate training errors
if testing:
do_eval(preds2, x_train, y_train, "train_adv_train_clean_eval")
do_eval(preds2_adv, x_train, y_train, "train_adv_train_adv_eval")
return report
def main(argv=None):
"""
Run the tutorial using command line flags
"""
from cleverhans_tutorials import check_installation
check_installation(__file__)
mnist_tutorial(
nb_epochs=FLAGS.nb_epochs,
batch_size=FLAGS.batch_size,
learning_rate=FLAGS.learning_rate,
clean_train=FLAGS.clean_train,
backprop_through_attack=FLAGS.backprop_through_attack,
nb_filters=FLAGS.nb_filters,
)
if __name__ == "__main__":
flags.DEFINE_integer("nb_filters", NB_FILTERS, "Model size multiplier")
flags.DEFINE_integer("nb_epochs", NB_EPOCHS, "Number of epochs to train model")
flags.DEFINE_integer("batch_size", BATCH_SIZE, "Size of training batches")
flags.DEFINE_float("learning_rate", LEARNING_RATE, "Learning rate for training")
flags.DEFINE_bool("clean_train", CLEAN_TRAIN, "Train on clean examples")
flags.DEFINE_bool(
"backprop_through_attack",
BACKPROP_THROUGH_ATTACK,
(
"If True, backprop through adversarial example "
"construction process during adversarial training"
),
)
tf.app.run()
| cleverhans-lab/cleverhans | cleverhans_v3.1.0/cleverhans_tutorials/mnist_tutorial_picklable.py | Python | mit | 10,006 |
from wrapper import Wrapper
class UberApi(Wrapper):
def __init__(self, user):
Wrapper.__init__(self, __name__.split('.').pop(), user)
| whittlbc/jarvis | jarvis/api/uber.py | Python | mit | 138 |
# Create your views here.
import django.contrib.auth
from django.contrib.auth.models import User, check_password
from django import forms
from django.http import HttpResponse,HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.http import Http404
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from django.contrib.auth import login, logout
from django import forms
from djwed.wedding.email import email_invite_code
from djwed.wedding.models import Invitee, Guest
from djwed.wedding.settings import *
from datetime import datetime
def user_to_invitee(user):
return Invitee.objects.get(id=int(user.username[len(username_prefix):]))
def token_to_invitee(token):
return Invitee.objects.get(invite_code__exact=token.upper())
class InviteeAuthBackend:
def authenticate(self, token=None):
try:
inv = token_to_invitee(token)
except Invitee.DoesNotExist:
return None
user = inv.user()
user.backend = "djwed.wedding.auth.InviteeAuthBackend"
return user
def get_user(self, user_id):
try:
u = User.objects.get(pk=user_id)
if u.username[0:len(username_prefix)] == username_prefix:
return u
else:
return None
except User.DoesNotExist:
return None
class LoginForm(forms.Form):
token = forms.CharField(max_length=10)
def clean_token(self):
token = self.cleaned_data['token']
try:
inv = token_to_invitee(token)
except Invitee.DoesNotExist:
raise forms.ValidationError(u'Invalid login token')
return inv
class ReminderForm(forms.Form):
email = forms.EmailField()
def clean_email(self):
email = self.cleaned_data['email']
try:
guest = Guest.objects.get(email=email)
except Guest.DoesNotExist:
raise forms.ValidationError(u'No guest found with this email address')
return guest
def rsvp_logout(request):
logout(request)
return HttpResponseRedirect('/rsvp/')
def rsvp_login(request):
next = ''
if request.REQUEST.has_key('next'):
next = request.REQUEST['next']
if request.method == 'POST':
if 'login' in request.POST:
form = LoginForm(request.POST, prefix='login')
if form.is_valid():
inv = form.cleaned_data['token']
inv.last_visited = datetime.now()
inv.save()
u = inv.user()
u.backend = "djwed.wedding.auth.InviteeAuthBackend"
login(request, u)
if next:
return HttpResponseRedirect(next) # Redirect after POST
else:
return HttpResponseRedirect('/rsvp/') # Redirect after POST
else:
reminder_form = ReminderForm(prefix='reminder')
elif 'reminder' in request.POST:
reminder_form = ReminderForm(request.POST, prefix='reminder')
if reminder_form.is_valid():
guest = reminder_form.cleaned_data['email']
email_invite_code(guest)
reminder_form = ReminderForm(prefix='reminder')
form = LoginForm(prefix='login')
else:
form = LoginForm(prefix='login')
reminder_form = ReminderForm(prefix='reminder')
return render_to_response('login.html', {
'form': form,
'reminder_form': reminder_form,
'next': next,
})
def rsvp_login_from_token(request, invite_code, target="rsvp"):
u = InviteeAuthBackend().authenticate(invite_code)
if u:
inv = user_to_invitee(u)
inv.last_visited = datetime.now()
inv.save()
login(request, u)
return HttpResponseRedirect('/%s/'%(target,))
else:
return HttpResponseRedirect('/accounts/login/')
| garyp/djwed | wedding/auth.py | Python | mit | 3,976 |
#/###################/#
# Import modules
#
#ImportModules
import ShareYourSystem as SYS
#/###################/#
# Build the model
#
#Simulation time
SimulationTimeFloat=1000.
#SimulationTimeFloat=0.2
BrianingDebugVariable=0.1 if SimulationTimeFloat<0.5 else 25.
#A - transition matrix
JacobianTimeFloat = 10. #(ms)
A = (-1./float(JacobianTimeFloat)
)*SYS.numpy.array([[1.]])
#Define
MyPredicter=SYS.PredicterClass(
).mapSet(
{
'BrianingStepTimeFloat':0.05,
'-Populations':[
('|Sensor',{
'RecordingLabelVariable':[0],
#'BrianingDebugVariable':BrianingDebugVariable,
'-Interactions':{
'|Encod':{
#'BrianingDebugVariable':BrianingDebugVariable
}
}
}),
('|Agent',{
'RecordingLabelVariable':[0,1,2],
#'BrianingDebugVariable':BrianingDebugVariable,
'-Traces':{
'|U':{
'RecordingInitMeanVariable':0.,
'RecordingInitStdVariable':0.1,
}
},
'-Interactions':{
'|Fast':{
#'BrianingDebugVariable':BrianingDebugVariable
}
}
}),
('|Decoder',{
'RecordingLabelVariable':[0],
#'BrianingDebugVariable':BrianingDebugVariable
})
]
}
).predict(
_AgentUnitsInt=100,
_DynamicBool=False,
_JacobianVariable=A,
_CommandVariable = "#custom:#clock:250*ms:(0.5/"+str(
JacobianTimeFloat
)+")*mV*(int(t==250*ms)+int(t==500*ms))",
_RateTransferVariable='(1./<ThresFloat>)*mV*tanh((<ThresFloat>*(#CurrentStr))/(1.*mV))'.replace(
'<ThresFloat>',
'10.'
),
_DecoderVariable='#array',
_DecoderStdFloat=7.,
_InteractionStr="Rate",
#_EncodPerturbStdFloat=5./100.,
_FastPerturbStdFloat=0.04
).simulate(
SimulationTimeFloat
)
#/###################/#
# View
#
MyPredicter.view(
).mapSet(
{
'-Panels':[
(
'|Run',
[
(
'-Charts',
[
(
'|Sensor_U',
{
'PyplotingLegendDict':{
'fontsize':10,
'ncol':2
}
}
),
(
'|Agent_U',
{
'PyplotingLegendDict':{
'fontsize':10,
'ncol':2
}
}
),
(
'|Decoder_U',
{
'PyplotingLegendDict':{
'fontsize':10,
'ncol':2
}
}
)
]
)
]
)
]
}
).pyplot(
).show()
#/###################/#
# Print
#
#Definition the AttestedStr
print('MyPredicter is ')
SYS._print(MyPredicter)
| Ledoux/ShareYourSystem | Pythonlogy/ShareYourSystem/Specials/Predicters/Predicter/tests/03_tests_chaotic/05_01_test_rate_chaotic_dense_ExampleCell.py | Python | mit | 2,520 |
#!/usr/bin/env python3
# Copyright (c) 2016-2019 The Bitcoin Core developers
# Copyright (c) 2017 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test compact blocks (BIP 152).
Only testing Version 1 compact blocks (txids)
"""
import random
from test_framework.blocktools import create_block, create_coinbase
from test_framework.messages import (
BlockTransactions,
BlockTransactionsRequest,
calculate_shortid,
CBlock,
CBlockHeader,
CInv,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
FromHex,
HeaderAndShortIDs,
msg_block,
msg_blocktxn,
msg_cmpctblock,
msg_getblocktxn,
msg_getdata,
msg_getheaders,
msg_headers,
msg_inv,
msg_sendcmpct,
msg_sendheaders,
msg_tx,
NODE_NETWORK,
P2PHeaderAndShortIDs,
PrefilledTransaction,
ToHex,
)
from test_framework.mininode import (
mininode_lock,
P2PInterface,
)
from test_framework.script import CScript, OP_TRUE
from test_framework.test_framework import BitcoinTestFramework
from test_framework.txtools import pad_tx
from test_framework.util import assert_equal, sync_blocks, wait_until
# TestP2PConn: A peer we use to send messages to bitcoind, and store responses.
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.last_sendcmpct = []
self.block_announced = False
# Store the hashes of blocks we've seen announced.
# This is for synchronizing the p2p message traffic,
# so we can eg wait until a particular block is announced.
self.announced_blockhashes = set()
def on_sendcmpct(self, message):
self.last_sendcmpct.append(message)
def on_cmpctblock(self, message):
self.block_announced = True
self.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
self.announced_blockhashes.add(
self.last_message["cmpctblock"].header_and_shortids.header.sha256)
def on_headers(self, message):
self.block_announced = True
for x in self.last_message["headers"].headers:
x.calc_sha256()
self.announced_blockhashes.add(x.sha256)
def on_inv(self, message):
for x in self.last_message["inv"].inv:
if x.type == 2:
self.block_announced = True
self.announced_blockhashes.add(x.hash)
# Requires caller to hold mininode_lock
def received_block_announcement(self):
return self.block_announced
def clear_block_announcement(self):
with mininode_lock:
self.block_announced = False
self.last_message.pop("inv", None)
self.last_message.pop("headers", None)
self.last_message.pop("cmpctblock", None)
def get_headers(self, locator, hashstop):
msg = msg_getheaders()
msg.locator.vHave = locator
msg.hashstop = hashstop
self.send_message(msg)
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [CBlockHeader(b) for b in new_blocks]
self.send_message(headers_message)
def request_headers_and_sync(self, locator, hashstop=0):
self.clear_block_announcement()
self.get_headers(locator, hashstop)
wait_until(self.received_block_announcement,
timeout=30, lock=mininode_lock)
self.clear_block_announcement()
# Block until a block announcement for a particular block hash is
# received.
def wait_for_block_announcement(self, block_hash, timeout=30):
def received_hash():
return (block_hash in self.announced_blockhashes)
wait_until(received_hash, timeout=timeout, lock=mininode_lock)
def send_await_disconnect(self, message, timeout=30):
"""Sends a message to the node and wait for disconnect.
This is used when we want to send a message into the node that we expect
will get us disconnected, eg an invalid block."""
self.send_message(message)
wait_until(lambda: not self.is_connected,
timeout=timeout, lock=mininode_lock)
class CompactBlocksTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [[], ["-txindex"]]
self.utxos = []
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def build_block_on_tip(self, node):
height = node.getblockcount()
tip = node.getbestblockhash()
mtp = node.getblockheader(tip)['mediantime']
block = create_block(
int(tip, 16), create_coinbase(height + 1), mtp + 1)
block.nVersion = 4
block.solve()
return block
# Create 10 more anyone-can-spend utxo's for testing.
def make_utxos(self):
# Doesn't matter which node we use, just use node0.
block = self.build_block_on_tip(self.nodes[0])
self.test_node.send_and_ping(msg_block(block))
assert int(self.nodes[0].getbestblockhash(), 16) == block.sha256
self.nodes[0].generate(100)
total_value = block.vtx[0].vout[0].nValue
out_value = total_value // 10
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b''))
for i in range(10):
tx.vout.append(CTxOut(out_value, CScript([OP_TRUE])))
tx.rehash()
block2 = self.build_block_on_tip(self.nodes[0])
block2.vtx.append(tx)
block2.hashMerkleRoot = block2.calc_merkle_root()
block2.solve()
self.test_node.send_and_ping(msg_block(block2))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256)
self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)])
return
# Test "sendcmpct" (between peers preferring the same version):
# - No compact block announcements unless sendcmpct is sent.
# - If sendcmpct is sent with version > preferred_version, the message is ignored.
# - If sendcmpct is sent with boolean 0, then block announcements are not
# made with compact blocks.
# - If sendcmpct is then sent with boolean 1, then new block announcements
# are made with compact blocks.
# If old_node is passed in, request compact blocks with version=preferred-1
# and verify that it receives block announcements via compact block.
def test_sendcmpct(self, node, test_node,
preferred_version, old_node=None):
# Make sure we get a SENDCMPCT message from our peer
def received_sendcmpct():
return (len(test_node.last_sendcmpct) > 0)
wait_until(received_sendcmpct, timeout=30, lock=mininode_lock)
with mininode_lock:
# Check that the first version received is the preferred one
assert_equal(
test_node.last_sendcmpct[0].version, preferred_version)
# And that we receive versions down to 1.
assert_equal(test_node.last_sendcmpct[-1].version, 1)
test_node.last_sendcmpct = []
tip = int(node.getbestblockhash(), 16)
def check_announcement_of_new_block(node, peer, predicate):
peer.clear_block_announcement()
block_hash = int(node.generate(1)[0], 16)
peer.wait_for_block_announcement(block_hash, timeout=30)
assert peer.block_announced
with mininode_lock:
assert predicate(peer), (
"block_hash={!r}, cmpctblock={!r}, inv={!r}".format(
block_hash, peer.last_message.get("cmpctblock", None), peer.last_message.get("inv", None)))
# We shouldn't get any block announcements via cmpctblock yet.
check_announcement_of_new_block(
node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Try one more time, this time after requesting headers.
test_node.request_headers_and_sync(locator=[tip])
check_announcement_of_new_block(
node, test_node, lambda p: "cmpctblock" not in p.last_message and "inv" in p.last_message)
# Test a few ways of using sendcmpct that should NOT
# result in compact block announcements.
# Before each test, sync the headers chain.
test_node.request_headers_and_sync(locator=[tip])
# Now try a SENDCMPCT message with too-high version
sendcmpct = msg_sendcmpct()
sendcmpct.version = 999 # was: preferred_version+1
sendcmpct.announce = True
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(
node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Headers sync before next test.
test_node.request_headers_and_sync(locator=[tip])
# Now try a SENDCMPCT message with valid version, but announce=False
sendcmpct.version = preferred_version
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(
node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Headers sync before next test.
test_node.request_headers_and_sync(locator=[tip])
# Finally, try a SENDCMPCT message with announce=True
sendcmpct.version = preferred_version
sendcmpct.announce = True
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(
node, test_node, lambda p: "cmpctblock" in p.last_message)
# Try one more time (no headers sync should be needed!)
check_announcement_of_new_block(
node, test_node, lambda p: "cmpctblock" in p.last_message)
# Try one more time, after turning on sendheaders
test_node.send_and_ping(msg_sendheaders())
check_announcement_of_new_block(
node, test_node, lambda p: "cmpctblock" in p.last_message)
# Try one more time, after sending a version-1, announce=false message.
sendcmpct.version = preferred_version - 1
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(
node, test_node, lambda p: "cmpctblock" in p.last_message)
# Now turn off announcements
sendcmpct.version = preferred_version
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(
node, test_node, lambda p: "cmpctblock" not in p.last_message and "headers" in p.last_message)
if old_node is not None:
# Verify that a peer using an older protocol version can receive
# announcements from this node.
sendcmpct.version = 1 # preferred_version-1
sendcmpct.announce = True
old_node.send_and_ping(sendcmpct)
# Header sync
old_node.request_headers_and_sync(locator=[tip])
check_announcement_of_new_block(
node, old_node, lambda p: "cmpctblock" in p.last_message)
# This test actually causes bitcoind to (reasonably!) disconnect us, so do
# this last.
def test_invalid_cmpctblock_message(self):
self.nodes[0].generate(101)
block = self.build_block_on_tip(self.nodes[0])
cmpct_block = P2PHeaderAndShortIDs()
cmpct_block.header = CBlockHeader(block)
cmpct_block.prefilled_txn_length = 1
# This index will be too high
prefilled_txn = PrefilledTransaction(1, block.vtx[0])
cmpct_block.prefilled_txn = [prefilled_txn]
self.test_node.send_await_disconnect(msg_cmpctblock(cmpct_block))
assert_equal(
int(self.nodes[0].getbestblockhash(), 16), block.hashPrevBlock)
# Compare the generated shortids to what we expect based on BIP 152, given
# bitcoind's choice of nonce.
def test_compactblock_construction(self, node, test_node):
# Generate a bunch of transactions.
node.generate(101)
num_transactions = 25
address = node.getnewaddress()
for i in range(num_transactions):
txid = node.sendtoaddress(address, 0.1)
hex_tx = node.gettransaction(txid)["hex"]
tx = FromHex(CTransaction(), hex_tx)
# Wait until we've seen the block announcement for the resulting tip
tip = int(node.getbestblockhash(), 16)
test_node.wait_for_block_announcement(tip)
# Make sure we will receive a fast-announce compact block
self.request_cb_announcements(test_node, node)
# Now mine a block, and look at the resulting compact block.
test_node.clear_block_announcement()
block_hash = int(node.generate(1)[0], 16)
# Store the raw block in our internal format.
block = FromHex(CBlock(), node.getblock(
"{:02x}".format(block_hash), False))
for tx in block.vtx:
tx.calc_sha256()
block.rehash()
# Wait until the block was announced (via compact blocks)
wait_until(test_node.received_block_announcement,
timeout=30, lock=mininode_lock)
# Now fetch and check the compact block
header_and_shortids = None
with mininode_lock:
assert "cmpctblock" in test_node.last_message
# Convert the on-the-wire representation to absolute indexes
header_and_shortids = HeaderAndShortIDs(
test_node.last_message["cmpctblock"].header_and_shortids)
self.check_compactblock_construction_from_block(
header_and_shortids, block_hash, block)
# Now fetch the compact block using a normal non-announce getdata
with mininode_lock:
test_node.clear_block_announcement()
inv = CInv(4, block_hash) # 4 == "CompactBlock"
test_node.send_message(msg_getdata([inv]))
wait_until(test_node.received_block_announcement,
timeout=30, lock=mininode_lock)
# Now fetch and check the compact block
header_and_shortids = None
with mininode_lock:
assert "cmpctblock" in test_node.last_message
# Convert the on-the-wire representation to absolute indexes
header_and_shortids = HeaderAndShortIDs(
test_node.last_message["cmpctblock"].header_and_shortids)
self.check_compactblock_construction_from_block(
header_and_shortids, block_hash, block)
def check_compactblock_construction_from_block(
self, header_and_shortids, block_hash, block):
# Check that we got the right block!
header_and_shortids.header.calc_sha256()
assert_equal(header_and_shortids.header.sha256, block_hash)
# Make sure the prefilled_txn appears to have included the coinbase
assert len(header_and_shortids.prefilled_txn) >= 1
assert_equal(header_and_shortids.prefilled_txn[0].index, 0)
# Check that all prefilled_txn entries match what's in the block.
for entry in header_and_shortids.prefilled_txn:
entry.tx.calc_sha256()
# This checks the tx agree
assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256)
# Check that the cmpctblock message announced all the transactions.
assert_equal(len(header_and_shortids.prefilled_txn)
+ len(header_and_shortids.shortids), len(block.vtx))
# And now check that all the shortids are as expected as well.
# Determine the siphash keys to use.
[k0, k1] = header_and_shortids.get_siphash_keys()
index = 0
while index < len(block.vtx):
if (len(header_and_shortids.prefilled_txn) > 0 and
header_and_shortids.prefilled_txn[0].index == index):
# Already checked prefilled transactions above
header_and_shortids.prefilled_txn.pop(0)
else:
tx_hash = block.vtx[index].sha256
shortid = calculate_shortid(k0, k1, tx_hash)
assert_equal(shortid, header_and_shortids.shortids[0])
header_and_shortids.shortids.pop(0)
index += 1
# Test that bitcoind requests compact blocks when we announce new blocks
# via header or inv, and that responding to getblocktxn causes the block
# to be successfully reconstructed.
def test_compactblock_requests(self, node, test_node, version):
# Try announcing a block with an inv or header, expect a compactblock
# request
for announce in ["inv", "header"]:
block = self.build_block_on_tip(node)
with mininode_lock:
test_node.last_message.pop("getdata", None)
if announce == "inv":
test_node.send_message(msg_inv([CInv(2, block.sha256)]))
wait_until(lambda: "getheaders" in test_node.last_message,
timeout=30, lock=mininode_lock)
test_node.send_header_for_blocks([block])
else:
test_node.send_header_for_blocks([block])
wait_until(lambda: "getdata" in test_node.last_message,
timeout=30, lock=mininode_lock)
assert_equal(len(test_node.last_message["getdata"].inv), 1)
assert_equal(test_node.last_message["getdata"].inv[0].type, 4)
assert_equal(
test_node.last_message["getdata"].inv[0].hash, block.sha256)
# Send back a compactblock message that omits the coinbase
comp_block = HeaderAndShortIDs()
comp_block.header = CBlockHeader(block)
comp_block.nonce = 0
[k0, k1] = comp_block.get_siphash_keys()
coinbase_hash = block.vtx[0].sha256
comp_block.shortids = [
calculate_shortid(k0, k1, coinbase_hash)]
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
# Expect a getblocktxn message.
with mininode_lock:
assert "getblocktxn" in test_node.last_message
absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute(
)
assert_equal(absolute_indexes, [0]) # should be a coinbase request
# Send the coinbase, and verify that the tip advances.
msg = msg_blocktxn()
msg.block_transactions.blockhash = block.sha256
msg.block_transactions.transactions = [block.vtx[0]]
test_node.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
# Create a chain of transactions from given utxo, and add to a new block.
# Note that num_transactions is number of transactions not including the
# coinbase.
def build_block_with_transactions(self, node, utxo, num_transactions):
block = self.build_block_on_tip(node)
for i in range(num_transactions):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b''))
tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE])))
pad_tx(tx)
tx.rehash()
utxo = [tx.sha256, 0, tx.vout[0].nValue]
block.vtx.append(tx)
ordered_txs = block.vtx
block.vtx = [block.vtx[0]] + \
sorted(block.vtx[1:], key=lambda tx: tx.get_id())
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
return block, ordered_txs
# Test that we only receive getblocktxn requests for transactions that the
# node needs, and that responding to them causes the block to be
# reconstructed.
def test_getblocktxn_requests(self, node, test_node, version):
def test_getblocktxn_response(compact_block, peer, expected_result):
msg = msg_cmpctblock(compact_block.to_p2p())
peer.send_and_ping(msg)
with mininode_lock:
assert "getblocktxn" in peer.last_message
absolute_indexes = peer.last_message["getblocktxn"].block_txn_request.to_absolute(
)
assert_equal(absolute_indexes, expected_result)
def test_tip_after_message(node, peer, msg, tip):
peer.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), tip)
# First try announcing compactblocks that won't reconstruct, and verify
# that we receive getblocktxn messages back.
utxo = self.utxos.pop(0)
block, ordered_txs = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append(
[ordered_txs[-1].sha256, 0, ordered_txs[-1].vout[0].nValue])
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block)
test_getblocktxn_response(comp_block, test_node, [1, 2, 3, 4, 5])
msg_bt = msg_blocktxn()
msg_bt.block_transactions = BlockTransactions(
block.sha256, block.vtx[1:])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
utxo = self.utxos.pop(0)
block, ordered_txs = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append(
[ordered_txs[-1].sha256, 0, ordered_txs[-1].vout[0].nValue])
# Now try interspersing the prefilled transactions
comp_block.initialize_from_block(
block, prefill_list=[0, 1, 5])
test_getblocktxn_response(comp_block, test_node, [2, 3, 4])
msg_bt.block_transactions = BlockTransactions(
block.sha256, block.vtx[2:5])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
# Now try giving one transaction ahead of time.
utxo = self.utxos.pop(0)
block, ordered_txs = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append(
[ordered_txs[-1].sha256, 0, ordered_txs[-1].vout[0].nValue])
test_node.send_and_ping(msg_tx(ordered_txs[1]))
assert ordered_txs[1].hash in node.getrawmempool()
test_node.send_and_ping(msg_tx(ordered_txs[1]))
# Prefill 4 out of the 6 transactions, and verify that only the one
# that was not in the mempool is requested.
prefill_list = [0, 1, 2, 3, 4, 5]
prefill_list.remove(block.vtx.index(ordered_txs[1]))
expected_index = block.vtx.index(ordered_txs[-1])
prefill_list.remove(expected_index)
comp_block.initialize_from_block(block, prefill_list=prefill_list)
test_getblocktxn_response(comp_block, test_node, [expected_index])
msg_bt.block_transactions = BlockTransactions(
block.sha256, [ordered_txs[5]])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
# Now provide all transactions to the node before the block is
# announced and verify reconstruction happens immediately.
utxo = self.utxos.pop(0)
block, ordered_txs = self.build_block_with_transactions(node, utxo, 10)
self.utxos.append(
[ordered_txs[-1].sha256, 0, ordered_txs[-1].vout[0].nValue])
for tx in ordered_txs[1:]:
test_node.send_message(msg_tx(tx))
test_node.sync_with_ping()
# Make sure all transactions were accepted.
mempool = node.getrawmempool()
for tx in block.vtx[1:]:
assert tx.hash in mempool
# Clear out last request.
with mininode_lock:
test_node.last_message.pop("getblocktxn", None)
# Send compact block
comp_block.initialize_from_block(block, prefill_list=[0])
test_tip_after_message(
node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256)
with mininode_lock:
# Shouldn't have gotten a request for any transaction
assert "getblocktxn" not in test_node.last_message
# Incorrectly responding to a getblocktxn shouldn't cause the block to be
# permanently failed.
def test_incorrect_blocktxn_response(self, node, test_node, version):
if (len(self.utxos) == 0):
self.make_utxos()
utxo = self.utxos.pop(0)
block, ordered_txs = self.build_block_with_transactions(node, utxo, 10)
self.utxos.append(
[ordered_txs[-1].sha256, 0, ordered_txs[-1].vout[0].nValue])
# Relay the first 5 transactions from the block in advance
for tx in ordered_txs[1:6]:
test_node.send_message(msg_tx(tx))
test_node.sync_with_ping()
# Make sure all transactions were accepted.
mempool = node.getrawmempool()
for tx in ordered_txs[1:6]:
assert tx.hash in mempool
# Send compact block
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block, prefill_list=[0])
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
absolute_indices = []
with mininode_lock:
assert "getblocktxn" in test_node.last_message
absolute_indices = test_node.last_message["getblocktxn"].block_txn_request.to_absolute(
)
expected_indices = []
for i in [6, 7, 8, 9, 10]:
expected_indices.append(block.vtx.index(ordered_txs[i]))
assert_equal(absolute_indices, sorted(expected_indices))
# Now give an incorrect response.
# Note that it's possible for bitcoind to be smart enough to know we're
# lying, since it could check to see if the shortid matches what we're
# sending, and eg disconnect us for misbehavior. If that behavior
# change was made, we could just modify this test by having a
# different peer provide the block further down, so that we're still
# verifying that the block isn't marked bad permanently. This is good
# enough for now.
msg = msg_blocktxn()
msg.block_transactions = BlockTransactions(
block.sha256, [ordered_txs[5]] + ordered_txs[7:])
test_node.send_and_ping(msg)
# Tip should not have updated
assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
# We should receive a getdata request
wait_until(lambda: "getdata" in test_node.last_message,
timeout=10, lock=mininode_lock)
assert_equal(len(test_node.last_message["getdata"].inv), 1)
assert test_node.last_message["getdata"].inv[0].type == 2
assert_equal(
test_node.last_message["getdata"].inv[0].hash, block.sha256)
# Deliver the block
test_node.send_and_ping(msg_block(block))
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def test_getblocktxn_handler(self, node, test_node, version):
# bitcoind will not send blocktxn responses for blocks whose height is
# more than 10 blocks deep.
MAX_GETBLOCKTXN_DEPTH = 10
chain_height = node.getblockcount()
current_height = chain_height
while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH):
block_hash = node.getblockhash(current_height)
block = FromHex(CBlock(), node.getblock(block_hash, False))
msg = msg_getblocktxn()
msg.block_txn_request = BlockTransactionsRequest(
int(block_hash, 16), [])
num_to_request = random.randint(1, len(block.vtx))
msg.block_txn_request.from_absolute(
sorted(random.sample(range(len(block.vtx)), num_to_request)))
test_node.send_message(msg)
wait_until(lambda: "blocktxn" in test_node.last_message,
timeout=10, lock=mininode_lock)
[tx.calc_sha256() for tx in block.vtx]
with mininode_lock:
assert_equal(test_node.last_message["blocktxn"].block_transactions.blockhash, int(
block_hash, 16))
all_indices = msg.block_txn_request.to_absolute()
for index in all_indices:
tx = test_node.last_message["blocktxn"].block_transactions.transactions.pop(
0)
tx.calc_sha256()
assert_equal(tx.sha256, block.vtx[index].sha256)
test_node.last_message.pop("blocktxn", None)
current_height -= 1
# Next request should send a full block response, as we're past the
# allowed depth for a blocktxn response.
block_hash = node.getblockhash(current_height)
msg.block_txn_request = BlockTransactionsRequest(
int(block_hash, 16), [0])
with mininode_lock:
test_node.last_message.pop("block", None)
test_node.last_message.pop("blocktxn", None)
test_node.send_and_ping(msg)
with mininode_lock:
test_node.last_message["block"].block.calc_sha256()
assert_equal(
test_node.last_message["block"].block.sha256, int(block_hash, 16))
assert "blocktxn" not in test_node.last_message
def test_compactblocks_not_at_tip(self, node, test_node):
# Test that requesting old compactblocks doesn't work.
MAX_CMPCTBLOCK_DEPTH = 5
new_blocks = []
for i in range(MAX_CMPCTBLOCK_DEPTH + 1):
test_node.clear_block_announcement()
new_blocks.append(node.generate(1)[0])
wait_until(test_node.received_block_announcement,
timeout=30, lock=mininode_lock)
test_node.clear_block_announcement()
test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
wait_until(lambda: "cmpctblock" in test_node.last_message,
timeout=30, lock=mininode_lock)
test_node.clear_block_announcement()
node.generate(1)
wait_until(test_node.received_block_announcement,
timeout=30, lock=mininode_lock)
test_node.clear_block_announcement()
with mininode_lock:
test_node.last_message.pop("block", None)
test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
wait_until(lambda: "block" in test_node.last_message,
timeout=30, lock=mininode_lock)
with mininode_lock:
test_node.last_message["block"].block.calc_sha256()
assert_equal(
test_node.last_message["block"].block.sha256, int(new_blocks[0], 16))
# Generate an old compactblock, and verify that it's not accepted.
cur_height = node.getblockcount()
hashPrevBlock = int(node.getblockhash(cur_height - 5), 16)
block = self.build_block_on_tip(node)
block.hashPrevBlock = hashPrevBlock
block.solve()
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block)
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
tips = node.getchaintips()
found = False
for x in tips:
if x["hash"] == block.hash:
assert_equal(x["status"], "headers-only")
found = True
break
assert found
# Requesting this block via getblocktxn should silently fail
# (to avoid fingerprinting attacks).
msg = msg_getblocktxn()
msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0])
with mininode_lock:
test_node.last_message.pop("blocktxn", None)
test_node.send_and_ping(msg)
with mininode_lock:
assert "blocktxn" not in test_node.last_message
def test_end_to_end_block_relay(self, node, listeners):
utxo = self.utxos.pop(0)
block, _ = self.build_block_with_transactions(node, utxo, 10)
[l.clear_block_announcement() for l in listeners]
node.submitblock(ToHex(block))
for l in listeners:
wait_until(lambda: l.received_block_announcement(),
timeout=30, lock=mininode_lock)
with mininode_lock:
for l in listeners:
assert "cmpctblock" in l.last_message
l.last_message["cmpctblock"].header_and_shortids.header.calc_sha256(
)
assert_equal(
l.last_message["cmpctblock"].header_and_shortids.header.sha256, block.sha256)
# Test that we don't get disconnected if we relay a compact block with valid header,
# but invalid transactions.
def test_invalid_tx_in_compactblock(self, node, test_node):
assert len(self.utxos)
utxo = self.utxos[0]
block, ordered_txs = self.build_block_with_transactions(node, utxo, 5)
block.vtx.remove(ordered_txs[3])
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
# Now send the compact block with all transactions prefilled, and
# verify that we don't get disconnected.
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block, prefill_list=[0, 1, 2, 3, 4])
msg = msg_cmpctblock(comp_block.to_p2p())
test_node.send_and_ping(msg)
# Check that the tip didn't advance
        assert int(node.getbestblockhash(), 16) != block.sha256
test_node.sync_with_ping()
# Helper for enabling cb announcements
# Send the sendcmpct request and sync headers
def request_cb_announcements(self, peer, node, version=1):
tip = node.getbestblockhash()
peer.get_headers(locator=[int(tip, 16)], hashstop=0)
msg = msg_sendcmpct()
msg.version = version
msg.announce = True
peer.send_and_ping(msg)
def test_compactblock_reconstruction_multiple_peers(
self, node, stalling_peer, delivery_peer):
assert len(self.utxos)
def announce_cmpct_block(node, peer):
utxo = self.utxos.pop(0)
block, _ = self.build_block_with_transactions(node, utxo, 5)
cmpct_block = HeaderAndShortIDs()
cmpct_block.initialize_from_block(block)
msg = msg_cmpctblock(cmpct_block.to_p2p())
peer.send_and_ping(msg)
with mininode_lock:
assert "getblocktxn" in peer.last_message
return block, cmpct_block
block, cmpct_block = announce_cmpct_block(node, stalling_peer)
for tx in block.vtx[1:]:
delivery_peer.send_message(msg_tx(tx))
delivery_peer.sync_with_ping()
mempool = node.getrawmempool()
for tx in block.vtx[1:]:
assert tx.hash in mempool
delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
self.utxos.append(
[block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
# Now test that delivering an invalid compact block won't break relay
block, cmpct_block = announce_cmpct_block(node, stalling_peer)
for tx in block.vtx[1:]:
delivery_peer.send_message(msg_tx(tx))
delivery_peer.sync_with_ping()
# TODO: modify txhash in a way that doesn't impact txid.
delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
# Because txhash isn't modified, we end up reconstructing the same block
# assert int(node.getbestblockhash(), 16) != block.sha256
msg = msg_blocktxn()
msg.block_transactions.blockhash = block.sha256
msg.block_transactions.transactions = block.vtx[1:]
stalling_peer.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def run_test(self):
# Setup the p2p connections
self.test_node = self.nodes[0].add_p2p_connection(TestP2PConn())
self.ex_softfork_node = self.nodes[1].add_p2p_connection(
TestP2PConn(), services=NODE_NETWORK)
self.old_node = self.nodes[1].add_p2p_connection(
TestP2PConn(), services=NODE_NETWORK)
# We will need UTXOs to construct transactions in later tests.
self.make_utxos()
self.log.info("Running tests:")
self.log.info("\tTesting SENDCMPCT p2p message... ")
self.test_sendcmpct(self.nodes[0], self.test_node, 1)
sync_blocks(self.nodes)
self.test_sendcmpct(
self.nodes[1], self.ex_softfork_node, 1, old_node=self.old_node)
sync_blocks(self.nodes)
self.log.info("\tTesting compactblock construction...")
self.test_compactblock_construction(self.nodes[0], self.test_node)
sync_blocks(self.nodes)
self.test_compactblock_construction(
self.nodes[1], self.ex_softfork_node)
sync_blocks(self.nodes)
self.log.info("\tTesting compactblock requests... ")
self.test_compactblock_requests(self.nodes[0], self.test_node, 1)
sync_blocks(self.nodes)
self.test_compactblock_requests(
self.nodes[1], self.ex_softfork_node, 2)
sync_blocks(self.nodes)
self.log.info("\tTesting getblocktxn requests...")
self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1)
sync_blocks(self.nodes)
self.test_getblocktxn_requests(self.nodes[1], self.ex_softfork_node, 2)
sync_blocks(self.nodes)
self.log.info("\tTesting getblocktxn handler...")
self.test_getblocktxn_handler(self.nodes[0], self.test_node, 1)
sync_blocks(self.nodes)
self.test_getblocktxn_handler(self.nodes[1], self.ex_softfork_node, 2)
self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1)
sync_blocks(self.nodes)
self.log.info(
"\tTesting compactblock requests/announcements not at chain tip...")
self.test_compactblocks_not_at_tip(self.nodes[0], self.test_node)
sync_blocks(self.nodes)
self.test_compactblocks_not_at_tip(
self.nodes[1], self.ex_softfork_node)
self.test_compactblocks_not_at_tip(self.nodes[1], self.old_node)
sync_blocks(self.nodes)
self.log.info("\tTesting handling of incorrect blocktxn responses...")
self.test_incorrect_blocktxn_response(self.nodes[0], self.test_node, 1)
sync_blocks(self.nodes)
self.test_incorrect_blocktxn_response(
self.nodes[1], self.ex_softfork_node, 2)
sync_blocks(self.nodes)
# End-to-end block relay tests
self.log.info("\tTesting end-to-end block relay...")
self.request_cb_announcements(self.test_node, self.nodes[0])
self.request_cb_announcements(self.old_node, self.nodes[1])
self.request_cb_announcements(
self.ex_softfork_node, self.nodes[1], version=2)
self.test_end_to_end_block_relay(
self.nodes[0], [self.ex_softfork_node, self.test_node, self.old_node])
self.test_end_to_end_block_relay(
self.nodes[1], [self.ex_softfork_node, self.test_node, self.old_node])
self.log.info("\tTesting handling of invalid compact blocks...")
self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node)
self.test_invalid_tx_in_compactblock(
self.nodes[1], self.ex_softfork_node)
self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node)
self.log.info(
"\tTesting reconstructing compact blocks from all peers...")
self.test_compactblock_reconstruction_multiple_peers(
self.nodes[1], self.ex_softfork_node, self.old_node)
sync_blocks(self.nodes)
self.log.info("\tTesting invalid index in cmpctblock message...")
self.test_invalid_cmpctblock_message()
if __name__ == '__main__':
CompactBlocksTest().main()
| ftrader-bitcoinabc/bitcoin-abc | test/functional/p2p_compactblocks.py | Python | mit | 39,944 |
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""
Main Menu
"""
from icons import Icons
from pyqode.qt import QtWidgets
from pyqode.core import widgets
from dockTemplate import DockBase
class MainMenuBar(QtWidgets.QMenuBar):
def __init__(self, parent=None):
super(MainMenuBar, self).__init__(parent)
self.mainWindow = parent # parent should be a MainWindow
self.docks = []
# sub-menus
self.openRecMenu = \
widgets.MenuRecentFiles(self,
self.mainWindow.recent_files_manager)
# must load icons here
Icons.load()
# Actions --
# file actions
self.newAct = QtWidgets.QAction(Icons.new, "&New", self)
self.newAct.setShortcut('Ctrl+N')
self.openAct = QtWidgets.QAction(Icons.openFile, "&Open", self)
self.openAct.setShortcut('Ctrl+O')
self.openRecAct = QtWidgets.QAction("Open &Recent", self)
self.openRecAct.setMenu(self.openRecMenu)
self.saveAct = QtWidgets.QAction(Icons.save, "&Save", self)
self.saveAct.setShortcut('Ctrl+S')
self.saveAsAct = QtWidgets.QAction(Icons.saveAs, "Save &As...", self)
self.saveAsAct.setShortcut('Ctrl+Shift+S')
self.saveAllAct = QtWidgets.QAction(Icons.saveAll, "Save A&ll", self)
self.saveAllAct.setShortcut('Ctrl+Shift+A')
self.closeAllAct = QtWidgets.QAction("Close All", self)
self.exitAct = QtWidgets.QAction("E&xit", self)
self.exitAct.setShortcut('Alt+F4')
# tool actions
self.interpAct = QtWidgets.QAction("&Interpreter Config...", self)
self.runConfigAct = QtWidgets.QAction(Icons.runConfig,
"Run &Config...", self)
self.runAct = QtWidgets.QAction(Icons.run, "&Run", self)
self.runAct.setShortcut('F5')
# Top-level menus
self.fileMenu = QtWidgets.QMenu("&File", self)
self.fileMenu.addAction(self.newAct)
self.fileMenu.addAction(self.openAct)
self.fileMenu.addAction(self.openRecAct)
self.fileMenu.addAction(self.closeAllAct)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.saveAct)
self.fileMenu.addAction(self.saveAsAct)
self.fileMenu.addAction(self.saveAllAct)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.exitAct)
self.addAction(self.fileMenu.menuAction()) # add to menubar
self.editMenu = \
QtWidgets.QMenu("&Edit", self) # edit menu updated dynamically
# self.editMenu.addActions(self.mainWindow.editor.actions())
self.addAction(self.editMenu.menuAction())
self.toolsMenu = QtWidgets.QMenu("&Tools", self)
self.toolsMenu.addAction(self.interpAct)
self.toolsMenu.addAction(self.runConfigAct)
self.toolsMenu.addAction(self.runAct)
self.toolsMenu.addSeparator()
self.addAction(self.toolsMenu.menuAction())
self.viewMenu = QtWidgets.QMenu("&View", self)
self.addAction(self.viewMenu.menuAction())
self.helpMenu = QtWidgets.QMenu("&Help", self)
self.addAction(self.helpMenu.menuAction())
def addDock(self, dock):
assert isinstance(dock, DockBase)
self.viewMenu.addAction(dock.viewAction)
self.docks.append(dock)
def updateViewMenu(self):
for dock in self.docks:
dock.updateStatus()
| Zachacious/PyCreator | PyCreator/UI/mainMenu.py | Python | mit | 3,462 |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 1 19:11:52 2017
@author: mariapanteli
"""
import pytest
import numpy as np
from sklearn.model_selection import train_test_split
import scripts.classification as classification
def test_confusion_matrix():
X = np.random.randn(100, 3)
# create 2 classes by shifting the entries of half the samples
X[-50:, :] = X[-50:, :] + 10
Y = np.concatenate([np.repeat('a', 50), np.repeat('b', 50)])
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, train_size=0.6, random_state=1, stratify=Y)
accuracy, _, _ = classification.confusion_matrix(X_train, Y_train, X_test, Y_test)
# expect perfect accuracy for this 'easy' dataset
assert accuracy == 1.0
| mpanteli/music-outliers | tests/test_classification.py | Python | mit | 731 |
"""
Problem Statement: https://community.topcoder.com/stat?c=problem_statement&pm=1918&rd=5006
"""
def getOrdering(heights, blooms, wilt):
pass
| MFry/pyAlgoDataStructures | Top_Coder/Dynamic Programming/TCCC04_FlowerGarden.py | Python | mit | 154 |
from django.test import TestCase
from django.shortcuts import render
from django.forms.forms import BoundField
from authtools.forms import UserCreationForm
from tunes.templatetags.add_css import add_class_to_field
class TunesTemplateTagsTestCase(TestCase):
def test_add_class_to_field(self):
"""
Confirm we can add a class to a form field
"""
form = UserCreationForm()
field = BoundField(form, form.fields['email'], 'email')
field_with_class = add_class_to_field(field, 'form-control')
self.assertIn('form-control', field_with_class)
| kevinharvey/ci-jmad | tunes/tests/test_templatetags.py | Python | mit | 599 |
"""
WSGI config for receiver project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "receiver.settings")
#from django.core.wsgi import get_wsgi_application
#application = get_wsgi_application()
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
application = Cling(get_wsgi_application())
| ministryofjustice/courtfinder-govuk-publisher-test | receiver/wsgi.py | Python | mit | 517 |
import cPickle as pckl
import codecs
import argparse
VOCAB_PATH = "../rsc/vocab.pickle"
def main():
""" Main method. """
argument_parser = init_argument_parser()
args = argument_parser.parse_args()
print args
# Save vocabulary to a pickle file
if args.write:
args_dict = vars(args)
viable_options = {"min_length", "max_length", "mwes"}
options = {
option: args_dict[option] for option in args_dict
            if option in viable_options and args_dict[option] is not None
}
total_vocab = merge_vocabularies(args.input)
print len(total_vocab)
save_vocab(total_vocab, options=options)
# Load vocabulary from a pickle file
elif args.read:
total_vocab = load_vocab()
print total_vocab, len(total_vocab)
def merge_vocabularies(paths):
"""
Merges multiple files containing vocabulary.
Args:
paths (list): List of path to input files.
Returns:
set: Set of all words in vocabulary.
"""
assert len(paths) > 0
total_vocab = set()
for path in paths:
total_vocab = total_vocab.union(read_vocabulary(path))
return total_vocab
def save_vocab(vocab, options={}):
"""
Saves vocabulary to a pickle file.
Args:
vocab (set): Set of all words in vocabulary.
options (dict): Filtering options.
"""
global VOCAB_PATH
encoded_vocab = set()
for entry in vocab:
try:
if False not in check_constraints(entry, options):
print entry
encoded_vocab.add(entry.decode('latin-1'))
except UnicodeEncodeError:
continue
with open(VOCAB_PATH, 'wb') as vocab_file:
pckl.dump(encoded_vocab, vocab_file)
def check_constraints(word, options):
"""
Enforce filtering constraints on the vocabulary.
Args:
word (str): Current vocabulary to be checked.
options (dict): Filtering options.
Returns:
list: List of filtering results with booleans for each check.
"""
# Defining checks
def _min_length_check(_word, min_length):
if len(_word) < min_length:
return False
return True
def _max_length_check(_word, max_length):
if len(_word) > max_length:
return False
return True
def _multi_word_check(_word, mwes):
return True if mwes else (' ' not in _word)
# Enforcing constraints
checks = {
"min_length": _min_length_check,
"max_length": _max_length_check,
"mwes": _multi_word_check
}
results = []
for option in options:
arg = options[option]
results.append(checks[option](word, arg))
return results
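# Illustrative example of check_constraints() (not part of the original
# script; the literal values below are placeholders):
#     options = {"min_length": 3, "mwes": False}
#     check_constraints("ad hoc", options)
# yields one boolean per option, e.g. [True, False]: True because
# len("ad hoc") >= 3, and False because multi-word entries are disallowed
# while the entry contains a space. Callers such as save_vocab() only test
# whether False appears anywhere in the returned list.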
def load_vocab():
"""
Load vocabulary from pickle file.
Returns:
set: Set of all words in vocabulary.
"""
global VOCAB_PATH
with open(VOCAB_PATH, 'rb') as vocab_file:
vocab = pckl.load(vocab_file)
print vocab
decoded_vocab = set()
for entry in vocab:
decoded_vocab.add(entry.encode('latin-1'))
return decoded_vocab
def read_vocabulary(vocab_inpath):
"""
Read a vocabulary file with one word per line.
Args:
vocab_inpath (str): Path to vocabulary file.
Returns:
set: Set of all words in vocabulary.
"""
vocab = set()
with codecs.open(vocab_inpath, 'rb', 'utf-8') as vocab_infile:
line = vocab_infile.readline()
while line:
vocab.add(line.strip())
line = vocab_infile.readline()
return vocab
def init_argument_parser():
"""
Initialize the argument parser for this script.
Returns:
argparse.ArgumentParser: ArguementParser object
"""
argument_parser = argparse.ArgumentParser()
# Basic arguments
argument_parser.add_argument(
'--input',
nargs='+',
help='Paths to vocabulary files.'
)
argument_parser.add_argument(
'-r',
'--read',
action='store_true',
help='Enable reading mode.'
)
argument_parser.add_argument(
'-w',
'--write',
action='store_true',
help='Enable writing mode.'
)
# Filtering options
    # dest below matches the option names expected by check_constraints()
    argument_parser.add_argument(
        '--min',
        dest='min_length',
        type=int,
        help='Minimum length of a word.'
    )
    argument_parser.add_argument(
        '--max',
        dest='max_length',
        type=int,
        help='Maximum length of a word.'
    )
argument_parser.add_argument(
'--mwes',
action='store_true',
default=False,
help="Are multi-word entries allowed or not?"
)
return argument_parser
if __name__ == "__main__":
main()
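# Illustrative command lines (not part of the original script; the file names
# are placeholders). The flags correspond to init_argument_parser() above:
#     python create_vocab.py -w --input vocab_a.txt vocab_b.txt --min 2 --max 30
#     python create_vocab.py -w --input vocab_a.txt --mwes
#     python create_vocab.py -r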
| Kaleidophon/doppelmoppelbot | misc/create_vocab.py | Python | mit | 4,734 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: bigip_irule
short_description: Manage iRules across different modules on a BIG-IP
description:
- Manage iRules across different modules on a BIG-IP device.
version_added: "1.0.0"
options:
content:
description:
- When used instead of B(src), sets the contents of an iRule directly to
the specified value. This is for simple values, but can be used with
lookup plugins for anything complex or with formatting. Either one
of C(src) or C(content) must be provided.
type: str
module:
description:
- The BIG-IP module to which the iRule should be added.
type: str
required: True
choices:
- ltm
- gtm
name:
description:
- The name of the iRule.
type: str
required: True
src:
description:
- The iRule file to interpret and upload to the BIG-IP. Either one
of C(src) or C(content) must be provided.
type: path
state:
description:
- Whether the iRule should exist or not.
type: str
choices:
- present
- absent
default: present
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
extends_documentation_fragment: f5networks.f5_modules.f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Add the iRule contained in template irule.tcl to the LTM module
bigip_irule:
content: "{{ lookup('template', 'irule.tcl') }}"
module: ltm
name: MyiRule
state: present
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Add the iRule contained in static file irule.tcl to the LTM module
bigip_irule:
module: ltm
name: MyiRule
src: irule.tcl
state: present
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
'''
RETURN = r'''
module:
description: The module that the iRule was added to.
returned: changed and success
type: str
sample: gtm
src:
description: The filename that included the iRule source.
returned: changed and success, when provided
type: str
sample: /opt/src/irules/example1.tcl
content:
description: The content of the iRule that was managed.
returned: changed and success
type: str
sample: "when LB_FAILED { set wipHost [LB::server addr] }"
'''
import os
from datetime import datetime
from ansible.module_utils.basic import (
AnsibleModule, env_fallback
)
from ..module_utils.bigip import F5RestClient
from ..module_utils.common import (
F5ModuleError, AnsibleF5Parameters, transform_name, f5_argument_spec
)
from ..module_utils.icontrol import tmos_version
from ..module_utils.teem import send_teem
class Parameters(AnsibleF5Parameters):
api_map = {
'apiAnonymous': 'content',
}
updatables = [
'content',
]
api_attributes = [
'apiAnonymous',
]
returnables = [
'content', 'src', 'module',
]
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
@property
def content(self):
if self._values['content'] is None:
result = self.src_content
else:
result = self._values['content']
return str(result).strip()
@property
def src(self):
if self._values['src'] is None:
return None
return self._values['src']
@property
def src_content(self):
if not os.path.exists(self._values['src']):
raise F5ModuleError(
"The specified 'src' was not found."
)
with open(self._values['src']) as f:
result = f.read()
return result
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
raise
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.client = kwargs.get('client', None)
self.module = kwargs.get('module', None)
self.kwargs = kwargs
def exec_module(self):
if self.module.params['module'] == 'ltm':
manager = self.get_manager('ltm')
elif self.module.params['module'] == 'gtm':
manager = self.get_manager('gtm')
else:
raise F5ModuleError(
"An unknown iRule module type was specified"
)
return manager.exec_module()
def get_manager(self, type):
if type == 'ltm':
return LtmManager(**self.kwargs)
elif type == 'gtm':
return GtmManager(**self.kwargs)
class BaseManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.have = None
self.want = ModuleParameters(params=self.module.params)
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def exec_module(self):
start = datetime.now().isoformat()
version = tmos_version(self.client)
changed = False
result = dict()
state = self.want.state
if state in ["present"]:
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
send_teem(start, self.module, version)
return result
def present(self):
if not self.want.content and not self.want.src:
raise F5ModuleError(
"Either 'content' or 'src' must be provided"
)
if self.exists():
return self.update()
else:
return self.create()
def create(self):
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
if not self.exists():
raise F5ModuleError("Failed to create the iRule")
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def absent(self):
if self.exists():
return self.remove()
return False
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the iRule")
return True
class LtmManager(BaseManager):
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/rule/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
errors = [401, 403, 409, 500, 501, 502, 503, 504]
if resp.status in errors or 'code' in response and response['code'] in errors:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/ltm/rule/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
raise F5ModuleError(resp.content)
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/ltm/rule/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
raise F5ModuleError(resp.content)
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/rule/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return ApiParameters(params=response)
raise F5ModuleError(resp.content)
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/rule/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
response = self.client.api.delete(uri)
if response.status in [200, 201]:
return True
raise F5ModuleError(response.content)
class GtmManager(BaseManager):
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/rule/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/gtm/rule/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
raise F5ModuleError(resp.content)
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/gtm/rule/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
raise F5ModuleError(resp.content)
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/rule/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return ApiParameters(params=response)
raise F5ModuleError(resp.content)
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/rule/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
response = self.client.api.delete(uri)
if response.status in [200, 201]:
return True
raise F5ModuleError(response.content)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
content=dict(),
src=dict(
type='path',
),
name=dict(required=True),
module=dict(
required=True,
choices=['gtm', 'ltm']
),
state=dict(
default='present',
choices=['present', 'absent']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
self.mutually_exclusive = [
['content', 'src']
]
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
mutually_exclusive=spec.mutually_exclusive
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| F5Networks/f5-ansible-modules | ansible_collections/f5networks/f5_modules/plugins/modules/bigip_irule.py | Python | mit | 16,597 |
import numpy
from six import moves
import chainer
from chainer import cuda
from chainer import function
from chainer.utils import conv
from chainer.utils import type_check
from chainer import variable
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
libcudnn = cuda.cudnn.cudnn
_fwd_pref = libcudnn.CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT
_bwd_filter_pref = \
libcudnn.CUDNN_CONVOLUTION_BWD_FILTER_SPECIFY_WORKSPACE_LIMIT
_bwd_data_pref = \
libcudnn.CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return x, x
class DilatedConvolution2DFunction(function.Function):
def __init__(self, stride=1, pad=0, dilate=1, cover_all=False,
requires_x_grad=True):
self.sy, self.sx = _pair(stride)
self.ph, self.pw = _pair(pad)
self.dy, self.dx = _pair(dilate)
self.cover_all = cover_all
self.requires_x_grad = requires_x_grad
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(2 <= n_in, n_in <= 3)
x_type = in_types[0]
w_type = in_types[1]
type_check.expect(
x_type.dtype.kind == 'f',
w_type.dtype.kind == 'f',
x_type.ndim == 4,
w_type.ndim == 4,
x_type.shape[1] == w_type.shape[1],
)
if type_check.eval(n_in) == 3:
b_type = in_types[2]
type_check.expect(
b_type.dtype == x_type.dtype,
b_type.ndim == 1,
b_type.shape[0] == w_type.shape[0],
)
def forward_cpu(self, inputs):
x, W = inputs[:2]
b = inputs[2] if len(inputs) == 3 else None
if not all([isinstance(i, numpy.ndarray) for i in inputs]):
if b is not None:
raise ValueError('numpy and cupy must not be used together\n'
'type(W): {0}, type(x): {1}, type(b): {2}'
.format(type(W), type(x), type(b)))
else:
raise ValueError('numpy and cupy must not be used together\n'
'type(W): {0}, type(x): {1}'
.format(type(W), type(x)))
kh, kw = W.shape[2:]
self.col = conv.im2col_cpu(
x, kh, kw, self.sy, self.sx, self.ph, self.pw,
cover_all=self.cover_all, dy=self.dy, dx=self.dx)
y = numpy.tensordot(
self.col, W, ((1, 2, 3), (1, 2, 3))).astype(x.dtype, copy=False)
if b is not None:
y += b
return numpy.rollaxis(y, 3, 1),
def forward_gpu(self, inputs):
x, W = inputs[:2]
b = inputs[2] if len(inputs) == 3 else None
if not all([isinstance(i, cuda.ndarray) for i in inputs]):
if b is not None:
raise ValueError('numpy and cupy must not be used together\n'
'type(W): {0}, type(x): {1}, type(b): {2}'
.format(type(W), type(x), type(b)))
else:
raise ValueError('numpy and cupy must not be used together\n'
'type(W): {0}, type(x): {1}'
.format(type(W), type(x)))
out_c, _, kh, kw = W.shape
n, c, h, w = x.shape
dkh, dkw = kh + (kh - 1) * (self.dy - 1), kw + (kw - 1) * (self.dx - 1)
out_h = conv.get_conv_outsize(h, kh, self.sy, self.ph,
cover_all=self.cover_all, d=self.dy)
out_w = conv.get_conv_outsize(w, kw, self.sx, self.pw,
cover_all=self.cover_all, d=self.dx)
y = cuda.cupy.zeros((n, out_c, out_h, out_w), dtype=x.dtype)
if (not self.cover_all and chainer.should_use_cudnn('>=auto') and
x.dtype == W.dtype):
pad_x = cuda.cupy.zeros((n, c, h + 2 * self.ph, w + 2 * self.pw),
dtype=x.dtype)
pad_x[:, :, self.ph:self.ph + h, self.pw:self.pw + w] = x
out_h_s1 = h + 2 * self.ph - dkh + 1
out_w_s1 = w + 2 * self.pw - dkw + 1
for j in moves.range(kh):
for i in moves.range(kw):
xji = cuda.cupy.ascontiguousarray(
pad_x[:, :,
j * self.dy:j * self.dy + out_h_s1,
i * self.dx:i * self.dx + out_w_s1])
Wji = cuda.cupy.ascontiguousarray(
W[:, :, j:j + 1, i:i + 1])
if i == 0 and j == 0:
handle = cudnn.get_handle()
xji_desc = cudnn.create_tensor_descriptor(xji)
y_desc = cudnn.create_tensor_descriptor(y)
self.filter_desc = cudnn.create_filter_descriptor(Wji)
self.conv_desc = cudnn.create_convolution_descriptor(
(0, 0), (self.sy, self.sx), xji.dtype)
workspace_size = cuda.get_max_workspace_size()
workspace = cuda.cupy.empty(
(workspace_size,), dtype='b')
algo = libcudnn.getConvolutionForwardAlgorithm(
handle, xji_desc.value, self.filter_desc.value,
self.conv_desc.value, y_desc.value, _fwd_pref,
workspace_size)
oz_dtype = 'd' if x.dtype == 'd' else 'f'
one = numpy.array(1, dtype=oz_dtype).ctypes
libcudnn.convolutionForward(
handle, one.data, xji_desc.value, xji.data.ptr,
self.filter_desc.value, Wji.data.ptr,
self.conv_desc.value, algo, workspace.data.ptr,
workspace_size, one.data, y_desc.value, y.data.ptr)
if b is not None:
b = cuda.cupy.ascontiguousarray(b)
self.bias_desc = cudnn.create_tensor_descriptor(
b[None, :, None, None])
cudnn.add_tensor(
handle, one.data, self.bias_desc.value, b.data.ptr,
one.data, y_desc.value, y.data.ptr)
else:
# Implementation using im2col
self.col = conv.im2col_gpu(
x, kh, kw, self.sy, self.sx, self.ph, self.pw,
cover_all=self.cover_all, dy=self.dy, dx=self.dx)
y = cuda.cupy.tensordot(
self.col, W, ((1, 2, 3), (1, 2, 3))).astype(x.dtype,
copy=False)
# TODO(beam2d): Support unshared bias
if b is not None:
y += b
y = cuda.cupy.rollaxis(y, 3, 1)
return y,
def backward_cpu(self, inputs, grad_outputs):
x, W = inputs[:2]
b = inputs[2] if len(inputs) == 3 else None
gy = grad_outputs[0]
h, w = x.shape[2:]
gW = numpy.tensordot(
gy, self.col, ((0, 2, 3), (0, 4, 5))).astype(W.dtype, copy=False)
if not self.requires_x_grad:
gx = None
else:
gcol = numpy.tensordot(W, gy, (0, 1)).astype(x.dtype, copy=False)
gcol = numpy.rollaxis(gcol, 3)
gx = conv.col2im_cpu(gcol, self.sy, self.sx, self.ph, self.pw,
h, w, dy=self.dy, dx=self.dx)
if b is None:
return gx, gW
else:
gb = gy.sum(axis=(0, 2, 3))
return gx, gW, gb
def backward_gpu(self, inputs, grad_outputs):
x, W = inputs[:2]
b = inputs[2] if len(inputs) == 3 else None
gy = grad_outputs[0]
_, out_c, out_h, out_w = gy.shape
n, c, h, w = x.shape
kh, kw = W.shape[2:]
dkh, dkw = kh + (kh - 1) * (self.dy - 1), kw + (kw - 1) * (self.dx - 1)
gW = cuda.cupy.empty_like(W)
if (not self.cover_all and chainer.should_use_cudnn('>=auto') and
x.dtype == W.dtype):
pad_x = cuda.cupy.zeros(
(n, c, h + 2 * self.ph, w + 2 * self.pw), dtype=x.dtype)
pad_x[:, :, self.ph:self.ph + h, self.pw:self.pw + w] = x
out_h_s1 = h + 2 * self.ph - dkh + 1
out_w_s1 = w + 2 * self.pw - dkw + 1
out_sh = out_h + (out_h - 1) * (self.sy - 1)
out_sw = out_w + (out_w - 1) * (self.sx - 1)
            gy_ph = (h + dkh - out_sh - 1) // 2
            gy_pw = (w + dkw - out_sw - 1) // 2
pad_gy = cuda.cupy.zeros(
(n, out_c, h + dkh - 1, w + dkw - 1), dtype=x.dtype)
pad_gy[:, :,
gy_ph:gy_ph + out_sh:self.sy,
gy_pw:gy_pw + out_sw:self.sx] = gy
gx = None
for j in moves.range(kh):
for i in moves.range(kw):
xji = cuda.cupy.ascontiguousarray(
pad_x[:, :,
j * self.dy:j * self.dy + out_h_s1,
i * self.dx:i * self.dx + out_w_s1])
gyji = cuda.cupy.ascontiguousarray(
pad_gy[:, :,
j * self.dy:j * self.dy + h,
i * self.dx:i * self.dx + w])
Wji = cuda.cupy.ascontiguousarray(
W[:, :, -1::-1, -1::-1][:, :, j:j + 1, i:i + 1])
if i == 0 and j == 0:
x = cuda.cupy.ascontiguousarray(x)
gy = cuda.cupy.ascontiguousarray(gy)
handle = cudnn.get_handle()
x_desc = cudnn.create_tensor_descriptor(x)
xji_desc = cudnn.create_tensor_descriptor(xji)
gy_desc = cudnn.create_tensor_descriptor(gy)
gyji_desc = cudnn.create_tensor_descriptor(gyji)
conv_desc_data = cudnn.create_convolution_descriptor(
(0, 0), (1, 1), xji.dtype)
oz_dtype = 'd' if x.dtype == 'd' else 'f'
one = numpy.array(1, dtype=oz_dtype).ctypes
zero = numpy.array(0, dtype=oz_dtype).ctypes
if self.requires_x_grad:
gx = cuda.cupy.zeros_like(x)
gWji = cuda.cupy.empty((out_c, c, 1, 1), dtype=W.dtype)
workspace_size = cuda.get_max_workspace_size()
workspace = cuda.cupy.empty(
(workspace_size,), dtype='b')
algo_filter = (
libcudnn.getConvolutionBackwardFilterAlgorithm(
handle, xji_desc.value, gy_desc.value,
self.conv_desc.value,
self.filter_desc.value,
_bwd_filter_pref, workspace_size))
algo_data = (
libcudnn.getConvolutionBackwardDataAlgorithm(
handle, self.filter_desc.value,
gyji_desc.value, conv_desc_data.value,
x_desc.value, _bwd_data_pref,
workspace_size))
libcudnn.convolutionBackwardFilter_v3(
handle, one.data, xji_desc.value, xji.data.ptr,
gy_desc.value, gy.data.ptr, self.conv_desc.value,
algo_filter, workspace.data.ptr, workspace_size,
zero.data, self.filter_desc.value, gWji.data.ptr)
if self.requires_x_grad:
libcudnn.convolutionBackwardData_v3(
handle, one.data, self.filter_desc.value,
Wji.data.ptr, gyji_desc.value,
gyji.data.ptr, conv_desc_data.value,
algo_data, workspace.data.ptr, workspace_size,
one.data, x_desc.value, gx.data.ptr)
gW[:, :, j:j + 1, i:i + 1] = gWji
if b is not None:
gb = cuda.cupy.empty_like(b)
libcudnn.convolutionBackwardBias(
handle, one.data, gy_desc.value, gy.data.ptr,
zero.data, self.bias_desc.value, gb.data.ptr)
else:
gW = cuda.cupy.tensordot(
gy, self.col, ((0, 2, 3), (0, 4, 5))).astype(W.dtype,
copy=False)
if not self.requires_x_grad:
gx = None
else:
gcol = cuda.cupy.tensordot(W, gy, (0, 1)).astype(x.dtype,
copy=False)
gcol = cuda.cupy.rollaxis(gcol, 3)
gx = conv.col2im_gpu(gcol, self.sy, self.sx, self.ph, self.pw,
h, w, dy=self.dy, dx=self.dx)
if b is not None:
gb = gy.sum(axis=(0, 2, 3))
if b is None:
return gx, gW
else:
return gx, gW, gb
def dilated_convolution_2d(x, W, b=None, stride=1, pad=0, dilate=1,
cover_all=False):
"""Two-dimensional dilated convolution function.
This is an implementation of two-dimensional dilated convolution
in ConvNets.
It takes three variables: the input image ``x``, the filter weight ``W``,
and the bias vector ``b``.
Notation: here is a notation for dimensionalities.
- :math:`n` is the batch size.
- :math:`c_I` and :math:`c_O` are the number of the input and output,
respectively.
- :math:`h` and :math:`w` are the height and width of the input image,
respectively.
- :math:`k_H` and :math:`k_W` are the height and width of the filters,
respectively.
Args:
x (~chainer.Variable): Input variable of shape :math:`(n, c_I, h, w)`.
W (~chainer.Variable): Weight variable of shape
:math:`(c_O, c_I, k_H, k_W)`.
b (~chainer.Variable): Bias variable of length :math:`c_O` (optional).
stride (int or pair of ints): Stride of filter applications.
``stride=s`` and ``stride=(s, s)`` are equivalent.
pad (int or pair of ints): Spatial padding width for input arrays.
``pad=p`` and ``pad=(p, p)`` are equivalent.
dilate (int or pair of ints): Dilation factor of filter applications.
``dilate=d`` and ``dilate=(d, d)`` are equivalent.
cover_all (bool): If ``True``, all spatial locations are convoluted
into some output pixels. It may make the output size larger.
Returns:
~chainer.Variable: Output variable.
The two-dimensional dilated convolution function is defined as follows.
Then the ``DilatedConvolution2D`` function computes correlations
between filters and patches of size :math:`(k_H, k_W)` in ``x``.
Patches here are extracted at intervals of the dilation factor.
Note that correlation here is equivalent to the inner product between
expanded vectors.
Patches are extracted at intervals of the dilation factor and at positions
shifted by multiples of ``stride`` from the first position ``-pad`` for
each spatial axis. The right-most (or bottom-most) patches do not run over
the padded spatial size.
Let :math:`(s_Y, s_X)` be the stride of filter application,
:math:`(p_H, p_W)` the spatial padding size, and :math:`(d_Y, d_X)`
the dilation factor of filter application. Then, the output size
:math:`(h_O, w_O)` is determined by the following equations:
.. math::
h_O &= (h + 2p_H - k_H - (k_H - 1) * (d_Y - 1)) / s_Y + 1,\\\\
w_O &= (w + 2p_W - k_W - (k_W - 1) * (d_X - 1)) / s_X + 1.
If the bias vector is given, then it is added to all spatial locations of
the output of convolution.
.. seealso:: :class:`DilatedConvolution2D`
"""
requires_x_grad = isinstance(x, variable.Variable) and x.requires_grad
func = DilatedConvolution2DFunction(stride, pad, dilate, cover_all,
requires_x_grad)
if b is None:
return func(x, W)
else:
return func(x, W, b)
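# Illustrative usage sketch (not part of the original module; the shapes and
# parameter values below are placeholders chosen to exercise the output-size
# formula from the docstring):
#     x = chainer.Variable(numpy.zeros((1, 3, 32, 32), dtype=numpy.float32))
#     W = chainer.Variable(numpy.zeros((8, 3, 3, 3), dtype=numpy.float32))
#     y = dilated_convolution_2d(x, W, stride=1, pad=1, dilate=2)
# With k_H = k_W = 3, p_H = p_W = 1, d_Y = d_X = 2 and s_Y = s_X = 1 the
# formula gives h_O = (32 + 2 - 3 - (3 - 1) * (2 - 1)) / 1 + 1 = 30, so
# y.shape should be (1, 8, 30, 30).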
| kashif/chainer | chainer/functions/connection/dilated_convolution_2d.py | Python | mit | 16,543 |
from PyQt5 import QtWidgets
from PyQt5.QtCore import QCoreApplication
from matplotlib.backends.backend_qt5agg import (FigureCanvasQTAgg as
FigureCanvas)
from matplotlib.backends.backend_qt5agg import (NavigationToolbar2QT as
NavigationToolbar)
from matplotlib.figure import Figure
_translate = QCoreApplication.translate
class MplCanvas(FigureCanvas):
def __init__(self, parent=None, width=4, height=5, dpi=100):
self.fig = Figure(figsize=(width, height), dpi=dpi)
self.ax = self.fig.add_subplot(111)
FigureCanvas.__init__(self, self.fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self, QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
    # Override mouseMoveEvent with a non-functional dummy.
    # This prevents the GUI thread from hanging while the mouse is moved
    # while a large number of plots is shown simultaneously.
def mouseMoveEvent(self, event):
pass
def clear(self):
self.ax.clear()
self.fig.clear()
def save(self, filename):
try:
self.fig.savefig(filename, dpi=300)
except IOError:
QtWidgets.QMessageBox.critical(
self, _translate("MainWindow", "Error"),
_translate("MainWindow", "Error saving figure! Please check "
"permissions/free space of target path!"),
QtWidgets.QMessageBox.Close, QtWidgets.QMessageBox.Close)
class CustomNavigationToolbar(NavigationToolbar):
toolitems = (
(_translate("CustomNavigationToolbar", "Save"),
_translate("CustomNavigationToolbar",
"Save the figure"), "filesave",
"save_figure"),
(_translate("CustomNavigationToolbar", "Subplots"),
_translate("CustomNavigationToolbar",
"Configure subplots"), "subplots",
"configure_subplots"),
(None, None, None, None), )
def __init__(self, canvas, parent, coordinates=True):
NavigationToolbar.__init__(self, canvas, parent,
coordinates=coordinates)
class MplWidget(QtWidgets.QGraphicsView):
def __init__(self, parent=None):
QtWidgets.QGraphicsView.__init__(self, parent)
self.canvas = MplCanvas()
self.ntb = CustomNavigationToolbar(self.canvas, self,
coordinates=False)
self.vbl = QtWidgets.QVBoxLayout()
self.vbl.addWidget(self.ntb)
self.vbl.addWidget(self.canvas)
self.setLayout(self.vbl)
| Athemis/PyDSF | ui/mplwidget.py | Python | mit | 2,733 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('plea', '0034_auto_20160519_1047'),
]
operations = [
migrations.AlterField(
model_name='oucode',
name='ou_code',
field=models.CharField(help_text=b'The first five digits of an OU code', unique=True, max_length=5),
),
]
| ministryofjustice/manchester_traffic_offences_pleas | apps/plea/migrations/0035_auto_20160519_1055.py | Python | mit | 462 |
import tensorflow as tf
import numpy as np
a = tf.placeholder(shape=[3,4], dtype=tf.float32)
b = tf.placeholder(shape=[4,6], dtype=tf.float32)
c = tf.matmul(a,b)
sess = tf.Session()
feed = {a:np.random.randn(3,4), b:np.random.randn(4,6)}
result = sess.run(c, feed_dict=feed)
print (result)
| kiseyno92/SNU_ML | Practice7/code/quiz0.py | Python | mit | 294 |
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
'''
sqlite_fill.py -> inserts devices into the database
Copyright 2017 Ron Wellman
'''
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from db.sqlite_gen import Base, Device, Config
import json
def load_database(inputfile):
'''
reads in a json formatted file, creates a new device object, and inserts it into the database
'''
engine = create_engine('sqlite:///nbmon.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
load = json.load(inputfile)
for device in load['devices']:
new_device = Device(ip=device['ip'],port=device['port'],description=device['description'],
username=device['username'],password=device['password'],
actively_poll=device['actively_poll'],device_type=device['device_type'],
secret=device['secret'],missed_polls=device['missed_polls'],
config_changes=device['config_changes'])
session.add(new_device)
session.commit()
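# Illustrative input sketch (not part of the original module). The JSON file
# passed to load_database() is expected to look roughly like this -- the keys
# mirror the attributes used to construct Device above, and the values are
# placeholders:
#     {"devices": [{"ip": "192.0.2.1", "port": 22, "description": "lab switch",
#                   "username": "admin", "password": "secret",
#                   "actively_poll": true, "device_type": "cisco_ios",
#                   "secret": "enable-secret", "missed_polls": 0,
#                   "config_changes": 0}]}
# A possible invocation, assuming such a file is available:
#     with open('devices.json') as inputfile:
#         load_database(inputfile)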
| ronwellman/nbmon | db/sqlite_fill.py | Python | mit | 1,089 |
import datetime
from django.contrib.auth.models import User
from django.db import models
from django.utils import timezone
# Create your models here.
class TodoList(models.Model):
name = models.TextField()
author = models.ForeignKey(User)
create_time = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return self.name
class TodoItem(models.Model):
content = models.TextField()
todo_list = models.ForeignKey(TodoList)
def __unicode__(self):
return self.content
| zhy0216/django-todo | todo/models.py | Python | mit | 546 |
# -*- coding: UTF-8 -*-
import setuptools
from distutils.core import setup
# http://stackoverflow.com/a/7071358/735926
import re
VERSIONFILE='wikimd/__init__.py'
verstrline = open(VERSIONFILE, 'rt').read()
VSRE = r'^__version__\s+=\s+[\'"]([^\'"]+)[\'"]'
mo = re.search(VSRE, verstrline, re.M)
if mo:
verstr = mo.group(1)
else:
raise RuntimeError("Unable to find version string in %s." % VERSIONFILE)
setup(
name='WikiMD',
version=verstr,
author='Salmon Thomas',
author_email='ths871@gmail.com',
packages=['wikimd'],
url='https://github.com/tsalmon/wikimd',
license=open('LICENSE', 'r').read(),
description='Convert wikipedia\'s page into markdown style',
long_description=open('README.md', 'r').read(),
install_requires=[
'beautifulsoup4 >= 4.3.2',
'lxml >= 3.4.0',
'ordereddict == 1.1',
'requests >= 2.4.2',
],
classifiers=[
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
],
entry_points={
'console_scripts':[
'wikimd = wikimd.cli:run'
]
},
)
|
tsalmon/WikiMD
|
setup.py
|
Python
|
mit
| 1,260
|
"""A sample custom HTTP server."""
import functools
import html
import traceback
import collect
import server
server.Logger.name = __file__
HTML_TMPL = '''\
<html>
<head>
<link rel="stylesheet" type="text/css" href="/myStyle.css"/>
</head>
<body id="consolas">
%s</body>
</html>
'''
LINK_HOME = '<a href="/">Home</a>'
app = server.app.App('0.0.0.0', 8080)
app.resolver.update_from_files_json('app.json')
@app.register('/myStyle.css')
def my_style():
status_code, headers, content = app._return_file(
collect.Path('myStyle.css')
)
headers['Content-Type'] = 'text/css'
return status_code, headers, content
def insert_body(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
response = func(*args, **kwargs)
if isinstance(response, tuple):
status_code, headers, text = response
return status_code, headers, HTML_TMPL % text
else:
return HTML_TMPL % response
return wrapper
@app.register('/')
@insert_body
def index():
return '''\
<a href="/img.png"><img src="/img.png" width="250"/></a>
<form action="/" method="post">
<input id="consolas" type="text" name="url"><br/>
<input id="consolas" type="submit" value="Submit">
</form>
'''
@insert_body
def dir_landing_page(url_path, folder_path, recursive):
def contents():
yield folder_path.parent
yield folder_path
yield from folder_path
parts = []
for file in contents():
rel_path = file.relpath(folder_path)
new_url = url_path / rel_path
if recursive or file.is_file():
parts.append(f'''
<a href="{new_url}">{rel_path}</a>''')
inner = '<br/>'.join(parts)
return f'''\
<h1>{LINK_HOME}{url_path}</h1>
<p>{inner}
</p>
'''
for url_path, fs_path in app.resolver.dirs.items():
recursive = app.resolver.recursive(url_path)
def contents():
if recursive:
yield from fs_path.tree
else:
yield fs_path
for file in contents():
if not file.is_dir():
continue
rel_file = file.relpath(fs_path)
new_url = url_path / rel_file
app.register(new_url)(
functools.partial(dir_landing_page, new_url, file, recursive)
)
@app.register('/', 'post')
def index_post():
input = server.app.ActiveRequest.body['url']
new_url = collect.Path(input)
return 303, {'Location': str(new_url)}, ''
@app.register('/page')
def page():
return 307, {'Location': '/new'}, ''
@app.register('/new')
@insert_body
def new():
return f'''\
<p>
This is the new page. You may have been redirected.<br/>
{LINK_HOME}
</p>
'''
@app.register('/req', 'GET', 'POST')
def req_():
return (
200, {'Content-Type': 'text/plain'},
server.app.ActiveRequest.raw_request)
@app.register_exception(server.http.HTTPException)
def handle_http(error):
body = f'''\
<h1>{error.status_code} {error.reason}</h1>
<pre id="consolas">{html.escape(str(error.message))}</pre>
{LINK_HOME}
'''
return error.status_code, HTML_TMPL % body
@app.register_exception(Exception)
def handle_exc(error):
new_error = server.http.HTTPException(traceback.format_exc(), 500)
return handle_http(new_error)
print('ready')
if __name__ == '__main__':
app.run()
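# Additional pages follow the same pattern as the handlers above: register a
# path on `app` and optionally wrap the handler with `insert_body` so its
# return value is dropped into HTML_TMPL. An illustrative sketch ('/about' is
# a hypothetical route, not one defined in this server):
#
# @app.register('/about')
# @insert_body
# def about():
#     return f'<p>About this server.<br/>{LINK_HOME}</p>'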
|
cheeseywhiz/cheeseywhiz
|
socket/app.py
|
Python
|
mit
| 3,395
|
#!/usr/bin/env python3
import matplotlib
#matplotlib.use("Agg")
import subprocess #import check_output
import operator
from os import mkdir
from shutil import rmtree
from networkit import *
from pylab import *
import matplotlib.pyplot as plt
from scipy.stats import spearmanr
import os.path
import sys, traceback
import time
from param_fitter import ParameterFitter
def getModularity(G):
plm = community.PLM(G).run()
return community.Modularity().getQuality(plm.getPartition(), G)
def numberOfComponents(G):
return properties.ConnectedComponents(G).run().numberOfComponents()
def clustering(G):
return properties.ClusteringCoefficient().avgLocal(G)
def averageBetweennessPositions(G):
positions =[]
count = 5
for i in range(0, G.numberOfNodes()):
positions.append(0)
for i in range(0,count):
bt = centrality.ApproxBetweenness(G, 0.2,0.1).run().scores()
ids = range(0, len(bt))
nodes_bt = dict(zip(ids, bt))
sorted_bt = sorted(nodes_bt.items(), key=operator.itemgetter(1))
pos = G.numberOfNodes()
for (nodeid, betweennes) in sorted_bt:
#if pos == 1: print(nodeid, betweennes)
positions[nodeid] = positions[nodeid] + pos
pos-=1
for i in range(0, len(positions)):
positions[i] = positions[i]/count
#print(positions[107])
return positions
def main():
if(len(sys.argv) < 4): #minimum to run
print("Invalid number of parameter: Usage: ./generateSparseGraphs name path numnodes")
sys.exit(0)
setLogLevel("FATAL")
name = sys.argv[1]
path = sys.argv[2]
num_nodes =sys.argv[3]
run_args = sys.argv[4:]
numrun= 2
print("Running ",name)
start_time = time.time()
if os.path.exists("reports/"+name):
rmtree("reports/"+name)
mkdir("reports/"+name)
if os.path.exists("out/"+name):
rmtree("out/"+name)
mkdir("out/"+name)
d_file = "reports/"+name+"/_diameter.txt"
c_file = "reports/"+name+"/_clust_coef.txt"
c_file2 = "reports/"+name+"/_clust_coef2.txt"
comp_file = "reports/"+name+"/_components.txt"
rho_deg_file = "reports/"+name+"/_degCentrality.txt"
rho_pag_file = "reports/"+name+"/_pagerank.txt"
rho_bet_file = "reports/"+name+"/_betweenness.txt"
mod_file = "reports/"+name+"/_modularity.txt"
clust_dist_file = "reports/"+name+"/_clust_dist.txt"
G = graphio.EdgeListReader(' ',0,"#",True, False).read(path)
orig_diameter = properties.Diameter.exactDiameter(G)
orig_clustC = clustering(G)
numComp = numberOfComponents(G)
loc_clust_dist = centrality.LocalClusteringCoefficient(G).run().scores()
#print(" computing degeree cent")
deg_centr = centrality.DegreeCentrality(G).run().scores()
#print(" computing page rank")
page_rank = centrality.PageRank(G).run().scores()
#print(" computing Betweenness")
betw = averageBetweennessPositions(G)
#print(" computing modularity")
modularity = getModularity(G)
# fitter = ParameterFitter(G, 20)
r = 0.1
#read the graphs and run computations
while(round(r,1) <= 0.9):
# e = fitter.binarySearchParameter(round(r,1))
try:
print("Generating graphs for "+ str(round(r,1)))
run_cmd = ["./minla", "-i", path, "-n", num_nodes, "--zero-index", "-b", str(numrun), "-s", str(round(r,1)), "--gname", name, ]
if(len(run_args)):
for a in run_args:
run_cmd.append(a)
#output = check_output(run_cmd)
subprocess.call(run_cmd)
except Exception as e:
print("Process execution failed.",e)
traceback.print_exc(file=sys.stdout)
sys.exit(0)
sparse_g = []
for i in range(numrun):
try:
#print(" Reading graph file for ", round(r,1)," at ",i)
s_file = "out/"+name+"/"+name+"_"+str(round(r,1))+"_"+str(i)+".txt"
sparse_g.append(graphio.EdgeListReader(' ',0,"#",True, False).read(s_file))
except Exception as e:
print("Failed to read the graph file at "+str(r)+" "+str(i),e)
sys.exit(0)
#print(" Computing properties at ", round(r,1))
#compute diameter
sumD = 0.0
sumC = 0.0
connComp = 0.0
rho_deg =0
rho_bet=0
rho_pag = 0
rho_clust = 0
mod =0
edge_avg = .0
for g in sparse_g:
sumD = sumD + properties.Diameter.exactDiameter(g)
sumC = sumC + clustering(g)
connComp = connComp + numberOfComponents(g)
mod += getModularity(g)
edge_avg+= g.numberOfEdges()
edge_avg = edge_avg/float(numrun)
for q in range(numrun):
sg = sparse_g[q]
rho_clust += spearmanr(loc_clust_dist, centrality.LocalClusteringCoefficient(sg).run().scores())[0]
rho_deg+= spearmanr(deg_centr,centrality.DegreeCentrality(sg).run().scores())[0]
rho_bet+= spearmanr(betw,averageBetweennessPositions(sg))[0]
rho_pag+= spearmanr(page_rank,centrality.PageRank(sg).run().scores())[0]
edgeRatio = edge_avg / float(G.numberOfEdges())
edgeRatio = round(edgeRatio,2)
avgD = sumD/len(sparse_g) #average Diameter
avgC = sumC/len(sparse_g) #average Clustering Coefficient
connComp = (connComp / len(sparse_g))/float(numComp)
rho_deg = rho_deg/len(sparse_g)
rho_bet = rho_bet/len(sparse_g)
rho_pag = rho_pag/len(sparse_g)
rho_clust = rho_clust/len(sparse_g)
mod = mod/len(sparse_g)
mod =mod/modularity
#print(" Writing to file ", round(r,1))
with open(d_file,"a") as f:
f.write(str(round(orig_diameter/avgD,4)) +" "+ str(edgeRatio) +"\n")
with open(c_file,"a") as f:
f.write(str(round(avgC - orig_clustC,4)) +" "+ str(edgeRatio) +"\n")
with open(c_file2,"a") as f:
f.write(str(round(avgC,4)) +" "+ str(edgeRatio) +"\n")
with open(comp_file,"a") as f:
f.write(str(round(connComp,2)) +" "+ str(edgeRatio) +"\n")
with open(rho_deg_file,"a") as f:
f.write(str(round(rho_deg,4)) +" "+ str(edgeRatio) +"\n")
with open(rho_bet_file,"a") as f:
f.write(str(round(rho_bet,4)) +" "+ str(edgeRatio) +"\n")
with open(rho_pag_file,"a") as f:
f.write(str(round(rho_pag,4)) +" "+ str(edgeRatio) +"\n")
with open(mod_file,"a") as f:
f.write(str(round(mod,4)) +" "+ str(edgeRatio) +"\n")
with open(clust_dist_file,"a") as f:
f.write(str(round(rho_clust,4)) +" "+ str(edgeRatio) +"\n")
r =round(r,1) + 0.1
#remove output files
if os.path.exists("out/"+name):
rmtree("out/"+name)
mkdir("out/"+name)
print("Finalizing ", name)
#write properties of the full graph
with open(d_file,"a") as f:
f.write("1.0 1.0\n")
with open(c_file,"a") as f:
f.write("0.0 1.0\n")
with open(c_file2,"a") as f:
f.write( str(round(orig_clustC,2)) +" 1.0\n")
with open(comp_file,"a") as f:
f.write("1.0 1.0\n")
with open(mod_file,"a") as f:
f.write("1.0 1.0\n")
with open(rho_deg_file,"a") as f:
f.write("1.0 1.0\n")
with open(rho_bet_file,"a") as f:
f.write("1.0 1.0\n")
with open(rho_pag_file,"a") as f:
f.write("1.0 1.0\n")
with open(clust_dist_file,"a") as f:
f.write("1.0 1.0\n")
print("Completed run for graph: "+name+" in "+ str(time.time() - start_time))
if __name__ == '__main__':
main()
|
emmanuj/ml-sparsifier
|
generateSparseGraphs.py
|
Python
|
mit
| 6,757
|
"""
Starts a celery worker for ORES. Note that
Usage:
celery_worker -h | --help
celery_worker [--config=<path>]
Options:
-h --help Prints this documentation
--config=<path> The path to a yaml config file
[default: config/ores-localdev.yaml]
"""
import logging
import docopt
import yamlconf
from ..score_processors import Celery
def main(argv=None):
logging.basicConfig(level=logging.DEBUG)
args = docopt.docopt(__doc__, argv=argv)
config = yamlconf.load(open(args['--config']))
name = config['ores']['score_processor']
score_processor = Celery.from_config(config, name)
score_processor.application.worker_main(
argv=["celery_worker", "--loglevel=INFO"]
)
|
aetilley/ores
|
ores/utilities/celery_worker.py
|
Python
|
mit
| 753
|
import pyparsing as pp
from mitmproxy.net import http
from mitmproxy.net.http import user_agents, Headers
from . import base, message
"""
Normal HTTP requests:
<method>:<path>:<header>:<body>
e.g.:
GET:/
GET:/:h"foo"="bar"
POST:/:h"foo"="bar":b'content body payload'
Normal HTTP responses:
<code>:<header>:<body>
e.g.:
200
302:h"foo"="bar"
404:h"foo"="bar":b'content body payload'
Individual HTTP/2 frames:
h2f:<payload_length>:<type>:<flags>:<stream_id>:<payload>
e.g.:
h2f:0:PING
h2f:42:HEADERS:END_HEADERS:0x1234567:foo=bar,host=example.com
h2f:42:DATA:END_STREAM,PADDED:0x1234567:'content body payload'
"""
def get_header(val, headers):
"""
Header keys may be Values, so we have to "generate" them as we try the
match.
"""
for h in headers:
k = h.key.get_generator({})
if len(k) == len(val) and k[:].lower() == val.lower():
return h
return None
class _HeaderMixin:
@property
def unique_name(self):
return None
def values(self, settings):
return (
self.key.get_generator(settings),
self.value.get_generator(settings),
)
class _HTTP2Message(message.Message):
@property
def actions(self):
return [] # self.toks(actions._Action)
@property
def headers(self):
headers = self.toks(_HeaderMixin)
if not self.raw:
if not get_header(b"content-length", headers):
if not self.body:
length = 0
else:
length = len(self.body.string())
headers.append(
Header(
base.TokValueLiteral("content-length"),
base.TokValueLiteral(str(length)),
)
)
return headers
@property
def raw(self):
return bool(self.tok(Raw))
@property
def body(self):
return self.tok(Body)
def resolve(self, settings):
return self
class StatusCode(base.Integer):
pass
class Method(base.OptionsOrValue):
options = [
"GET",
"HEAD",
"POST",
"PUT",
"DELETE",
]
class Path(base.Value):
pass
class Header(_HeaderMixin, base.KeyValue):
preamble = "h"
class ShortcutContentType(_HeaderMixin, base.Value):
preamble = "c"
key = base.TokValueLiteral("content-type")
class ShortcutLocation(_HeaderMixin, base.Value):
preamble = "l"
key = base.TokValueLiteral("location")
class ShortcutUserAgent(_HeaderMixin, base.OptionsOrValue):
preamble = "u"
options = [i[1] for i in user_agents.UASTRINGS]
key = base.TokValueLiteral("user-agent")
def values(self, settings):
value = self.value.val
if self.option_used:
value = user_agents.get_by_shortcut(value.lower().decode())[2].encode()
return (
self.key.get_generator(settings),
value
)
class Raw(base.CaselessLiteral):
TOK = "r"
class Body(base.Value):
preamble = "b"
class Times(base.Integer):
preamble = "x"
class Response(_HTTP2Message):
unique_name = None
comps = (
Header,
Body,
ShortcutContentType,
ShortcutLocation,
Raw,
)
def __init__(self, tokens):
super(Response, self).__init__(tokens)
self.rendered_values = None
self.stream_id = 2
@property
def status_code(self):
return self.tok(StatusCode)
@classmethod
def expr(cls):
parts = [i.expr() for i in cls.comps]
atom = pp.MatchFirst(parts)
resp = pp.And(
[
StatusCode.expr(),
pp.ZeroOrMore(base.Sep + atom)
]
)
resp = resp.setParseAction(cls)
return resp
def values(self, settings):
if self.rendered_values:
return self.rendered_values
else:
headers = Headers([header.values(settings) for header in self.headers])
body = self.body
if body:
body = body.string()
resp = http.Response(
http_version=b'HTTP/2.0',
status_code=int(self.status_code.string()),
reason=b'',
headers=headers,
content=body,
trailers=None,
timestamp_start=0,
timestamp_end=0
)
resp.stream_id = self.stream_id
self.rendered_values = settings.protocol.assemble(resp)
return self.rendered_values
def spec(self):
return ":".join([i.spec() for i in self.tokens])
class NestedResponse(message.NestedMessage):
preamble = "s"
nest_type = Response
class Request(_HTTP2Message):
comps = (
Header,
ShortcutContentType,
ShortcutUserAgent,
Raw,
NestedResponse,
Body,
Times,
)
logattrs = ["method", "path"]
def __init__(self, tokens):
super(Request, self).__init__(tokens)
self.rendered_values = None
self.stream_id = 1
@property
def method(self):
return self.tok(Method)
@property
def path(self):
return self.tok(Path)
@property
def nested_response(self):
return self.tok(NestedResponse)
@property
def times(self):
return self.tok(Times)
@classmethod
def expr(cls):
parts = [i.expr() for i in cls.comps]
atom = pp.MatchFirst(parts)
resp = pp.And(
[
Method.expr(),
base.Sep,
Path.expr(),
pp.ZeroOrMore(base.Sep + atom)
]
)
resp = resp.setParseAction(cls)
return resp
def values(self, settings):
if self.rendered_values:
return self.rendered_values
else:
path = self.path.string()
if self.nested_response:
path += self.nested_response.parsed.spec().encode()
headers = Headers([header.values(settings) for header in self.headers])
body = self.body
if body:
body = body.string()
req = http.Request(
"",
0,
self.method.string(),
b'http',
b'',
path,
b"HTTP/2.0",
headers,
body,
None,
0,
0,
)
req.stream_id = self.stream_id
self.rendered_values = settings.protocol.assemble(req)
return self.rendered_values
def spec(self):
return ":".join([i.spec() for i in self.tokens])
def make_error_response(reason, body=None):
tokens = [
StatusCode("800"),
Body(base.TokValueLiteral("pathod error: " + (body or reason))),
]
return Response(tokens)
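# make_error_response is the programmatic counterpart of the response specs
# documented at the top of this module: it builds a synthetic "800" response
# whose body carries the error text. A rough sketch of its use (the exact
# rendered spec depends on how TokValueLiteral quotes the payload):
#
#     resp = make_error_response("connection refused")
#     resp.spec()   # roughly "800:b'pathod error: connection refused'"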
|
vhaupert/mitmproxy
|
pathod/language/http2.py
|
Python
|
mit
| 7,126
|
#!/usr/bin/python
import os
import sys
import tempfile
import getopt
import re
import socket
import logging
import argparse
import xml.etree.ElementTree as ET
logging.basicConfig(level=logging.DEBUG)
try:
import MySQLdb
except ImportError:
print 'This program requires the python MySQLdb module'
sys.exit(10)
#Todo
# 1. Add code to verify prereqs including:
# MySQLdb python module
# mplayer
# avconv
# mythtranscode
# 2. Add code to transcode to a second file for example use on ipod/cell-phone/etc
###########################################################
# Set some reasonable defaults
###########################################################
# Default number of lines to crop off the top
TURBO = 213
MPEGQUALITY = 1800
MYTHRECORDINGSPATH = ['/var/lib/mythtv/recordings', '/usr/local/mythtv/recordings', '.']
# WORKDIRS is a list of directories we can use for temporary files; we check whether each directory exists and has
# adequate space and use the first directory from the list with enough room.
WORKDIRS = ['/tmp', '/work', '/var/lib/mythtv/recordings', '/usr/local/mythtv/recordings']
class ExecutionError(Exception):
pass
def system_with_exception(command):
logging.info('Running command: %s' % command)
rc = os.system(command) >> 8
if rc:
raise ExecutionError("Command '%s' exited with return code of %d" % (command, rc))
class video(object):
def __init__(
self, filename='', workdir='/var/lib/mythtv/recordings', logfile='/tmp/cleanupvideo.out',
horizcrop=1, horizcroppercent=0, dbhost='localhost', dbuser='mythtv', dbpass='mythtv'
):
self.filename = filename
self.width = 0
self.height = 0
self.framerate = 0
self.currentcrop = ''
self.frames = 0
self.horizcrop = horizcrop
self.horizcroppercent = horizcroppercent
self.croptop = 0
self.cropleft = 0
self.cropright = 0
self.cropbottom = 0
self.operationnumber = 0
self.logfile = logfile
self.workdir = workdir
self.dbhost = dbhost
self.dbuser = dbuser
self.dbpass = dbpass
def detectcropvalues(self, frames=0, horizcrop=-1, horizcroppercent=-1, turbo=TURBO):
segmentsecs = 5
if turbo < segmentsecs:
turbo = segmentsecs * 2
if horizcrop != -1:
# This value is the number of 16 line blocks to crop from the top/bottom, so we need to multiply by 16
self.horizcrop = int(horizcrop) * 16
if horizcroppercent != -1:
self.horizcroppercent = horizcroppercent
if frames != 0:
self.frames = frames
cropsizes = {}
crop = ''
WIDTH = 0
HEIGHT = 0
edlfilename = '%s/cleanvideo_tmp.edl' % self.workdir
edifilename = tempfile.mktemp('.edl', 'cleanupvideo_', self.workdir)
fh = open(edifilename, 'w')
for sec in range(1, 14000, turbo):
fh.write('%d %d 0\n' % (sec, sec + (turbo - segmentsecs)))
fh.close()
if frames == 0:
command = 'mplayer -quiet -edl %s -benchmark -nosound -vf cropdetect=24:16 -vo null %s 2> /dev/null' % (
edifilename, self.filename)
else:
command = 'mplayer -quiet -edl %s -benchmark -nosound -vf cropdetect=24:16 -frames %d -vo null %s 2>/dev/null' % (
edifilename, frames, self.filename)
logging.debug('Running command: %s' % command)
for line in os.popen(command).readlines():
splitline = line.strip().split()
if len(splitline) > 3 and splitline[0] == 'VIDEO:':
WIDTH = splitline[2].split('x')[0]
HEIGHT = splitline[2].split('x')[1]
self.framerate = splitline[5]
if len(splitline) > 7 and splitline[0] == '[CROP]':
crop = splitline[8][5:-2]
try:
cropsizes[crop] = cropsizes[crop] + 1
except KeyError:
cropsizes[crop] = 1
currentcropcount = 0
currentcrop = ''
for crop in cropsizes.keys():
if cropsizes[crop] > currentcropcount:
currentcrop = crop
currentcropcount = cropsizes[crop]
if len(currentcrop):
splitcrop = currentcrop.split(':')
height = int(splitcrop[1])
evenheight = (height / 16) * 16
remainder = height - evenheight
if horizcroppercent > 0:
horizcrop = int(float(height) * (float(horizcroppercent) * .01))
if remainder == horizcrop:
currentcrop = '%d:%d:%d:%d' % (
int(splitcrop[0]), int(splitcrop[1]) - (horizcrop / 2), int(splitcrop[2]),
int(splitcrop[3]) + (horizcrop / 2))
if remainder > horizcrop:
currentcrop = '%d:%d:%d:%d' % (
int(splitcrop[0]), int(splitcrop[1]) - (remainder / 2), int(splitcrop[2]),
int(splitcrop[3]) + (remainder / 2))
if remainder < horizcrop:
currentcrop = '%d:%d:%d:%d' % (
int(splitcrop[0]), evenheight - 16, int(splitcrop[2]), int(splitcrop[3]) + 8)
self.width = int(WIDTH)
self.height = int(HEIGHT)
self.currentcrop = currentcrop
if len(currentcrop):
cropvalues = currentcrop.split(':')
self.croptop = int(cropvalues[3])
self.cropleft = int(cropvalues[2])
self.cropright = self.width - (self.cropleft + int(cropvalues[0]))
self.cropbottom = self.height - (self.croptop + (int(cropvalues[1])))
if self.cropbottom < 0:
self.cropbottom = 0
logging.debug(
'Crop borders are: %s %s %s %s %s %s' % (
self.width, self.height, self.croptop, self.cropleft, self.cropbottom, self.cropright
)
)
os.remove(edifilename)
def createlockfile(self, completed=False):
fh = open("%s.cleanupvideoran" % self.filename, 'w')
fh.write('filename: %s\n' % self.filename)
fh.write('hostname: %s\n' % socket.gethostname())
#fh.write('cutcommercials: %d\n' % self.cutcommercials)
#fh.write('cropvideo: %d\n' % self.cropvideo)
fh.write('frames: %d\n' % self.frames)
fh.write('horizcrop: %d\n' % self.horizcrop)
fh.write('horizcroppercent: %d\n' % self.horizcroppercent)
fh.write('completed: %d\n' % completed)
fh.close()
def deletelockfile(self):
os.remove('%s.cleanupvideoran' % self.filename)
def checklockfile(self):
rc = 9
try:
fh = open("%s.cleanupvideoran" % self.filename, 'r')
except IOError:
return (2)
for line in fh.readlines():
if len(line):
splitline = line.strip().split()
if splitline[0] == 'completed:':
try:
rc = int(splitline[1])
except ValueError:
if splitline[1] == 'True':
rc = 1
else:
rc = 0
return (rc)
def swapfiles(self, keeporiginal=False):
logging.debug(
'Swapping files: %s/new.%s <-> %s' % (self.workdir, os.path.basename(self.filename), self.filename))
if keeporiginal:
for backupnumber in range(self.operationnumber, 999):
try:
fh = open('%s/%s.%d' % (self.workdir, os.path.basename(self.filename), backupnumber))
fh.close()
except IOError:
break
try:
fh = open('%s/new.%s' % (self.workdir, os.path.basename(self.filename)))
except IOError:
print 'New file did not get created'
return (1)
fh.close()
os.rename(self.filename, '%s.old.%d' % (self.filename, backupnumber))
# This doesn't work if the files aren't on the same device
#os.rename('%s/new.%s' % (self.workdir,os.path.basename(self.filename),self.filename))
os.system('mv %s/new.%s %s' % (self.workdir, os.path.basename(self.filename), self.filename))
else:
try:
fh = open('%s/new.%s' % (self.workdir, os.path.basename(self.filename)))
except IOError:
print 'New file did not get created'
return (1)
fh.close()
os.remove(self.filename)
# This doesn't work if both files aren't on the same filesystem
#os.rename('%s/new.%s' % (self.workdir,os.path.basename(self.filename)), self.filename)
os.system('mv %s/new.%s %s' % (self.workdir, os.path.basename(self.filename), self.filename))
self.operationnumber = self.operationnumber + 1
return (0)
def crop(self, keeporiginal=False, format='mp4', resize=False, mpegquality=MPEGQUALITY):
if self.currentcrop == '':
            logging.debug('No crop boundaries for this video object, running cropdetect')
self.detectcropvalues()
if self.croptop == 0 and self.cropbottom == 0 and self.cropleft == 0 and self.cropright == 0:
print 'No crop borders detected'
return (0)
# I am overriding the framerate here since it doesn't always get the correct framerate??
self.framerate = 29.97
# Try it first just cropping, if it doesn't work try specifying the target format
command = 'avconv >> %s 2>&1 -y -i "%s" -cropleft %d -cropright %d -croptop %d -cropbottom %d -aspect 16:9 ' \
'-f mp4 -b %dkb "%s/new.%s"' % (
self.logfile, self.filename, self.cropleft, self.cropright, self.croptop, self.cropbottom, mpegquality,
self.workdir, os.path.basename(self.filename)
)
logging.debug('Running command: %s' % command)
rc = os.system(command) >> 8
if rc:
command = 'avconv >>%s 2>>%s -y -i "%s" -cropleft %d -cropright %d -croptop %d -cropbottom %d -target ntsc-dvd -aspect 16:9 -b %dkb "%s/new.%s"' % (
self.logfile, self.logfile, self.filename, self.cropleft, self.cropright, self.croptop, self.cropbottom,
mpegquality, self.workdir, os.path.basename(self.filename)
)
logging.debug('Running command: %s' % command)
rc = os.system(command) >> 8
if rc == 1:
logging.debug('Crop failed, returning failure code')
return False
self.swapfiles(keeporiginal)
return True
def cut_commercials(self, keep_original=False):
# Commercial cutting is experimental and may not work as intended
logging.info('cutcommercials: Flagging commercials')
system_with_exception('mythcommflag -f %s' % self.filename)
logging.info('cutcommercials: Copying commercials to cutlist')
system_with_exception('mythcommflag --gencutlist -f %s' % self.filename)
logging.info('cutcommercials: transcoding and removing the commercial segments')
temppath = ''
system_with_exception(
'mythtranscode --honorcutlist -i "%s" -o "%s/new.%s"' % (
self.filename, self.workdir, os.path.basename(self.filename)
)
)
logging.info('cutcommercials: Replacing the original file')
self.swapfiles(keep_original)
# Remove the old cutlist since it's no longer valid
self.clearcutlist()
def transcode(self, keeporiginal=False):
rc = os.system('mythtranscode -i "%s" -o "%s/new.%s"' % (
self.filename, self.workdir, os.path.basename(self.filename))) >> 8
if rc != 0:
print 'Transcoding failed for %s with error %d' % (self.filename, rc)
return (2)
self.swapfiles(keeporiginal)
self.clearcutlist()
def rebuildseeklist(self):
rc = os.system('mythcommflag --video %s' % self.filename) >> 8
if rc != 0:
print 'Rebuilding seek list failed for %s with error %d' % (self.filename, rc)
return (1)
def clearcutlist(self):
rc = os.system('mythcommflag --clearcutlist -f %s' % self.filename) >> 8
conn = MySQLdb.connect(host=self.dbhost, user=self.dbuser, passwd=self.dbpass, db="mythconverg")
cursor = conn.cursor()
cursor.execute("UPDATE recorded SET cutlist=0,filesize=%ld WHERE basename='%s';" % (
os.path.getsize(self.filename), os.path.basename(self.filename)))
cursor.close()
conn.close()
if rc != 0:
print 'Clearing cutlist failed for %s with error %d' % (self.filename, rc)
return (1)
def marktranscoded(self):
conn = MySQLdb.connect(host=self.dbhost, user=self.dbuser, passwd=self.dbpass, db="mythconverg")
cursor = conn.cursor()
cursor.execute("update recorded set transcoded=1 where basename='%s';" % (os.path.basename(self.filename)))
cursor.close()
conn.close()
def deleteoldlocks(path=MYTHRECORDINGSPATH):
if len(path) == 0:
return
for directory in path:
try:
files = os.listdir(directory)
for file in files:
if re.search('cleanupvideoran$', file) != None:
tempfile = '.'.join(file.split('.')[:-1])
if tempfile not in files:
logging.debug('Deleting old lock file: %s' % os.path.join(directory, file))
os.remove(os.path.join(directory, file))
except OSError:
pass
def deleteoldbackups(path=MYTHRECORDINGSPATH):
if len(path) == 0:
return
for directory in path:
try:
files = os.listdir(directory)
for file in files:
if re.search('\.old\.*', file) != None:
tempfile = '.'.join(file.split('.')[:-2])
if tempfile not in files:
logging.debug('DEBUG: Deleting old backup file: %s' % os.path.join(directory, file))
os.remove(os.path.join(directory, file))
except OSError:
pass
def df(directory='/', humanreadable=False):
if humanreadable == True:
options = '-khP'
else:
options = '-kP'
try:
dfout = os.popen('df %s %s 2>/dev/null' % (options, directory)).readlines()[1]
except IndexError:
return 'None', 'None'
splitline = dfout.split()
return splitline[2], splitline[3]
def find_work_directory(filename):
"""
Search the work directories for one with space to fit the filename
"""
global WORKDIRS
workdir = None
FILESIZE = os.path.getsize(filename) / 1024
for entry in WORKDIRS:
used, available = df(entry)
if used != 'None':
if long(available) > long(FILESIZE):
workdir = entry
break
return workdir
def get_mythtv_database_settings():
"""
Get the mythtv database settings
"""
# First set reasonable defaults
settings = {
'host': 'localhost',
'username': 'mythtv',
'password': 'mythtv',
'databasename': 'mythconverg',
'port': '3306'
}
    # If there's an old-style mythtv config file, update the defaults with its values
if os.path.exists('/etc/mythtv/mysql.txt'):
for line in open('/etc/mythtv/mysql.txt','r').readlines():
splitline=line.strip().split('=')
if splitline[0].lower() == 'dbhostname':
settings['host'] = splitline[1]
if splitline[0].lower() == 'dbusername':
settings['username'] = splitline[1]
if splitline[0].lower() == 'dbpassword':
settings['password'] = splitline[1]
# If there's a new config.xml file, update the settings from it
if os.path.exists('/etc/mythtv/config.xml'):
tree = ET.parse('/etc/mythtv/config.xml')
root = tree.getroot()
for dbset in root.find('Database'):
settings[dbset.tag.lower()] = dbset.text
return settings
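# The XML branch above only assumes a <Database> element whose child tags are
# lowercased into the settings dict (Host -> host, UserName -> username, and
# so on). An illustrative config.xml fragment it would accept (the exact tag
# set is an assumption about MythTV's format, not something this script checks):
#
# <Configuration>
#   <Database>
#     <Host>localhost</Host>
#     <UserName>mythtv</UserName>
#     <Password>mythtv</Password>
#     <DatabaseName>mythconverg</DatabaseName>
#     <Port>3306</Port>
#   </Database>
# </Configuration>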
if __name__ == "__main__":
db_settings = get_mythtv_database_settings()
parser = argparse.ArgumentParser()
db_group = parser.add_argument_group('Database Settings')
db_group.add_argument(
'--dbhostname', default=db_settings['host'], help='The hostname or ip address of the database server (%(default)s)'
)
db_group.add_argument(
'--dbusername', default=db_settings['username'], help='The database user to connect with (%(default)s)'
)
db_group.add_argument(
'--dbpassword', default=db_settings['password'], help='The database password for the user (%(default)s)'
)
parser.add_argument(
'filename', help='Name of file to transcode'
)
parser.add_argument(
'--cut_commercials', action="store_true",
help='Cut the commercials out of the video, this automatically enables the transcode option'
)
parser.add_argument(
'--cropvideo', action="store_true", help="Crop the black borders off the sides of the video"
)
parser.add_argument(
'--croptwice', action='store_true',
help='Crop a second time after removing the horizcrop/horizcropprecent and cutting out commercials'
)
parser.add_argument(
'--examineframes', type=int, default=0,
help='The number of frames to examine to determine the crop amount, the default is all frames'
)
parser.add_argument(
'--horizcrop', type=int, default=1,
help='Additional number of horizontal lines to crop from the top and the bottom'
)
parser.add_argument(
'--horizcroppercent', type=int, default=0,
help='Percentage of lines to crop from top and bottom, overrides horizcrop (useful for removing garbage lines '
        'from the top of some recordings)'
)
parser.add_argument(
'--allowrunagain', action='store_true',
help='Run even if the video has been operated on before'
)
parser.add_argument(
'--keep_original', action='store_true',
help='Keeps the original files with a .old.x extension (can increase space usage significantly)'
)
parser.add_argument(
'--transcode', action='store_true',
help='Transcode the video'
)
parser.add_argument(
'--deleteoldlocks', action='store_true',
help='Delete old lock files with no associated video file'
)
parser.add_argument(
'--deleteoldbackups', action='store_true',
help='Delete old backup files'
)
parser.add_argument(
'--runagain', action='store_true', help='Run operation on the file again'
)
args = parser.parse_args()
logging.debug('command is: %s' % ' '.join(sys.argv))
# This is needed because the video output by avconv generates an
# unable to initialize video error from the mythtv frontend until
    # it is run through mythtranscode.
if not args.cut_commercials:
args.transcode = True
if args.deleteoldlocks:
deleteoldlocks()
if args.deleteoldbackups:
deleteoldbackups()
#args.filename=args[0]
BASENAME = os.path.basename(args.filename)
if not os.path.exists(args.filename):
fileexists = False
for path in MYTHRECORDINGSPATH:
if os.path.exists('%s/%s' % (path, BASENAME)):
fileexists = True
DIRNAME = path
args.filename = '%s/%s' % (path, BASENAME)
break
if not fileexists:
print 'No such file, %s' % args.filename
sys.exit(5)
workdir = find_work_directory(args.filename)
logging.debug('Work Directory: %s', workdir)
# Create a video object
vid = video(
filename=args.filename, workdir=workdir, dbhost=args.dbhostname, dbuser=args.dbusername, dbpass=args.dbpassword
)
logging.debug('Checking for lock file')
if not args.runagain:
if vid.checklockfile():
print 'cleanupvideo.py has already been run on this file'
sys.exit(1)
else:
print('cleanvideo.py is currently running on this file or exited abnormally')
print('check file and rerun with --runagain if the previous run failed')
sys.exit(1)
logging.debug('Creating the lock file')
vid.createlockfile(completed=False)
if args.cropvideo:
logging.debug(
            'Detecting video crop borders with horizcrop=%d, horizcroppercent=%d' % (
args.horizcrop, args.horizcroppercent
)
)
vid.detectcropvalues(
frames=args.examineframes, horizcrop=args.horizcrop, horizcroppercent=args.horizcroppercent)
logging.debug('Cropping the video')
if not vid.crop(keeporiginal=args.keep_original):
vid.deletelockfile()
print 'ERROR: All cropping options failed, exiting without making any changes'
sys.exit(9)
if args.cut_commercials:
vid.cut_commercials(keep_original=args.keep_original)
if args.croptwice:
        logging.debug('Detecting crop boundaries a second time')
vid.detectcropvalues(frames=args.examineframes, horizcrop=0, horizcroppercent=0)
logging.debug('Cropping the video a second time')
vid.crop(keeporiginal=args.keep_original)
# If the video has not been transcoded and the transcode option is enabled, transcode the video
if not args.cut_commercials and args.transcode:
vid.transcode(keeporiginal=args.keep_original)
logging.debug('Updating the seeklist')
vid.rebuildseeklist()
    logging.debug('Updating the lock file to indicate cleanvideo ran successfully')
vid.marktranscoded()
vid.createlockfile(completed=True)
|
dwighthubbard/mythtv_user_scripts
|
scripts/cleanvideo.py
|
Python
|
mit
| 22,022
|
# coding: UTF-8
from unittest import TestCase
from frame import matrix
from numpy import allclose
from math import radians
class MatrixTests(TestCase):
def test_transform_matrix(self):
        # Rotate -90 degrees about the Y axis
a = matrix.transformMatrix(0, 0, 2.85, 0)
self.assertTrue(allclose((
(0., 0., 1.),
(0., 1., 0.),
(-1., 0., 0.)), a))
        # Rotate 90 degrees about the Y axis
a = matrix.transformMatrix(0, 0, -1.2, 0)
self.assertTrue(allclose((
(0., 0., -1.),
(0., 1., 0.),
(1., 0., 0.)), a))
        # Rotate -arctan(3/4) about the Y axis
a = matrix.transformMatrix(4, 0, 3, 0)
self.assertTrue(allclose((
(4./5, 0., 3./5),
(0., 1., 0.),
(-3./5, 0., 4./5)), a))
        # Rotate 90 degrees about the Z axis
a = matrix.transformMatrix(0, 40.2, 0, 0)
self.assertTrue(allclose((
(0., 1., 0.),
(-1., 0., 0.),
(0., 0., 1.)), a))
        # Rotate -90 degrees about the Z axis
a = matrix.transformMatrix(0, -0.018, 0, 0)
self.assertTrue(allclose((
(0., -1., 0.),
(1., 0., 0.),
(0., 0., 1.)), a))
        # Rotate arctan(3/4) about the Z axis
a = matrix.transformMatrix(4, 3, 0, 0)
self.assertTrue(allclose((
(4./5, 3./5, 0.),
(-3./5, 4./5, 0.),
(0., 0., 1.)), a))
        # Rotate 90 degrees about the X axis
a = matrix.transformMatrix(20591281, 0, 0, radians(90))
self.assertTrue(allclose((
(1., 0., 0.),
(0., 0., 1.),
(0., -1., 0.)), a))
        # Rotation in the YZ plane
a = matrix.transformMatrix(0, -4, -3, 0)
self.assertTrue(allclose((
(0., -4./5, -3./5),
(0., 3./5, -4./5),
(1., 0., 0.)), a))
|
1stop-st/jsonrpc-calculator
|
frame/tests/test_matrix.py
|
Python
|
mit
| 1,922
|
"""
Created by Gotham on 04-08-2018.
"""
from telegram.ext import ConversationHandler, CommandHandler, MessageHandler, Filters
import flood_protection
import sqlite3
import json
import time
import os
from utility import Utility
timeouts = flood_protection.Spam_settings()
BDC, DB, CF = range(12000, 12003)
class AdminHandle:
def __init__(self, mount_point, admin_list, fallback):
self.admin_list = admin_list
self.mount_point = mount_point
self.utility = Utility(mount_point)
self.conv_handler1 = ConversationHandler(
entry_points=[CommandHandler('broadcast', self.broadcast)],
allow_reentry=True,
states={
BDC: [MessageHandler(Filters.text, self.broadcast_message)]
},
fallbacks=[fallback]
)
self.conv_handler2 = ConversationHandler(
entry_points=[CommandHandler('senddb', self.getDb)],
allow_reentry=True,
states={
DB: [MessageHandler(Filters.document, self.db)]
},
fallbacks=[fallback]
)
# START OF ADMIN CONVERSATION HANDLER TO BROADCAST MESSAGE
@timeouts.wrapper_for_class_methods
def broadcast(self, bot, update):
if self.not_admin(update):
return ConversationHandler.END
update.message.reply_text("Send your message")
return BDC
def broadcast_message(self, bot, update):
message = update.message.text
conn = sqlite3.connect(self.mount_point + 'coders1.db')
c = conn.cursor()
c.execute('select id from handles union select id from subscribers')
for row in c.fetchall():
try:
bot.send_message(text=message, chat_id=row[0])
except:
pass
time.sleep(1)
c.close()
conn.close()
return ConversationHandler.END
# END OF ADMIN CONVERSATION HANDLER TO BROADCAST MESSAGE
# START OF ADMIN CONVERSATION HANDLER TO REPLACE THE DATABASE
@timeouts.wrapper_for_class_methods
def getDb(self, bot, update):
if self.not_admin(update):
return ConversationHandler.END
update.message.reply_text("send your sqlite database")
return DB
def db(self, bot, update):
file_id = update.message.document.file_id
newFile = bot.get_file(file_id)
newFile.download(self.mount_point + 'coders1.db')
update.message.reply_text("saved")
return ConversationHandler.END
# END OF ADMIN CONVERSATION HANDLER TO REPLACE THE DATABASE
def not_admin(self, update):
if not str(update.message.chat_id) in self.admin_list:
update.message.reply_text("sorry you are not an admin")
return True
else:
return False
# ADMIN COMMAND HANDLER FOR GETTING THE DATABASE
@timeouts.wrapper_for_class_methods
def givememydb(self, bot, update):
if self.not_admin(update):
return
bot.send_document(chat_id=update.message.chat_id, document=open(self.mount_point + 'coders1.db', 'rb'))
# ADMIN COMMAND HANDLER FOR GETTING THE CODEFORCES JSON
@timeouts.wrapper_for_class_methods
def getcfjson(self, bot, update):
if self.not_admin(update):
return
bot.send_document(chat_id=update.message.chat_id, document=open(self.mount_point + 'codeforces.json', 'rb'))
# ADMIN COMMAND HANDLER FUNCTION TO GET THE DETAILS OF HANDLES OF ALL THE USERS IN DATABASE
@timeouts.wrapper_for_class_methods
def adminhandle(self, bot, update):
if self.not_admin(update):
return
conn = sqlite3.connect(self.mount_point + 'coders1.db')
c = conn.cursor()
mysel = c.execute("SELECT * FROM handles")
self.utility.xlsx_creator(mysel, "admin.xlsx")
bot.send_document(chat_id=update.message.chat_id, document=open('admin.xlsx', 'rb'))
os.remove('admin.xlsx')
conn.close()
|
Gotham13121997/superCodingBot
|
handlers/admin.py
|
Python
|
mit
| 4,007
|
import json
from redlib.api.web import HtmlParser
from six.moves.urllib.parse import urlencode, urlparse, parse_qs
from ..util import log
from ..db.app.query_list import QueryList
from ..util.printer import printer
from .base import SourceError, SourceParams, Source
from .images import Images
from .http_helper import HttpHelper
from .trace import Trace
from .image import Image
from .filters.fansshare import FansshareFilter
class GoogleParams(SourceParams):
name = 'google'
def __init__(self, query=None, color=None, safesearch=True):
self.query = query
self.color = color
self.safesearch = safesearch
self.hash_params = ['query', 'color', 'safesearch']
class Google(Source):
name = 'google'
params_cls = GoogleParams
online = True
db = False
gen = False
host_url = "https://www.google.com"
search_base_url = "https://www.google.com/search?tbm=isch&"
colors = ['red', 'orange', 'yellow', 'green', 'teal', 'blue', 'purple', 'pink', 'white', 'gray', 'black', 'brown']
user_agent = 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:41.0) Gecko/20100101 Firefox/41.0'
def __init__(self):
#super(Google, self).__init__()
pass
def get_image(self, params=None):
if params is None:
params = GoogleParams()
self._trace = Trace()
self._images = Images(params, cache=True, cache_timeout='1d', trace=self._trace)
fansshare_filter = FansshareFilter(referer=self.host_url)
self._images.add_select_filter(fansshare_filter.filter)
self._http = HttpHelper()
if not self._images.available():
self.search(params)
return self._http.download_image(self._images, self._trace, headers={'User-Agent': self.user_agent, 'Referer': self.host_url})
def search(self, params):
if params.query is None:
params.query = QueryList().random()
self._trace.add_step('random search term', params.query)
if params.color is not None and not params.color in self.colors:
msg = '%s is not a supported color for google image search. please choose from: %s'%(params.color, ', '.join(self.colors))
log.error(msg)
raise SourceError(msg)
elif params.color is not None:
self._trace.add_step('color', params.color)
q_params = {
'as_q' : params.query,
'as_st' : 'y',
'as_epq' : '',
'as_oq' : '',
'as_eq' : '',
'cr' : '',
'as_sitesearch' : '',
'safe' : 'active' if params.safesearch else 'images',
'tbs' : 'isz:lt,islt:xga' + ',ic:specific,isc:%s'%params.color if params.color is not None else ''
}
search_url = self.search_base_url + urlencode(q_params)
self._trace.add_step('search', params.query)
response = self._http.get(search_url, msg='searching google images', headers={'User-Agent': self.user_agent})
self.extract_results(response)
printer.printf('result', '%d images'%self._images.count, verbosity=2)
def extract_results(self, response):
etree = self.get_etree(response)
result_div_path = './/div[@class=\'rg_di rg_bx rg_el ivg-i\']'
for div in etree.findall(result_div_path):
meta_div = div.find(".//div[@class='rg_meta']")
if meta_div is not None:
meta = json.loads(meta_div.text)
image = Image()
image.url = meta.get('ou', None)
image.width = meta.get('ow', None)
image.height = meta.get('oh', None)
image.context_url = meta.get('ru', None)
image.title = meta.get('pt', None)
image.description = meta.get('s', None)
image.ext = meta.get('ity', None)
self._images.add(image)
def get_etree(self, html):
parser = HtmlParser()
parser.feed(html)
etree = parser.etree
return etree
def get_trace(self):
return self._trace.steps
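# A minimal usage sketch, assuming the surrounding wallp plumbing (QueryList,
# HttpHelper, settings) is available; the query and color below are
# placeholder values:
#
#     source = Google()
#     image = source.get_image(GoogleParams(query='sunset', color='blue'))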
|
amol9/wallp
|
wallp/source/google.py
|
Python
|
mit
| 3,624
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import os
import json
class OnActiveStorePathResolver(object):
def __init__(self,resolver,dependency,resolve_config_path):
self.resolver=resolver
self.dependency=dependency
self.resolve_config_path=resolve_config_path
def resolve(self):
path=self.resolver.resolve()
self.__write_config(path=path)
return path
def __write_config(self,path):
config_path=os.path.join(self.resolve_config_path,self.dependency.name+'.resolve.json')
config={'sha1':self.dependency.sha1,'path':path,'is_symlink':self.dependency.is_symlink,'real_path':self.dependency.real_path}
with open(config_path,'w')as config_file:
json.dump(config,config_file,indent=4,sort_keys=True)
|
looopTools/sw9-source
|
.waf-1.9.8-6657823688b736c1d1a4e2c4e8e198b4/waflib/extras/wurf/on_active_store_path_resolver.py
|
Python
|
mit
| 796
|
import socket
import ssl
import logging
import datetime
import collections
from typing import Optional
from ..exceptions import SocketError, ListenerError
from ..compat import json
from .listener import BaseListener
logger = logging.getLogger(__name__)
class BetfairStream:
"""Socket holder, connects to betfair and
pushes any received data to listener
"""
__port = 443
__CRLF = "\r\n"
__encoding = "utf-8"
HOSTS = collections.defaultdict(
lambda: "stream-api.betfair.com",
integration="stream-api-integration.betfair.com",
race="sports-data-stream-api.betfair.com",
)
def __init__(
self,
unique_id: int,
listener: BaseListener,
app_key: str,
session_token: str,
timeout: float,
buffer_size: int,
host: Optional[str],
):
self._unique_id = unique_id
self.listener = listener
self.app_key = app_key
self.session_token = session_token
self.timeout = timeout
self.buffer_size = buffer_size
self.host = self.HOSTS[host]
self.receive_count = 0
self.datetime_last_received = None
self._socket = None
self._running = False
def start(self) -> None:
"""Starts read loop, connects/authenticates
if not already running.
"""
if not self._running:
self._connect()
self.authenticate()
self._read_loop()
def stop(self) -> None:
"""Stops read loop and closes socket if it has been created."""
self._running = False
if self._socket is None:
return
# attempt graceful shutdown
try:
self._socket.shutdown(socket.SHUT_RDWR)
except socket.error:
pass
# close socket
try:
self._socket.close()
except socket.error:
pass
self._socket = None
def authenticate(self) -> int:
"""Authentication request."""
unique_id = self.new_unique_id()
message = {
"op": "authentication",
"id": unique_id,
"appKey": self.app_key,
"session": self.session_token,
}
self._send(message)
return unique_id
def heartbeat(self) -> int:
"""Heartbeat request to keep session alive."""
unique_id = self.new_unique_id()
message = {"op": "heartbeat", "id": unique_id}
self._send(message)
return unique_id
def subscribe_to_markets(
self,
market_filter: dict,
market_data_filter: dict,
initial_clk: str = None,
clk: str = None,
conflate_ms: int = None,
heartbeat_ms: int = None,
segmentation_enabled: bool = True,
) -> int:
"""
Market subscription request.
:param dict market_filter: Market filter
:param dict market_data_filter: Market data filter
:param str initial_clk: Sequence token for reconnect
:param str clk: Sequence token for reconnect
:param int conflate_ms: conflation rate (bounds are 0 to 120000)
:param int heartbeat_ms: heartbeat rate (500 to 5000)
:param bool segmentation_enabled: allow the server to send large sets of data
in segments, instead of a single block
"""
unique_id = self.new_unique_id()
message = {
"op": "marketSubscription",
"id": unique_id,
"marketFilter": market_filter,
"marketDataFilter": market_data_filter,
"initialClk": initial_clk,
"clk": clk,
"conflateMs": conflate_ms,
"heartbeatMs": heartbeat_ms,
"segmentationEnabled": segmentation_enabled,
}
if initial_clk and clk:
# if resubscribe only update unique_id
self.listener.update_unique_id(unique_id)
else:
self.listener.register_stream(unique_id, "marketSubscription")
self._send(message)
return unique_id
def subscribe_to_orders(
self,
order_filter: dict = None,
initial_clk: str = None,
clk: str = None,
conflate_ms: int = None,
heartbeat_ms: int = None,
segmentation_enabled: bool = True,
) -> int:
"""
Order subscription request.
:param dict order_filter: Order filter to be applied
:param str initial_clk: Sequence token for reconnect
:param str clk: Sequence token for reconnect
:param int conflate_ms: conflation rate (bounds are 0 to 120000)
:param int heartbeat_ms: heartbeat rate (500 to 5000)
:param bool segmentation_enabled: allow the server to send large sets of data
in segments, instead of a single block
"""
unique_id = self.new_unique_id()
message = {
"op": "orderSubscription",
"id": unique_id,
"orderFilter": order_filter,
"initialClk": initial_clk,
"clk": clk,
"conflateMs": conflate_ms,
"heartbeatMs": heartbeat_ms,
"segmentationEnabled": segmentation_enabled,
}
if initial_clk and clk:
# if resubscribe only update unique_id
self.listener.update_unique_id(unique_id)
else:
self.listener.register_stream(unique_id, "orderSubscription")
self._send(message)
return unique_id
def subscribe_to_races(self) -> int:
"""Race subscription request."""
unique_id = self.new_unique_id()
message = {"op": "raceSubscription", "id": unique_id}
self.listener.register_stream(unique_id, "raceSubscription")
self._send(message)
return unique_id
def new_unique_id(self) -> int:
self._unique_id += 1
return self._unique_id
def _connect(self) -> None:
"""Creates socket and sets running to True."""
self._socket = self._create_socket()
self._running = True
def _create_socket(self) -> socket.socket:
"""Creates ssl socket, connects to stream api and
sets timeout.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s = ssl.wrap_socket(s)
s.settimeout(self.timeout)
s.connect((self.host, self.__port))
return s
def _read_loop(self) -> None:
"""Read loop, splits by CRLF and pushes received data
to _data.
"""
while self._running:
received_data_raw = self._receive_all()
if self._running:
self.receive_count += 1
self.datetime_last_received = datetime.datetime.utcnow()
received_data_split = received_data_raw.split(self.__CRLF)
for received_data in received_data_split:
if received_data:
self._data(received_data)
def _receive_all(self) -> Optional[str]:
"""Whilst socket is running receives data from socket,
till CRLF is detected.
"""
(data, part) = ("", "")
crlf_bytes = bytes(self.__CRLF, encoding=self.__encoding)
while self._running and part[-2:] != crlf_bytes:
try:
part = self._socket.recv(self.buffer_size)
except (socket.timeout, socket.error) as e:
if self._running:
self.stop()
raise SocketError("[Connect: %s]: Socket %s" % (self._unique_id, e))
else:
return # 133, prevents error if stop is called mid recv
# an empty string indicates the server shutdown the socket
if len(part) == 0:
if self._running:
self.stop()
raise SocketError(
"[Connect: %s]: Connection closed by server"
% (self._unique_id,)
)
else:
return # 165, prevents error if stop is called mid recv
data += part.decode(self.__encoding)
return data
def _data(self, received_data: str) -> None:
"""Sends data to listener, if False is returned; socket
is closed.
:param received_data: Decoded data received from socket.
"""
if self.listener.on_data(received_data) is False:
self.stop()
raise ListenerError(self.listener.connection_id, received_data)
def _send(self, message: dict) -> None:
"""If not running connects socket and
authenticates. Adds CRLF and sends message
to Betfair.
:param message: Data to be sent to Betfair.
"""
if not self._running:
self._connect()
self.authenticate()
message_dumped = json.dumps(message)
if not isinstance(
message_dumped, bytes
): # handles orjson as `orjson.dumps -> bytes` but `json.dumps -> str`
message_dumped = message_dumped.encode(encoding=self.__encoding)
crlf = bytes(self.__CRLF, encoding=self.__encoding)
message_dumped += crlf
logger.debug(
"[Subscription: %s] Sending: %s" % (self._unique_id, repr(message_dumped))
)
try:
self._socket.sendall(message_dumped)
except (socket.timeout, socket.error) as e:
self.stop()
raise SocketError("[Connect: %s]: Socket %s" % (self._unique_id, e))
def __str__(self) -> str:
return "<BetfairStream [%s]>" % ("running" if self._running else "not running")
def __repr__(self) -> str:
return "<BetfairStream>"
class HistoricalStream:
"""Copy of 'Betfair Stream' for parsing
historical data.
"""
def __init__(
self, file_path: str, listener: BaseListener, operation: str, unique_id: int
):
"""
:param str file_path: Directory of betfair data
:param BaseListener listener: Listener object
:param str operation: Operation type
:param int unique_id: Stream id (added to updates)
"""
self.file_path = file_path
self.listener = listener
self.operation = operation
self.unique_id = unique_id
self._running = False
def start(self) -> None:
self._running = True
self._read_loop()
def stop(self) -> None:
self._running = False
def _read_loop(self) -> None:
self.listener.register_stream(self.unique_id, self.operation)
with open(self.file_path, "r") as f:
for update in f:
if self.listener.on_data(update) is False:
# if on_data returns an error stop the stream and raise error
self.stop()
raise ListenerError("HISTORICAL", update)
if not self._running:
break
else:
# if f has finished, also stop the stream
self.stop()
class HistoricalGeneratorStream(HistoricalStream):
"""Copy of 'Betfair Stream' for parsing
historical data (no threads).
"""
def get_generator(self):
return self._read_loop
def _read_loop(self) -> dict:
self._running = True
self.listener.register_stream(self.unique_id, self.operation)
with open(self.file_path, "r") as f:
for update in f:
if self.listener.on_data(update) is False:
# if on_data returns an error stop the stream and raise error
self.stop()
raise ListenerError("HISTORICAL", update)
if not self._running:
break
else:
data = self.listener.snap()
if data: # can return empty list
yield data
else:
# if f has finished, also stop the stream
self.stop()
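# A minimal sketch of driving BetfairStream directly, assuming a concrete
# listener and valid credentials; the app key, session token, timeout, buffer
# size and market filter below are all placeholders. In normal use
# betfairlightweight constructs this object through its own streaming API.
#
# stream = BetfairStream(
#     unique_id=0,
#     listener=BaseListener(),
#     app_key="APP_KEY",
#     session_token="SESSION_TOKEN",
#     timeout=64,
#     buffer_size=1024 * 1024,
#     host=None,                 # None resolves to stream-api.betfair.com
# )
# stream.subscribe_to_markets(
#     market_filter={"eventTypeIds": ["1"]},
#     market_data_filter={"fields": ["EX_BEST_OFFERS"]},
# )
# stream.start()                 # blocking read loop; call stop() to exit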
|
liampauling/betfairlightweight
|
betfairlightweight/streaming/betfairstream.py
|
Python
|
mit
| 12,065
|
import tensorflow as tf
import numpy as np
import os
import sys
import math
from audio import format_feedval, raw
from layers import conv1d, dilated_conv1d
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Model: neural network model for HeaviNet level
# inputs: init function
# level, integer of corresponding level
# receptive_field, integer of receptive field
# data_location, string value of data location to store neural network
# n_levels, integer value of number of total levels
# outputs:
# neural network stored in data_location with save command and loaded with load command
class Model(object):
# format_target: make target matrix into onehot format
# inputs
# target_c, matrix of target classes to be formatted into onehot
# outputs:
# target, matrix of targets with a width of n_target_classes
    # target_c, matrix of original target classes
# n_target_classes, integer of the number of targets (256 for 8 bit targets)
def format_target(self, target_c):
target = tf.one_hot(target_c, self.n_target_classes)
target = tf.reshape(target, (-1, self.n_target_classes))
return target, target_c, self.n_target_classes
# wavenet_model: creates network structure according to fast-wavenet implementation
# https://github.com/tomlepaine/fast-wavenet
# inputs
    # image, matrix of a given batch of inputs with dimensions [batch_size]
# outputs:
# outputs, matrix of computed results with shape [batch_size, n_target_classes]
def wavenet_model(self, image):
num_blocks = 2
num_layers = 14
num_hidden = 128
# reshape image according to fast-wavenet model
image = tf.reshape(image, (-1, self.batch_size, 1))
image = tf.cast(image, tf.float32)
print("Image", image.shape, image.dtype)
# compute layers according to num_layers and num_blocks
h = image
hs = []
for b in range(num_blocks):
for i in range(num_layers):
rate = 2**i
name = 'b{}-l{}'.format(b, i)
h = dilated_conv1d(h, num_hidden, rate=rate, name=name)
hs.append(h)
        # compute final layer according to n_target_classes
outputs = conv1d(h,
self.n_target_classes,
filter_width=1,
gain=1.0,
activation=None,
bias=True)
# reshape output for softmax computation
outputs = tf.reshape(outputs, (-1, self.n_target_classes))
if (not os.path.isdir(self.save_dir)):
print(" Wavenet Model")
print(" Image" , image.shape)
print(" Outputs" , outputs.shape)
print(" Batch size" , self.batch_size )
return outputs
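    # With num_blocks=2 and num_layers=14 the dilation rates run 1, 2, 4, ..., 8192
    # twice over. Assuming dilated_conv1d uses a filter width of 2 (as in the
    # fast-wavenet reference implementation), the receptive field works out to
    #     1 + 2 * sum(2**i for i in range(14)) = 1 + 2 * (2**14 - 1) = 32767
    # samples, comfortably smaller than the 75000-sample batches used by this model.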
# init: neural network model for HeaviNet level
# inputs: init function
# level, integer of corresponding level
# receptive_field, integer of receptive field
# data_location, string value of data location to store neural network
# n_levels, integer value of number of total levels
# outputs:
# neural network stored in data_location with save command and loaded with load command
def __init__(self, level, receptive_field, data_location, n_levels ):
self.batch_size = 75000
self.level = level
self.receptive_field = receptive_field
self.n_levels = n_levels
# store n_epochs with neural network
self.n_epochs = tf.get_variable("n_epochs", shape=[], dtype=tf.int32, initializer = tf.zeros_initializer)
self.in_bits = 8
self.out_bits = 8
self.n_target_classes = 2**(self.out_bits)
self.target_classes_max = self.n_target_classes - 1
# format name to save multiple levels in single directory
self.model_string = "w"+str(self.batch_size)+"_"
self.name = self.model_string + str(level) + "_r" + str(self.receptive_field)
self.seed_name = self.model_string + str(level-1) + "_r" + str(self.receptive_field)
self.save_dir = data_location + "/" + self.name
self.save_file = self.save_dir + "/" + self.name + ".ckpt"
# inputs and targets for training or generation
target_class = tf.placeholder(tf.int64, [None])
input_class = tf.placeholder(tf.float64, [None])
self.target_class = target_class
self.input_class = input_class
# format targets into class and onehot formats
nn_targets, nn_target_class, nn_n_targets = self.format_target(target_class)
# compute original wavenet logits
self.logits_original = self.wavenet_model(input_class)
# compute reversed wavenet logits
self.in_backwards = tf.reverse(input_class,[0])
with tf.variable_scope('backwards'):
self.logits_backwards = self.wavenet_model( self.in_backwards)
self.logits_b = tf.reverse(self.logits_backwards, [0])
# sum logits and backwards logits
self.logits = tf.reduce_sum( tf.stack( [self.logits_original, self.logits_b], axis=0), axis=0)
logits = self.logits
# compute prediction and prediction class based on logits
prediction = tf.nn.softmax(logits)
prediction_class = tf.argmax(prediction, dimension=1)
        # compute cross entropy and corresponding optimization
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=nn_targets)
cost = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)
correct_prediction = tf.equal(nn_target_class, prediction_class)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32) )
self.prediction_value = prediction_class
sess = tf.Session()
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
self.optimizer = optimizer
self.cost = cost
self.accuracy = accuracy
self.correct_prediction = correct_prediction
self.best_accuracy = 0
self.loss_change = 0
self.sess = sess
self.saver = saver
if( os.path.isdir(self.save_dir) ):
print("Loading previous:", self.name)
self.load()
else:
os.makedirs( self.save_dir )
print("Creating level directory at:", self.save_dir)
# train: train model on inputs according to number of epochs
# inputs:
    # ytrue_class, matrix of target values for corresponding x matrix input values
# x, matrix of level inputs
# epochs, integer of number of epochs to train
# outputs:
# none, trains neural network over inputs and targets
def train(self, ytrue_class, x, epochs=1 ):
ytrue_class = np.reshape(ytrue_class, (-1))
print("Trainging:", self.name, "Inputs", x.shape, "Targets", ytrue_class.shape, "epochs:", epochs)
# loop through the number of epochs
e = 0
print("Previous Epochs", self.n_epochs.eval(session=self.sess) )
inc_epochs = self.n_epochs.assign(self.n_epochs + epochs)
inc_epochs.op.run(session=self.sess)
while ( e < epochs ):
epoch_loss = 0
epoch_correct = 0
epoch_total = 0
for i in range(0, len(ytrue_class), self.batch_size):
# only train on factor of batch_size
if i + self.batch_size >= len(ytrue_class):
continue
# use feed dictionary with inputs and targets
feed_dict_train = {
self.target_class: ytrue_class[i:i+self.batch_size],
self.input_class: x[i:i+self.batch_size]
}
# train while calculating epoch accuracy
_, c, correct = self.sess.run([self.optimizer, self.cost, self.correct_prediction ],
feed_dict = feed_dict_train)
epoch_correct+= np.sum(correct)
epoch_total+= correct.size
epoch_loss+= c
print(" epoch", e, "loss", epoch_loss)
e = e+1
if (epoch_total != 0):
epoch_accuracy = 100.0 * float(epoch_correct) / float(epoch_total)
print(" accuracy:", epoch_accuracy)
if epoch_accuracy > self.best_accuracy :
self.best_accuracy = epoch_accuracy
# generate: generate a level given inputs
# inputs
# seed, matrix of level seed to generate next level
# outputs:
# y_generate, matrix of generated audio samples
def generate(self, seed):
y_generated = np.zeros(len(seed))
print("Generating with seed:", seed.shape)
print("Y generate", y_generated.shape )
for i in range(0, len(seed), self.batch_size):
if i + self.batch_size >= len(seed):
continue
feed_dict_gen = {self.input_class: seed[i:i+self.batch_size] }
y_g = self.sess.run( [self.prediction_value], feed_dict=feed_dict_gen)
y_generated[i:i+self.batch_size] = raw(y_g[0])
prev_epochs = self.n_epochs.eval(session=self.sess)
print("Generated song:", len(y_generated), "with Epochs", prev_epochs)
return y_generated, prev_epochs
# save: save neural network in specified self.data_location
# inputs
# close, bool value to specify if neural network should be closed
# outputs:
# none, saved neural network
def save(self, close=False):
self.saver.save(self.sess, self.save_file)
print("Saving:", self.name)
        if close:
self.sess.close()
# load: loads neural network
# inputs
    # none, requires a neural network ckpt file to be found in the named directory
# outputs:
    # none, runs saver.restore() on the given neural network
def load(self):
if os.path.isdir(self.save_dir):
try:
self.saver.restore(self.sess, self.save_file)
            except Exception:
print("Failed to load previous session")
os.rmdir(self.save_dir)
sys.exit()
return True
else:
print("Failed loading:", self.name)
return False
# close: closes neural network safely within tensorflow
# inputs
# none, closes session
# outputs:
# none, session is closed
def close(self):
print("Closing" , self.name)
self.sess.close()
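# Usage sketch (hypothetical; the enclosing class is defined earlier in this file
# and is referred to here as `Model` purely for illustration):
#
#   net = Model(level=3, receptive_field=8, data_location="./data", n_levels=8)
#   net.train(ytrue_class=targets, x=inputs, epochs=5)   # targets/inputs: 1-D arrays
#   song, epochs_so_far = net.generate(seed=inputs)
#   net.save(close=True)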
|
taylorm7/HeaviNet
|
models.py
|
Python
|
mit
| 10,781
|
################################################################################
# Copyright 2020-2021 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell cop-
# ies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IM-
# PLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNE-
# CTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
################################################################################
import yaml
import os
import sys
import argparse
from copy import deepcopy
from enum import IntEnum
verbosity = 1
def ensurePath(path):
if not os.path.exists(path):
os.makedirs(path)
return path
def allFiles(startDir):
current = os.listdir(startDir)
files = []
for filename in [_current for _current in current if os.path.splitext(_current)[-1].lower() == '.yaml']:
fullPath = os.path.join(startDir,filename)
if os.path.isdir(fullPath):
files = files + allFiles(fullPath)
else:
files.append(fullPath)
return files
def reindexSolutions(data):
for i, _ in enumerate(data[5]):
data[5][i]["SolutionIndex"] = i
return data
def fixSizeInconsistencies(sizes, fileType):
duplicates = list()
for i in range(0,len(sizes)):
currSize = sizes[i][0]
# >= so size will be trimmed when a SolutionTag is included
if len(currSize) >= 8:
currSize = currSize[:-4]
if currSize in (item for index in sizes for item in index):
duplicates.append(i-len(duplicates))
else:
sizes[i][0] = currSize
sizes_ = deepcopy(sizes)
if len(duplicates) > 0:
for i in duplicates:
sizes_.pop(i)
verbose(len(duplicates), "duplicate size(s) removed from", fileType, "logic file")
return sizes_, len(sizes_)
# remove dict keys "SolutionIndex" and "SolutionNameMin" from dict
def cmpHelper(sol):
return {k:v for k, v in sol.items() if k!="SolutionIndex" and k!="SolutionNameMin"}
def addKernel(solutionPool, solution):
for item in solutionPool:
if cmpHelper(item) == cmpHelper(solution):
index = item["SolutionIndex"]
debug("...Reuse previously existed kernel", end="")
break
else:
index = len(solutionPool)
        _solution = deepcopy(solution) # deep copy to avoid subtle aliasing errors when SolutionIndex is set below
_solution["SolutionIndex"] = index
solutionPool.append(_solution)
debug("...A new kernel has been added", end="")
debug("({}) {}".format(index, solutionPool[index]["SolutionNameMin"] if "SolutionNameMin" in solutionPool[index] else "(SolutionName N/A)"))
return solutionPool, index
# update dependent parameters if StaggerU == 0
def sanitizeSolutions(solList):
for sol in solList:
if sol.get("StaggerU") == 0:
sol["StaggerUMapping"] = 0
sol["StaggerUStride"] = 0
sol["_staggerStrideShift"] = 0
def removeUnusedKernels(origData, prefix=""):
origNumSolutions = len(origData[5])
kernelsInUse = [ index for _, [index, _] in origData[7] ]
for i, solution in enumerate(origData[5]):
solutionIndex = solution["SolutionIndex"]
origData[5][i]["__InUse__"] = True if solutionIndex in kernelsInUse else False
# debug prints
for o in [o for o in origData[5] if o["__InUse__"]==False]:
debug("{}Solution ({}) {} is unused".format(
prefix,
o["SolutionIndex"],
o["SolutionNameMin"] if "SolutionNameMin" in o else "(SolutionName N/A)"))
# filter out dangling kernels
origData[5] = [ {k: v for k, v in o.items() if k != "__InUse__"}
for o in origData[5] if o["__InUse__"]==True ]
# reindex solutions
idMap = {} # new = idMap[old]
for i, solution in enumerate(origData[5]):
idMap[solution["SolutionIndex"]] = i
origData[5][i]["SolutionIndex"] = i
for i, [size, [oldSolIndex, eff]] in enumerate(origData[7]):
origData[7][i] = [size, [idMap[oldSolIndex], eff]]
numInvalidRemoved = origNumSolutions - len(origData[5])
return origData, numInvalidRemoved
def loadData(filename):
try:
stream = open(filename, "r")
except IOError:
print("Cannot open file: ", filename)
sys.stdout.flush()
sys.exit(-1)
data = yaml.load(stream, yaml.SafeLoader)
return data
# this matches the behavior of the legacy merge script, where the incremental logic
# file always replaces the base logic file even if it is slower -
# in the future we may make the default force merge policy False
def defaultForceMergePolicy(incFile):
if "arcturus" in incFile:
forceMerge = False
else:
forceMerge = True
return forceMerge
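# e.g. (hypothetical file names):
#   defaultForceMergePolicy("vega20_Cijk.yaml")   -> True   (forced merge)
#   defaultForceMergePolicy("arcturus_Cijk.yaml") -> False  (winner-takes-all policy)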
def msg(*args, **kwargs):
for i in args: print(i, end=" ")
print(**kwargs)
def verbose(*args, **kwargs):
if verbosity < 1: return
msg(*args, **kwargs)
def debug(*args, **kwargs):
if verbosity < 2: return
msg(*args, **kwargs)
# Tags distinguishing solution types
# Can be added to size key to allow solutions of each type to be present
# in logic file for a given size
class MfmaTag(IntEnum):
VALU = 0
MFMA = 1
def __str__(self):
return ["VALU", "MFMA"][self]
def __repr__(self):
return str(self)
class AlphaValueTag(IntEnum):
ANY = 0
ONE = 1
NEG_ONE = 2
ZERO = 3
def __str__(self):
return "Alpha="+["Any", "1", "-1", "0"][self]
def __repr__(self):
return str(self)
class BetaValueTag(IntEnum):
ANY = 0
ONE = 1
NEG_ONE = 2
ZERO = 3
def __str__(self):
return "Beta="+["Any", "1", "-1", "0"][self]
def __repr__(self):
return str(self)
def strToScalarValueTag(Class, value):
if value == "Any":
return Class.ANY
if value == 1:
return Class.ONE
if value == -1:
return Class.NEG_ONE
if value == 0:
return Class.ZERO
else:
raise RuntimeError("Unsupported value for Alpha/Beta scalar value")
class CEqualsDTag(IntEnum):
C_EQ_D = 0
C_NEQ_D = 1
def __str__(self):
return ["C=D", "C!=D"][self]
def __repr__(self):
return str(self)
# Tag of form (MFMATag, AlphaValueTag, BetaValueTag, CEqualsDTag)
def getSolutionTag(solution):
tagTuple = ()
if solution.get("EnableMatrixInstruction", False) or solution.get("MatrixInstruction", False):
tagTuple = tagTuple + (MfmaTag.MFMA,)
else:
tagTuple = tagTuple + (MfmaTag.VALU,)
tagTuple = tagTuple + (strToScalarValueTag(AlphaValueTag, solution.get("AssertAlphaValue", "Any")),)
tagTuple = tagTuple + (strToScalarValueTag(BetaValueTag, solution.get("AssertBetaValue", "Any")),)
tagTuple = tagTuple + (CEqualsDTag.C_EQ_D if solution.get("AssertCEqualsD", False) else CEqualsDTag.C_NEQ_D ,)
return tagTuple
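# e.g. (hypothetical solution entry): a solution with MatrixInstruction set,
# AssertAlphaValue: 1, and no Beta/CEqualsD assertions maps to the tag
#   (MfmaTag.MFMA, AlphaValueTag.ONE, BetaValueTag.ANY, CEqualsDTag.C_NEQ_D)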
def findSolutionWithIndex(solutionData, solIndex):
# Check solution at the index corresponding to solIndex first
if solIndex < len(solutionData) and solutionData[solIndex]["SolutionIndex"] == solIndex:
return solutionData[solIndex]
else:
debug("Searching for index...")
solution = [s for s in solutionData if s["SolutionIndex"]==solIndex]
assert(len(solution) == 1)
return solution[0]
def addSolutionTagToKeys(solutionMap, solutionPool):
return [[[getSolutionTag(findSolutionWithIndex(solutionPool, idx))] + keys, [idx, eff]]
for [keys, [idx, eff]] in solutionMap]
def removeSolutionTagFromKeys(solutionMap):
return [[keys[1:], [idx, incEff]] for keys, [idx, incEff] in solutionMap]
# To be used with add_solution_tags to allow faster general solutions to supersede slower specific ones
def findFastestCompatibleSolution(origDict, sizeMapping):
tags = sizeMapping[0]
# Tag of form (MFMATag, AlphaValueTag, BetaValueTag, CEqualsDTag)
compatibleTagList = [tags]
# Add all compatible tags to the list
if tags[1] != AlphaValueTag.ANY:
compatibleTagList = compatibleTagList + [(t[0], AlphaValueTag.ANY) + t[2:] for t in compatibleTagList]
if tags[2] != BetaValueTag.ANY:
compatibleTagList = compatibleTagList + [t[:2] + (BetaValueTag.ANY,) + t[3:] for t in compatibleTagList]
if tags[3] != CEqualsDTag.C_NEQ_D:
compatibleTagList = compatibleTagList + [t[:3] + (CEqualsDTag.C_NEQ_D,) + t[4:] for t in compatibleTagList]
    # Find the fastest efficiency of all compatible tags
maxEfficiency = 0
for tag in compatibleTagList:
result = origDict.get((tag,) + sizeMapping[1:], None)
if result:
_, eff = origDict[(tag,) + sizeMapping[1:]]
maxEfficiency = max(maxEfficiency, eff)
return maxEfficiency
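# Compatibility expansion sketch: a specific tag is also checked against its more
# general variants, e.g.
#   (MfmaTag.MFMA, AlphaValueTag.ONE, BetaValueTag.ANY, CEqualsDTag.C_EQ_D)
# is compared against
#   (MfmaTag.MFMA, AlphaValueTag.ANY, BetaValueTag.ANY, CEqualsDTag.C_EQ_D)
#   (MfmaTag.MFMA, AlphaValueTag.ONE, BetaValueTag.ANY, CEqualsDTag.C_NEQ_D)
#   (MfmaTag.MFMA, AlphaValueTag.ANY, BetaValueTag.ANY, CEqualsDTag.C_NEQ_D)
# so a faster, more general solution already in origDict can win over a slower specific one.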
# returns merged logic data as list
def mergeLogic(origData, incData, forceMerge, trimSize=True, addSolutionTags=False):
origNumSizes = len(origData[7])
origNumSolutions = len(origData[5])
incNumSizes = len(incData[7])
incNumSolutions = len(incData[5])
verbose(origNumSizes, "sizes and", origNumSolutions, "kernels in base logic file")
verbose(incNumSizes, "sizes and", incNumSolutions, "kernels in incremental logic file")
# Add SolutionTag to distinguish solutions with different requirements
origTaggedSizes = addSolutionTagToKeys(origData[7], origData[5])
incTaggedSizes = addSolutionTagToKeys(incData[7], incData[5])
if addSolutionTags:
origData[7] = origTaggedSizes
incData[7] = incTaggedSizes
# Print warning if addSolutionTags=False results in removed sizes
else:
origSet = {tuple(size) for size, [_, _] in origData[7]}
origTaggedSet = {tuple(size) for size, [_, _] in origTaggedSizes}
incSet = {tuple(size) for size, [_, _] in incData[7]}
incTaggedSet = {tuple(size) for size, [_, _] in incTaggedSizes}
if len(origSet) != len(origTaggedSet):
verbose("Warning:", len(origTaggedSet) - len(origSet), "duplicate sizes are present in base logic",
"that may not be handled correctly unless --add_solution_tags is used")
if len(incSet) != len(incTaggedSet):
verbose("Warning:", len(incTaggedSet) - len(incSet), "duplicate sizes are present in incremental logic",
"that may not be handled correctly unless --add_solution_tags is used")
if trimSize:
# trim 8-tuple gemm size format to 4-tuple [m, n, b, k]
        # TODO: future gemm sizes could use a dictionary format, so this will need more robust preprocessing
[origData[7], origNumSizes] = fixSizeInconsistencies(origData[7], "base")
[incData[7], incNumSizes] = fixSizeInconsistencies(incData[7], "incremental")
sanitizeSolutions(origData[5])
sanitizeSolutions(incData[5])
origData, numOrigRemoved = removeUnusedKernels(origData, "Base logic file: ")
incData, numIncRemoved = removeUnusedKernels(incData, "Inc logic file: ")
solutionPool = deepcopy(origData[5])
solutionMap = deepcopy(origData[7])
origDict = {tuple(origSize): [i, origEff] for i, [origSize, [origIndex, origEff]] in enumerate(origData[7])}
for incSize, [incIndex, incEff] in incData[7]:
incSolution = findSolutionWithIndex(incData[5], incIndex)
try:
j, origEff = origDict[tuple(incSize)]
if incEff > origEff or forceMerge:
if incEff > origEff:
verbose("[O]", incSize, "already exists and has improved in performance.", end="")
elif forceMerge:
verbose("[!]", incSize, "already exists but does not improve in performance.", end="")
verbose("Efficiency:", origEff, "->", incEff, "(force_merge=True)" if forceMerge else "")
solutionPool, index = addKernel(solutionPool, incSolution)
solutionMap[j][1] = [index, incEff]
else:
verbose("[X]", incSize, "already exists but does not improve in performance.", end="")
verbose("Efficiency:", origEff, "->", incEff)
except KeyError:
if addSolutionTags and findFastestCompatibleSolution(origDict, tuple(incSize)) > incEff:
verbose("[X]", incSize, "has been rejected because a compatible solution already exists with higher performance")
else:
verbose("[-]", incSize, "has been added to solution table, Efficiency: N/A ->", incEff)
solutionPool, index = addKernel(solutionPool, incSolution)
solutionMap.append([incSize,[index, incEff]])
verbose(numOrigRemoved, "unused kernels removed from base logic file")
verbose(numIncRemoved, "unused kernels removed from incremental logic file")
# Remove SolutionTag for yaml output
if addSolutionTags:
solutionMap = removeSolutionTagFromKeys(solutionMap)
mergedData = deepcopy(origData)
mergedData[5] = solutionPool
mergedData[7] = solutionMap
mergedData, numReplaced = removeUnusedKernels(mergedData, "Merged data: ")
numSizesAdded = len(solutionMap)-len(origData[7])
numSolutionsAdded = len(solutionPool)-len(origData[5])
numSolutionsRemoved = numReplaced+numOrigRemoved # incremental file not counted
return [mergedData, numSizesAdded, numSolutionsAdded, numSolutionsRemoved]
def avoidRegressions(originalDir, incrementalDir, outputPath, forceMerge, trimSize=True, addSolutionTags=False):
originalFiles = allFiles(originalDir)
incrementalFiles = allFiles(incrementalDir)
ensurePath(outputPath)
# filter the incremental logic files that have the corresponding base file
incrementalFiles = [ i for i in incrementalFiles
if os.path.split(i)[-1] in [os.path.split(o)[-1] for o in originalFiles] ]
for incFile in incrementalFiles:
basename = os.path.split(incFile)[-1]
origFile = os.path.join(originalDir, basename)
forceMerge = defaultForceMergePolicy(incFile) if forceMerge is None else forceMerge
msg("Base logic file:", origFile, "| Incremental:", incFile, "| Merge policy: %s"%("Forced" if forceMerge else "Winner"), "| Trim size:", trimSize,
"| Add solution tags:", addSolutionTags)
origData = loadData(origFile)
incData = loadData(incFile)
# So far "SolutionIndex" in logic yamls has zero impact on actual 1-1 size mapping (but the order of the Solution does)
# since mergeLogic() takes that value very seriously so we reindex them here so it doesn't choke on duplicated SolutionIndex
origData = reindexSolutions(origData)
incData = reindexSolutions(incData)
mergedData, *stats = mergeLogic(origData, incData, forceMerge, trimSize, addSolutionTags)
msg(stats[0], "size(s) and", stats[1], "kernel(s) added,", stats[2], "kernel(s) removed")
with open(os.path.join(outputPath, basename), "w") as outFile:
yaml.safe_dump(mergedData,outFile,default_flow_style=None)
msg("File written to", os.path.join(outputPath, basename))
msg("------------------------------")
# partialLogicFilePaths: list of full paths to partial logic files
# outputDir: Directory to write the final result to
# forceMerge: merge policy passed through to mergeLogic (None = use the default policy per file)
# trimSize: trim the long size format down to the short [m, n, b, k] format
# Expects: that all the partial logic files
# have the same base name, but are located
# in different folders.
# Provides: one final logic file that is the
# merged result of all partial files.
# This is useful for when a tuning task is
# shared between multiple machines who each
# will provide a partial result.
def mergePartialLogics(partialLogicFilePaths, outputDir, forceMerge, trimSize=True, addSolutionTags=False):
logicFiles = deepcopy(partialLogicFilePaths)
ensurePath(outputDir)
baseLogicFile = logicFiles.pop(0)
baseLogicData = loadData(baseLogicFile)
msg("Base logic file:", baseLogicFile)
for f in logicFiles:
forceMerge = defaultForceMergePolicy(f) if forceMerge is None else forceMerge
msg("Incremental file:", f, "| Merge policy: %s"%("Forced" if forceMerge else "Winner"), "| Trim size:", trimSize)
incLogicData = loadData(f)
# So far "SolutionIndex" in logic yamls has zero impact on actual 1-1 size mapping (but the order of the Solution does)
# since mergeLogic() takes that value very seriously so we reindex them here so it doesn't choke on duplicated SolutionIndex
baseLogicData = reindexSolutions(baseLogicData)
incLogicData = reindexSolutions(incLogicData)
mergedData, *stats = mergeLogic(baseLogicData, incLogicData, forceMerge, trimSize, addSolutionTags)
msg(stats[0], "size(s) and", stats[1], "kernel(s) added,", stats[2], "kernel(s) removed")
# Use the merged data as the base data for the next partial logic file
baseLogicData = deepcopy(mergedData)
baseFileName = os.path.basename(baseLogicFile)
outputFilePath = os.path.join(outputDir, baseFileName)
with open(outputFilePath, "w") as outFile:
yaml.safe_dump(baseLogicData, outFile, default_flow_style=None)
msg("File written to", outputFilePath)
msg("------------------------------")
if __name__ == "__main__":
argParser = argparse.ArgumentParser()
argParser.add_argument("original_dir", help="The library logic directory without tuned sizes")
argParser.add_argument("incremental_dir", help="The incremental logic directory")
argParser.add_argument("output_dir", help="The output logic directory")
argParser.add_argument("-v", "--verbosity", help="0: summary, 1: verbose, 2: debug", default=1, type=int)
argParser.add_argument("--force_merge", help="Merge previously known sizes unconditionally. Default behavior if not arcturus", default="none")
argParser.add_argument("--notrim", help="Do not trim long size format down to short format (m,n,b,k). Default is --trim", action="store_false")
argParser.add_argument("--add_solution_tags", help="Add tags to the size key for solution properies, allowing for solutions with different requirements "
"to exist for the same size. Default doesn't add this tag.", action="store_true")
args = argParser.parse_args(sys.argv[1:])
originalDir = args.original_dir
incrementalDir = args.incremental_dir
outputPath = args.output_dir
verbosity = args.verbosity
forceMerge = args.force_merge.lower()
trimSize = args.notrim
add_solution_tags = args.add_solution_tags
if forceMerge in ["none"]: forceMerge=None
elif forceMerge in ["true", "1"]: forceMerge=True
elif forceMerge in ["false", "0"]: forceMerge=False
avoidRegressions(originalDir, incrementalDir, outputPath, forceMerge, trimSize, add_solution_tags)
|
ROCmSoftwarePlatform/Tensile
|
Tensile/Utilities/merge.py
|
Python
|
mit
| 19,651
|
import math
# http://www.ariel.com.au/a/python-point-int-poly.html
def point_in_polygon(point, polygon):
x, y = point['x'], point['z']
n = len(polygon)
inside = False
p1x, p1y = polygon[0]
for i in range(n + 1):
p2x, p2y = polygon[i % n]
if min(p1y, p2y) < y <= max(p1y, p2y) and x <= max(p1x, p2x):
if p1y != p2y:
xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
if p1x == p2x or x <= xinters:
inside = not inside
p1x, p1y = p2x, p2y
return inside
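# Example (ray-casting check on a hypothetical 2x2 square; only 'x' and 'z' are read):
#   square = [(0, 0), (2, 0), (2, 2), (0, 2)]
#   point_in_polygon({'x': 0.5, 'z': 0.5}, square)   # -> True
#   point_in_polygon({'x': 3.0, 'z': 0.5}, square)   # -> False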
def distance(p1, p2):
return math.hypot(p2['x'] - p1['x'], p2['z'] - p1['z'])
def is_pos_correct(pos):
if not pos or pos == {'x': 0.0, 'y': 0.0, 'z': 0.0}:
return False
return True
|
vaal-/il2_stats
|
src/mission_report/helpers.py
|
Python
|
mit
| 793
|
from django.views.generic.list import ListView
from django.views.generic.base import TemplateView
from django.views.generic.detail import DetailView
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.http import Http404, HttpResponseRedirect
from django.core.urlresolvers import reverse, reverse_lazy
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from orders.models import RaceOrder, \
LineFollowerJuniorStage, LineFollowerJuniorRaceOrder
# LineFollowerStage, LineFollowerRaceOrder,
from sumo.models import *
# class LineFollowerStageOrderListView(ListView):
# model = LineFollowerStage
# template_name = 'orders/line_follower_stage_list.html'
#
# def dispatch(self, *args, **kwargs):
# if not settings.PROJECT_ORDERS or \
# not "line_follower" in dict(settings.ORDER_CATEGORIES).keys() or \
# not LineFollowerStage.objects.filter(orders_available=True).exists():
# raise PermissionDenied
# return super(LineFollowerStageOrderListView, self).dispatch(
# *args, **kwargs)
#
# def get_queryset(self):
# return LineFollowerStage.objects.filter(orders_available=True)
# class LineFollowerRaceOrderListView(ListView):
# model = LineFollowerRaceOrder
# template_name = 'orders/race_order_list.html'
#
# def dispatch(self, *args, **kwargs):
# order = self.kwargs.get("order")
# if not LineFollowerStage.objects.filter(
# order=order, orders_available=True).exists():
# return PermissionDenied
# return super(LineFollowerRaceOrderListView, self).dispatch(
# *args, **kwargs)
#
# def get_context_data(self, **kwargs):
# context = super(LineFollowerRaceOrderListView, self).get_context_data(
# **kwargs)
# context['category'] = dict(settings.ALL_CATEGORIES)["line_follower"]
# context['stage'] = LineFollowerStage.objects.filter(
# order=self.kwargs.get("order"))[0]
# return context
#
# def get_queryset(self):
# return LineFollowerRaceOrder.objects.filter(
# stage__order=self.kwargs.get("order"))
class LineFollowerJuniorStageOrderListView(ListView):
model = LineFollowerJuniorStage
template_name = 'orders/line_follower_junior_stage_list.html'
def dispatch(self, *args, **kwargs):
if not settings.PROJECT_ORDERS or \
not "line_follower_junior" in dict(settings.ORDER_CATEGORIES).keys() or \
not LineFollowerJuniorStage.objects.filter(orders_available=True).exists():
raise PermissionDenied
return super(LineFollowerJuniorStageOrderListView, self).dispatch(
*args, **kwargs)
def get_queryset(self):
return LineFollowerJuniorStage.objects.filter(orders_available=True)
class LineFollowerJuniorRaceOrderListView(ListView):
model = LineFollowerJuniorRaceOrder
template_name = 'orders/junior_race_order_list.html'
def dispatch(self, *args, **kwargs):
order = self.kwargs.get("order")
if not LineFollowerJuniorStage.objects.filter(
order=order, orders_available=True).exists():
            raise PermissionDenied
return super(LineFollowerJuniorRaceOrderListView, self).dispatch(
*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(LineFollowerJuniorRaceOrderListView, self).get_context_data(
**kwargs)
context['category'] = dict(settings.ALL_CATEGORIES)["line_follower_junior"]
context['stage'] = LineFollowerJuniorStage.objects.filter(
order=self.kwargs.get("order"))[0]
return context
def get_queryset(self):
return LineFollowerJuniorRaceOrder.objects.filter(
stage__order=self.kwargs.get("order"))
class RaceOrderListView(ListView):
model = RaceOrder
template_name = 'orders/race_order_list.html'
def dispatch(self, *args, **kwargs):
category = self.kwargs.get('slug')
if not category in dict(settings.ALL_CATEGORIES).keys():
raise Http404
if not settings.PROJECT_ORDERS or \
not category in dict(settings.ORDER_CATEGORIES).keys():
raise PermissionDenied
if category == 'line_follower':
return HttpResponseRedirect(
reverse('line_follower_stage_order_list'))
elif category == 'line_follower_junior':
            return HttpResponseRedirect(
                reverse('line_follower_junior_stage_order_list'))
elif category == 'micro_sumo':
            raise Http404
return super(RaceOrderListView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(RaceOrderListView, self).get_context_data(**kwargs)
context['category'] = dict(
settings.ALL_CATEGORIES)[self.kwargs.get('slug')]
return context
def get_queryset(self):
return RaceOrder.objects.filter(
project__category=self.kwargs.get('slug'))
class SumoOrderHomeView(TemplateView):
template_name = "orders/sumo_home.html"
def dispatch(self, *args, **kwargs):
if not "micro_sumo" in dict(settings.ORDER_CATEGORIES).keys():
raise PermissionDenied
return super(SumoOrderHomeView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(SumoOrderHomeView, self).get_context_data(**kwargs)
context["groups"] = settings.SUMO_GROUP_ORDERS
context["stages"] = settings.SUMO_STAGE_ORDERS
context["final"] = settings.SUMO_FINAL_ORDERS
return context
class SumoOrderGroupListView(ListView):
model = SumoGroup
template_name = 'orders/sumo_group_list.html'
def dispatch(self, *args, **kwargs):
if not settings.SUMO_GROUP_ORDERS:
raise PermissionDenied
return super(SumoOrderGroupListView, self).dispatch(*args, **kwargs)
def get_queryset(self):
return SumoGroup.objects.filter(is_final=False)
class SumoOrderGroupDetailView(DetailView):
model = SumoGroup
template_name = "orders/sumo_group_detail.html"
def dispatch(self, *args, **kwargs):
if not settings.SUMO_GROUP_ORDERS:
raise PermissionDenied
return super(SumoOrderGroupDetailView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
group = self.get_object()
context = super(SumoOrderGroupDetailView, self).get_context_data(
**kwargs)
context["matches"] = SumoGroupMatch.objects.filter(group=group)
context["teams"] = SumoGroupTeam.objects.filter(group=group)
return context
class SumoOrderStageListView(ListView):
model = SumoStage
template_name = "orders/sumo_stage_list.html"
def dispatch(self, *args, **kwargs):
if not settings.SUMO_STAGE_ORDERS:
raise PermissionDenied
return super(SumoOrderStageListView, self).dispatch(*args, **kwargs)
class SumoOrderStageDetailView(ListView):
model = SumoStageMatch
template_name = "orders/sumo_stage_detail.html"
def dispatch(self, *args, **kwargs):
if not settings.SUMO_STAGE_ORDERS:
raise PermissionDenied
return super(SumoOrderStageDetailView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(SumoOrderStageDetailView, self).get_context_data(
**kwargs)
context["stage"] = SumoStage.objects.get(pk=self.kwargs.get("pk"))
return context
def get_queryset(self):
return SumoStageMatch.objects.filter(stage__pk=self.kwargs.get("pk"))
class SumoOrderFinalDetailView(TemplateView):
model = SumoGroup
template_name = "orders/sumo_group_detail.html"
def dispatch(self, *args, **kwargs):
if not settings.SUMO_FINAL_ORDERS:
raise PermissionDenied
return super(SumoOrderFinalDetailView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(SumoOrderFinalDetailView, self).get_context_data(**kwargs)
group = SumoGroup.objects.get(is_final=True)
context["group"] = group
context["teams"] = SumoGroupTeam.objects.filter(group=group)
context["matches"] = SumoGroupMatch.objects.filter(group=group)
return context
|
ITURO/ituro
|
ituro/orders/views.py
|
Python
|
mit
| 8,516
|
import _plotly_utils.basevalidators
class YpadValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="ypad", parent_name="cone.colorbar", **kwargs):
super(YpadValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "style"),
**kwargs
)
|
plotly/python-api
|
packages/python/plotly/plotly/validators/cone/colorbar/_ypad.py
|
Python
|
mit
| 482
|
"""
This module contains a set of methods that can be used for page loads and
for waiting for elements to appear on a page.
These methods improve on and expand existing WebDriver commands.
Improvements include making WebDriver commands more robust and more reliable
by giving page elements enough time to load before taking action on them.
The default option for searching for elements is by CSS Selector.
This can be changed by overriding the "By" parameter.
Options are:
By.CSS_SELECTOR
By.CLASS_NAME
By.ID
By.NAME
By.LINK_TEXT
By.XPATH
By.TAG_NAME
By.PARTIAL_LINK_TEXT
"""
import codecs
import os
import sys
import time
from selenium.common.exceptions import ElementNotInteractableException
from selenium.common.exceptions import ElementNotVisibleException
from selenium.common.exceptions import NoAlertPresentException
from selenium.common.exceptions import NoSuchAttributeException
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoSuchFrameException
from selenium.common.exceptions import NoSuchWindowException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from seleniumbase.config import settings
from seleniumbase.fixtures import shared_utils as s_utils
def is_element_present(driver, selector, by=By.CSS_SELECTOR):
"""
Returns whether the specified element selector is present on the page.
@Params
driver - the webdriver object (required)
selector - the locator for identifying the page element (required)
by - the type of selector being used (Default: By.CSS_SELECTOR)
@Returns
Boolean (is element present)
"""
try:
driver.find_element(by=by, value=selector)
return True
except Exception:
return False
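# Usage sketch (hypothetical selectors):
#   is_element_present(driver, "div#login")                          # CSS selector (default)
#   is_element_present(driver, "//div[@id='login']", by=By.XPATH)    # override with XPath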
def is_element_visible(driver, selector, by=By.CSS_SELECTOR):
"""
Returns whether the specified element selector is visible on the page.
@Params
driver - the webdriver object (required)
selector - the locator for identifying the page element (required)
by - the type of selector being used (Default: By.CSS_SELECTOR)
@Returns
Boolean (is element visible)
"""
try:
element = driver.find_element(by=by, value=selector)
return element.is_displayed()
except Exception:
return False
def is_element_enabled(driver, selector, by=By.CSS_SELECTOR):
"""
Returns whether the specified element selector is enabled on the page.
@Params
driver - the webdriver object (required)
selector - the locator for identifying the page element (required)
by - the type of selector being used (Default: By.CSS_SELECTOR)
@Returns
Boolean (is element enabled)
"""
try:
element = driver.find_element(by=by, value=selector)
return element.is_enabled()
except Exception:
return False
def is_text_visible(driver, text, selector, by=By.CSS_SELECTOR):
"""
Returns whether the specified text is visible in the specified selector.
@Params
driver - the webdriver object (required)
text - the text string to search for
selector - the locator for identifying the page element (required)
by - the type of selector being used (Default: By.CSS_SELECTOR)
@Returns
Boolean (is text visible)
"""
try:
element = driver.find_element(by=by, value=selector)
return element.is_displayed() and text in element.text
except Exception:
return False
def is_attribute_present(
driver, selector, attribute, value=None, by=By.CSS_SELECTOR
):
"""
Returns whether the specified attribute is present in the given selector.
@Params
driver - the webdriver object (required)
selector - the locator for identifying the page element (required)
attribute - the attribute that is expected for the element (required)
value - the attribute value that is expected (Default: None)
by - the type of selector being used (Default: By.CSS_SELECTOR)
@Returns
Boolean (is attribute present)
"""
try:
element = driver.find_element(by=by, value=selector)
found_value = element.get_attribute(attribute)
if found_value is None:
return False
if value is not None:
if found_value == value:
return True
else:
return False
else:
return True
except Exception:
return False
def hover_on_element(driver, selector, by=By.CSS_SELECTOR):
"""
Fires the hover event for the specified element by the given selector.
@Params
driver - the webdriver object (required)
selector - the locator for identifying the page element (required)
by - the type of selector being used (Default: By.CSS_SELECTOR)
"""
element = driver.find_element(by=by, value=selector)
hover = ActionChains(driver).move_to_element(element)
hover.perform()
def hover_element(driver, element):
"""
Similar to hover_on_element(), but uses found element, not a selector.
"""
hover = ActionChains(driver).move_to_element(element)
hover.perform()
def timeout_exception(exception, message):
exception, message = s_utils.format_exc(exception, message)
raise exception(message)
def hover_and_click(
driver,
hover_selector,
click_selector,
hover_by=By.CSS_SELECTOR,
click_by=By.CSS_SELECTOR,
timeout=settings.SMALL_TIMEOUT,
):
"""
    Fires the hover event for a specified element by a given selector, then
    clicks on another specified element. Useful for hover-based dropdown menus.
@Params
driver - the webdriver object (required)
hover_selector - the css selector to hover over (required)
click_selector - the css selector to click on (required)
hover_by - the hover selector type to search by (Default: By.CSS_SELECTOR)
click_by - the click selector type to search by (Default: By.CSS_SELECTOR)
timeout - number of seconds to wait for click element to appear after hover
"""
start_ms = time.time() * 1000.0
stop_ms = start_ms + (timeout * 1000.0)
element = driver.find_element(by=hover_by, value=hover_selector)
hover = ActionChains(driver).move_to_element(element)
for x in range(int(timeout * 10)):
try:
hover.perform()
element = driver.find_element(by=click_by, value=click_selector)
element.click()
return element
except Exception:
now_ms = time.time() * 1000.0
if now_ms >= stop_ms:
break
time.sleep(0.1)
plural = "s"
if timeout == 1:
plural = ""
message = "Element {%s} was not present after %s second%s!" % (
click_selector,
timeout,
plural,
)
timeout_exception(NoSuchElementException, message)
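# Usage sketch (hypothetical selectors for a hover-based dropdown menu):
#   hover_and_click(driver, "nav .menu", "nav .menu a.settings",
#                   hover_by=By.CSS_SELECTOR, click_by=By.CSS_SELECTOR, timeout=6)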
def hover_element_and_click(
driver,
element,
click_selector,
click_by=By.CSS_SELECTOR,
timeout=settings.SMALL_TIMEOUT,
):
"""
Similar to hover_and_click(), but assumes top element is already found.
"""
start_ms = time.time() * 1000.0
stop_ms = start_ms + (timeout * 1000.0)
hover = ActionChains(driver).move_to_element(element)
for x in range(int(timeout * 10)):
try:
hover.perform()
element = driver.find_element(by=click_by, value=click_selector)
element.click()
return element
except Exception:
now_ms = time.time() * 1000.0
if now_ms >= stop_ms:
break
time.sleep(0.1)
plural = "s"
if timeout == 1:
plural = ""
message = "Element {%s} was not present after %s second%s!" % (
click_selector,
timeout,
plural,
)
timeout_exception(NoSuchElementException, message)
def hover_element_and_double_click(
driver,
element,
click_selector,
click_by=By.CSS_SELECTOR,
timeout=settings.SMALL_TIMEOUT,
):
start_ms = time.time() * 1000.0
stop_ms = start_ms + (timeout * 1000.0)
hover = ActionChains(driver).move_to_element(element)
for x in range(int(timeout * 10)):
try:
hover.perform()
element_2 = driver.find_element(by=click_by, value=click_selector)
actions = ActionChains(driver)
actions.move_to_element(element_2)
actions.double_click(element_2)
actions.perform()
return element_2
except Exception:
now_ms = time.time() * 1000.0
if now_ms >= stop_ms:
break
time.sleep(0.1)
plural = "s"
if timeout == 1:
plural = ""
message = "Element {%s} was not present after %s second%s!" % (
click_selector,
timeout,
plural,
)
timeout_exception(NoSuchElementException, message)
def wait_for_element_present(
driver, selector, by=By.CSS_SELECTOR, timeout=settings.LARGE_TIMEOUT
):
"""
Searches for the specified element by the given selector. Returns the
element object if it exists in the HTML. (The element can be invisible.)
Raises NoSuchElementException if the element does not exist in the HTML
within the specified timeout.
@Params
driver - the webdriver object
selector - the locator for identifying the page element (required)
by - the type of selector being used (Default: By.CSS_SELECTOR)
timeout - the time to wait for elements in seconds
@Returns
A web element object
"""
element = None
start_ms = time.time() * 1000.0
stop_ms = start_ms + (timeout * 1000.0)
for x in range(int(timeout * 10)):
s_utils.check_if_time_limit_exceeded()
try:
element = driver.find_element(by=by, value=selector)
return element
except Exception:
now_ms = time.time() * 1000.0
if now_ms >= stop_ms:
break
time.sleep(0.1)
plural = "s"
if timeout == 1:
plural = ""
if not element:
message = "Element {%s} was not present after %s second%s!" % (
selector,
timeout,
plural,
)
timeout_exception(NoSuchElementException, message)
def wait_for_element_visible(
driver, selector, by=By.CSS_SELECTOR, timeout=settings.LARGE_TIMEOUT
):
"""
Searches for the specified element by the given selector. Returns the
element object if the element is present and visible on the page.
Raises NoSuchElementException if the element does not exist in the HTML
within the specified timeout.
Raises ElementNotVisibleException if the element exists in the HTML,
    but is not visible (e.g. opacity is "0") within the specified timeout.
@Params
driver - the webdriver object (required)
selector - the locator for identifying the page element (required)
by - the type of selector being used (Default: By.CSS_SELECTOR)
timeout - the time to wait for elements in seconds
@Returns
A web element object
"""
element = None
is_present = False
start_ms = time.time() * 1000.0
stop_ms = start_ms + (timeout * 1000.0)
for x in range(int(timeout * 10)):
s_utils.check_if_time_limit_exceeded()
try:
element = driver.find_element(by=by, value=selector)
is_present = True
if element.is_displayed():
return element
else:
element = None
raise Exception()
except Exception:
now_ms = time.time() * 1000.0
if now_ms >= stop_ms:
break
time.sleep(0.1)
plural = "s"
if timeout == 1:
plural = ""
if not element and by != By.LINK_TEXT:
if not is_present:
# The element does not exist in the HTML
message = "Element {%s} was not present after %s second%s!" % (
selector,
timeout,
plural,
)
timeout_exception(NoSuchElementException, message)
# The element exists in the HTML, but is not visible
message = "Element {%s} was not visible after %s second%s!" % (
selector,
timeout,
plural,
)
timeout_exception(ElementNotVisibleException, message)
if not element and by == By.LINK_TEXT:
message = "Link text {%s} was not visible after %s second%s!" % (
selector,
timeout,
plural,
)
timeout_exception(ElementNotVisibleException, message)
def wait_for_text_visible(
driver,
text,
selector,
by=By.CSS_SELECTOR,
timeout=settings.LARGE_TIMEOUT,
browser=None
):
"""
Searches for the specified element by the given selector. Returns the
element object if the text is present in the element and visible
on the page.
Raises NoSuchElementException if the element does not exist in the HTML
within the specified timeout.
Raises ElementNotVisibleException if the element exists in the HTML,
but the text is not visible within the specified timeout.
@Params
driver - the webdriver object (required)
text - the text that is being searched for in the element (required)
selector - the locator for identifying the page element (required)
by - the type of selector being used (Default: By.CSS_SELECTOR)
timeout - the time to wait for elements in seconds
@Returns
A web element object that contains the text searched for
"""
element = None
is_present = False
start_ms = time.time() * 1000.0
stop_ms = start_ms + (timeout * 1000.0)
for x in range(int(timeout * 10)):
s_utils.check_if_time_limit_exceeded()
try:
element = driver.find_element(by=by, value=selector)
is_present = True
if browser == "safari":
if (
element.is_displayed()
and text in element.get_attribute("innerText")
):
return element
else:
element = None
raise Exception()
else:
if element.is_displayed() and text in element.text:
return element
else:
element = None
raise Exception()
except Exception:
now_ms = time.time() * 1000.0
if now_ms >= stop_ms:
break
time.sleep(0.1)
plural = "s"
if timeout == 1:
plural = ""
if not element:
if not is_present:
# The element does not exist in the HTML
message = "Element {%s} was not present after %s second%s!" % (
selector,
timeout,
plural,
)
timeout_exception(NoSuchElementException, message)
# The element exists in the HTML, but the text is not visible
message = (
"Expected text {%s} for {%s} was not visible after %s second%s!"
% (text, selector, timeout, plural)
)
timeout_exception(ElementNotVisibleException, message)
def wait_for_exact_text_visible(
driver,
text,
selector,
by=By.CSS_SELECTOR,
timeout=settings.LARGE_TIMEOUT,
browser=None
):
"""
Searches for the specified element by the given selector. Returns the
element object if the text matches exactly with the text in the element,
and the text is visible.
Raises NoSuchElementException if the element does not exist in the HTML
within the specified timeout.
Raises ElementNotVisibleException if the element exists in the HTML,
but the exact text is not visible within the specified timeout.
@Params
driver - the webdriver object (required)
text - the exact text that is expected for the element (required)
selector - the locator for identifying the page element (required)
by - the type of selector being used (Default: By.CSS_SELECTOR)
timeout - the time to wait for elements in seconds
@Returns
A web element object that contains the text searched for
"""
element = None
is_present = False
start_ms = time.time() * 1000.0
stop_ms = start_ms + (timeout * 1000.0)
for x in range(int(timeout * 10)):
s_utils.check_if_time_limit_exceeded()
try:
element = driver.find_element(by=by, value=selector)
is_present = True
if browser == "safari":
if (
element.is_displayed()
and text.strip() == element.get_attribute(
"innerText").strip()
):
return element
else:
element = None
raise Exception()
else:
if (
element.is_displayed()
and text.strip() == element.text.strip()
):
return element
else:
element = None
raise Exception()
except Exception:
now_ms = time.time() * 1000.0
if now_ms >= stop_ms:
break
time.sleep(0.1)
plural = "s"
if timeout == 1:
plural = ""
if not element:
if not is_present:
# The element does not exist in the HTML
message = "Element {%s} was not present after %s second%s!" % (
selector,
timeout,
plural,
)
timeout_exception(NoSuchElementException, message)
# The element exists in the HTML, but the exact text is not visible
message = (
"Expected exact text {%s} for {%s} was not visible "
"after %s second%s!" % (text, selector, timeout, plural)
)
timeout_exception(ElementNotVisibleException, message)
def wait_for_attribute(
driver,
selector,
attribute,
value=None,
by=By.CSS_SELECTOR,
timeout=settings.LARGE_TIMEOUT,
):
"""
Searches for the specified element attribute by the given selector.
Returns the element object if the expected attribute is present
and the expected attribute value is present (if specified).
Raises NoSuchElementException if the element does not exist in the HTML
within the specified timeout.
Raises NoSuchAttributeException if the element exists in the HTML,
but the expected attribute/value is not present within the timeout.
@Params
driver - the webdriver object (required)
selector - the locator for identifying the page element (required)
attribute - the attribute that is expected for the element (required)
value - the attribute value that is expected (Default: None)
by - the type of selector being used (Default: By.CSS_SELECTOR)
timeout - the time to wait for the element attribute in seconds
@Returns
A web element object that contains the expected attribute/value
"""
element = None
element_present = False
attribute_present = False
found_value = None
start_ms = time.time() * 1000.0
stop_ms = start_ms + (timeout * 1000.0)
for x in range(int(timeout * 10)):
s_utils.check_if_time_limit_exceeded()
try:
element = driver.find_element(by=by, value=selector)
element_present = True
attribute_present = False
found_value = element.get_attribute(attribute)
if found_value is not None:
attribute_present = True
else:
element = None
raise Exception()
if value is not None:
if found_value == value:
return element
else:
element = None
raise Exception()
else:
return element
except Exception:
now_ms = time.time() * 1000.0
if now_ms >= stop_ms:
break
time.sleep(0.1)
plural = "s"
if timeout == 1:
plural = ""
if not element:
if not element_present:
# The element does not exist in the HTML
message = "Element {%s} was not present after %s second%s!" % (
selector,
timeout,
plural,
)
timeout_exception(NoSuchElementException, message)
if not attribute_present:
# The element does not have the attribute
message = (
"Expected attribute {%s} of element {%s} was not present "
"after %s second%s!" % (attribute, selector, timeout, plural)
)
timeout_exception(NoSuchAttributeException, message)
# The element attribute exists, but the expected value does not match
message = (
"Expected value {%s} for attribute {%s} of element {%s} was not "
"present after %s second%s! (The actual value was {%s})"
% (value, attribute, selector, timeout, plural, found_value)
)
timeout_exception(NoSuchAttributeException, message)
def wait_for_element_absent(
driver, selector, by=By.CSS_SELECTOR, timeout=settings.LARGE_TIMEOUT
):
"""
Searches for the specified element by the given selector.
Raises an exception if the element is still present after the
specified timeout.
@Params
driver - the webdriver object
selector - the locator for identifying the page element (required)
by - the type of selector being used (Default: By.CSS_SELECTOR)
timeout - the time to wait for elements in seconds
"""
start_ms = time.time() * 1000.0
stop_ms = start_ms + (timeout * 1000.0)
for x in range(int(timeout * 10)):
s_utils.check_if_time_limit_exceeded()
try:
driver.find_element(by=by, value=selector)
now_ms = time.time() * 1000.0
if now_ms >= stop_ms:
break
time.sleep(0.1)
except Exception:
return True
plural = "s"
if timeout == 1:
plural = ""
message = "Element {%s} was still present after %s second%s!" % (
selector,
timeout,
plural,
)
timeout_exception(Exception, message)
def wait_for_element_not_visible(
driver, selector, by=By.CSS_SELECTOR, timeout=settings.LARGE_TIMEOUT
):
"""
Searches for the specified element by the given selector.
Raises an exception if the element is still visible after the
specified timeout.
@Params
driver - the webdriver object (required)
selector - the locator for identifying the page element (required)
by - the type of selector being used (Default: By.CSS_SELECTOR)
timeout - the time to wait for the element in seconds
"""
start_ms = time.time() * 1000.0
stop_ms = start_ms + (timeout * 1000.0)
for x in range(int(timeout * 10)):
s_utils.check_if_time_limit_exceeded()
try:
element = driver.find_element(by=by, value=selector)
if element.is_displayed():
now_ms = time.time() * 1000.0
if now_ms >= stop_ms:
break
time.sleep(0.1)
else:
return True
except Exception:
return True
plural = "s"
if timeout == 1:
plural = ""
message = "Element {%s} was still visible after %s second%s!" % (
selector,
timeout,
plural,
)
timeout_exception(Exception, message)
def wait_for_text_not_visible(
driver, text, selector, by=By.CSS_SELECTOR, timeout=settings.LARGE_TIMEOUT
):
"""
Searches for the text in the element of the given selector on the page.
Returns True if the text is not visible on the page within the timeout.
Raises an exception if the text is still present after the timeout.
@Params
driver - the webdriver object (required)
text - the text that is being searched for in the element (required)
selector - the locator for identifying the page element (required)
by - the type of selector being used (Default: By.CSS_SELECTOR)
timeout - the time to wait for elements in seconds
@Returns
    True if the text is no longer visible within the timeout
"""
start_ms = time.time() * 1000.0
stop_ms = start_ms + (timeout * 1000.0)
for x in range(int(timeout * 10)):
s_utils.check_if_time_limit_exceeded()
if not is_text_visible(driver, text, selector, by=by):
return True
now_ms = time.time() * 1000.0
if now_ms >= stop_ms:
break
time.sleep(0.1)
plural = "s"
if timeout == 1:
plural = ""
message = "Text {%s} in {%s} was still visible after %s second%s!" % (
text,
selector,
timeout,
plural,
)
timeout_exception(Exception, message)
def wait_for_attribute_not_present(
driver,
selector,
attribute,
value=None,
by=By.CSS_SELECTOR,
timeout=settings.LARGE_TIMEOUT
):
"""
Searches for the specified element attribute by the given selector.
Returns True if the attribute isn't present on the page within the timeout.
Also returns True if the element is not present within the timeout.
Raises an exception if the attribute is still present after the timeout.
@Params
driver - the webdriver object (required)
selector - the locator for identifying the page element (required)
attribute - the element attribute (required)
value - the attribute value (Default: None)
by - the type of selector being used (Default: By.CSS_SELECTOR)
timeout - the time to wait for the element attribute in seconds
"""
start_ms = time.time() * 1000.0
stop_ms = start_ms + (timeout * 1000.0)
for x in range(int(timeout * 10)):
s_utils.check_if_time_limit_exceeded()
if not is_attribute_present(
driver, selector, attribute, value=value, by=by
):
return True
now_ms = time.time() * 1000.0
if now_ms >= stop_ms:
break
time.sleep(0.1)
plural = "s"
if timeout == 1:
plural = ""
message = (
"Attribute {%s} of element {%s} was still present after %s second%s!"
"" % (attribute, selector, timeout, plural)
)
if value:
message = (
"Value {%s} for attribute {%s} of element {%s} was still present "
"after %s second%s!"
"" % (value, attribute, selector, timeout, plural)
)
timeout_exception(Exception, message)
def find_visible_elements(driver, selector, by=By.CSS_SELECTOR):
"""
Finds all WebElements that match a selector and are visible.
Similar to webdriver.find_elements.
@Params
driver - the webdriver object (required)
selector - the locator for identifying the page element (required)
by - the type of selector being used (Default: By.CSS_SELECTOR)
"""
elements = driver.find_elements(by=by, value=selector)
try:
v_elems = [element for element in elements if element.is_displayed()]
return v_elems
except (StaleElementReferenceException, ElementNotInteractableException):
time.sleep(0.1)
elements = driver.find_elements(by=by, value=selector)
v_elems = []
for element in elements:
if element.is_displayed():
v_elems.append(element)
return v_elems
def save_screenshot(
driver, name, folder=None, selector=None, by=By.CSS_SELECTOR
):
"""
Saves a screenshot of the current page.
If no folder is specified, uses the folder where pytest was called.
The screenshot will include the entire page unless a selector is given.
If a provided selector is not found, then takes a full-page screenshot.
If the folder provided doesn't exist, it will get created.
The screenshot will be in PNG format: (*.png)
"""
if not name.endswith(".png"):
name = name + ".png"
if folder:
abs_path = os.path.abspath(".")
file_path = abs_path + "/%s" % folder
if not os.path.exists(file_path):
os.makedirs(file_path)
screenshot_path = "%s/%s" % (file_path, name)
else:
screenshot_path = name
if selector:
try:
element = driver.find_element(by=by, value=selector)
element_png = element.screenshot_as_png
with open(screenshot_path, "wb") as file:
file.write(element_png)
except Exception:
if driver:
driver.get_screenshot_as_file(screenshot_path)
else:
pass
else:
if driver:
driver.get_screenshot_as_file(screenshot_path)
else:
pass
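# Usage sketch (hypothetical names/paths):
#   save_screenshot(driver, "login_page")                            # full-page screenshot
#   save_screenshot(driver, "logo.png", folder="latest_logs",
#                   selector="img#logo")                             # element-only screenshot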
def save_page_source(driver, name, folder=None):
"""
Saves the page HTML to the current directory (or given subfolder).
If the folder specified doesn't exist, it will get created.
@Params
name - The file name to save the current page's HTML to.
folder - The folder to save the file to. (Default = current folder)
"""
from seleniumbase.core import log_helper
if not name.endswith(".html"):
name = name + ".html"
if folder:
abs_path = os.path.abspath(".")
file_path = abs_path + "/%s" % folder
if not os.path.exists(file_path):
os.makedirs(file_path)
html_file_path = "%s/%s" % (file_path, name)
else:
html_file_path = name
page_source = driver.page_source
html_file = codecs.open(html_file_path, "w+", "utf-8")
rendered_source = log_helper.get_html_source_with_base_href(
driver, page_source
)
html_file.write(rendered_source)
html_file.close()
def _get_last_page(driver):
try:
last_page = driver.current_url
except Exception:
last_page = "[WARNING! Browser Not Open!]"
if len(last_page) < 5:
last_page = "[WARNING! Browser Not Open!]"
return last_page
def save_test_failure_data(driver, name, browser_type=None, folder=None):
"""
Saves failure data to the current directory, or to a subfolder if provided.
If {name} does not end in ".txt", it will get added to it.
If {browser_type} is provided, the logs will include that.
If the folder provided doesn't exist, it will get created.
"""
import traceback
if not name.endswith(".txt"):
name = name + ".txt"
if folder:
abs_path = os.path.abspath(".")
file_path = abs_path + "/%s" % folder
if not os.path.exists(file_path):
os.makedirs(file_path)
failure_data_file_path = "%s/%s" % (file_path, name)
else:
failure_data_file_path = name
failure_data_file = codecs.open(failure_data_file_path, "w+", "utf-8")
last_page = _get_last_page(driver)
data_to_save = []
data_to_save.append("Last_Page: %s" % last_page)
if browser_type:
data_to_save.append("Browser: %s " % browser_type)
data_to_save.append(
"Traceback: "
+ "".join(
traceback.format_exception(
sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2]
)
)
)
failure_data_file.writelines("\r\n".join(data_to_save))
failure_data_file.close()
def wait_for_and_accept_alert(driver, timeout=settings.LARGE_TIMEOUT):
"""
Wait for and accept an alert. Returns the text from the alert.
@Params
driver - the webdriver object (required)
timeout - the time to wait for the alert in seconds
"""
alert = wait_for_and_switch_to_alert(driver, timeout)
alert_text = alert.text
alert.accept()
return alert_text
def wait_for_and_dismiss_alert(driver, timeout=settings.LARGE_TIMEOUT):
"""
Wait for and dismiss an alert. Returns the text from the alert.
@Params
driver - the webdriver object (required)
timeout - the time to wait for the alert in seconds
"""
alert = wait_for_and_switch_to_alert(driver, timeout)
alert_text = alert.text
alert.dismiss()
return alert_text
def wait_for_and_switch_to_alert(driver, timeout=settings.LARGE_TIMEOUT):
"""
Wait for a browser alert to appear, and switch to it. This should be usable
as a drop-in replacement for driver.switch_to.alert when the alert box
may not exist yet.
@Params
driver - the webdriver object (required)
timeout - the time to wait for the alert in seconds
"""
start_ms = time.time() * 1000.0
stop_ms = start_ms + (timeout * 1000.0)
for x in range(int(timeout * 10)):
s_utils.check_if_time_limit_exceeded()
try:
alert = driver.switch_to.alert
# Raises exception if no alert present
dummy_variable = alert.text # noqa
return alert
except NoAlertPresentException:
now_ms = time.time() * 1000.0
if now_ms >= stop_ms:
break
time.sleep(0.1)
message = "Alert was not present after %s seconds!" % timeout
timeout_exception(Exception, message)
def switch_to_frame(driver, frame, timeout=settings.SMALL_TIMEOUT):
"""
Wait for an iframe to appear, and switch to it. This should be
usable as a drop-in replacement for driver.switch_to.frame().
@Params
driver - the webdriver object (required)
frame - the frame element, name, id, index, or selector
    timeout - the time to wait for the frame in seconds
"""
from seleniumbase.fixtures import page_utils
start_ms = time.time() * 1000.0
stop_ms = start_ms + (timeout * 1000.0)
for x in range(int(timeout * 10)):
s_utils.check_if_time_limit_exceeded()
try:
driver.switch_to.frame(frame)
return True
except (NoSuchFrameException, TimeoutException):
if type(frame) is str:
by = None
if page_utils.is_xpath_selector(frame):
by = By.XPATH
else:
by = By.CSS_SELECTOR
if is_element_visible(driver, frame, by=by):
try:
element = driver.find_element(by=by, value=frame)
driver.switch_to.frame(element)
return True
except Exception:
pass
now_ms = time.time() * 1000.0
if now_ms >= stop_ms:
break
time.sleep(0.1)
plural = "s"
if timeout == 1:
plural = ""
message = "Frame {%s} was not visible after %s second%s!" % (
frame,
timeout,
plural,
)
timeout_exception(Exception, message)
def switch_to_window(driver, window, timeout=settings.SMALL_TIMEOUT):
"""
Wait for a window to appear, and switch to it. This should be usable
as a drop-in replacement for driver.switch_to.window().
@Params
driver - the webdriver object (required)
window - the window index or window handle
timeout - the time to wait for the window in seconds
"""
start_ms = time.time() * 1000.0
stop_ms = start_ms + (timeout * 1000.0)
if isinstance(window, int):
for x in range(int(timeout * 10)):
s_utils.check_if_time_limit_exceeded()
try:
window_handle = driver.window_handles[window]
driver.switch_to.window(window_handle)
return True
except IndexError:
now_ms = time.time() * 1000.0
if now_ms >= stop_ms:
break
time.sleep(0.1)
plural = "s"
if timeout == 1:
plural = ""
message = "Window {%s} was not present after %s second%s!" % (
window,
timeout,
plural,
)
timeout_exception(Exception, message)
else:
window_handle = window
for x in range(int(timeout * 10)):
s_utils.check_if_time_limit_exceeded()
try:
driver.switch_to.window(window_handle)
return True
except NoSuchWindowException:
now_ms = time.time() * 1000.0
if now_ms >= stop_ms:
break
time.sleep(0.1)
plural = "s"
if timeout == 1:
plural = ""
message = "Window {%s} was not present after %s second%s!" % (
window,
timeout,
plural,
)
timeout_exception(Exception, message)
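# --- Editor's note: illustrative usage sketch, not part of the original module. ---
# The docstrings above describe wait_for_and_switch_to_alert() and switch_to_frame()
# as drop-in replacements for driver.switch_to.alert / driver.switch_to.frame that
# also wait for the target to appear. A minimal (hypothetical) helper might combine
# them like this; the selector names below are placeholders.
def _example_accept_alert_then_enter_frame(driver):
    # Click something that triggers a JavaScript alert, then wait for and accept it.
    driver.find_element(by=By.CSS_SELECTOR, value="#alert-button").click()
    alert_text = wait_for_and_accept_alert(driver, timeout=10)
    # Switch into an iframe by CSS selector, waiting for it to become available first.
    switch_to_frame(driver, "iframe#content", timeout=7)
    return alert_text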
|
seleniumbase/SeleniumBase
|
seleniumbase/fixtures/page_actions.py
|
Python
|
mit
| 37,037
|
#!/usr/bin/python2.4 -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic string exercises
# D. verbing
# Given a string, if its length is at least 3,
# add 'ing' to its end.
# Unless it already ends in 'ing', in which case
# add 'ly' instead.
# If the string length is less than 3, leave it unchanged.
# Return the resulting string.
def verbing(s):
    if len(s) < 3:
        return s
    elif s[-3:] == 'ing':
        return s + 'ly'
    else:
        return s + 'ing'
# E. not_bad
# Given a string, find the first appearance of the
# substring 'not' and 'bad'. If the 'bad' follows
# the 'not', replace the whole 'not'...'bad' substring
# with 'good'.
# Return the resulting string.
# So 'This dinner is not that bad!' yields:
# This dinner is good!
def not_bad(s):
    ns = s.find("not")
    nb = s.find("bad")
    # Only substitute when both words are present and 'bad' comes after 'not'.
    if ns != -1 and nb != -1 and ns < nb:
        s = s[:ns] + "good" + s[nb + 3:]
    return s
# F. front_back
# Consider dividing a string into two halves.
# If the length is even, the front and back halves are the same length.
# If the length is odd, we'll say that the extra char goes in the front half.
# e.g. 'abcde', the front half is 'abc', the back half 'de'.
# Given 2 strings, a and b, return a string of the form
# a-front + b-front + a-back + b-back
def front_back(a, b):
    # sum(divmod(n, 2)) == n//2 + n%2, so an odd-length string puts the extra char in the front half.
    afront = sum(divmod(len(a), 2))
    bfront = sum(divmod(len(b), 2))
    return a[:afront] + b[:bfront] + a[afront:] + b[bfront:]
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# main() calls the above functions with interesting inputs,
# using the above test() to check if the result is correct or not.
def main():
print 'verbing'
test(verbing('hail'), 'hailing')
test(verbing('swiming'), 'swimingly')
test(verbing('do'), 'do')
print
print 'not_bad'
test(not_bad('This movie is not so bad'), 'This movie is good')
test(not_bad('This dinner is not that bad!'), 'This dinner is good!')
test(not_bad('This tea is not hot'), 'This tea is not hot')
test(not_bad("It's bad yet not"), "It's bad yet not")
print
print 'front_back'
test(front_back('abcd', 'xy'), 'abxcdy')
test(front_back('abcde', 'xyz'), 'abcxydez')
test(front_back('Kitten', 'Donut'), 'KitDontenut')
if __name__ == '__main__':
main()
|
arghasen/Poker
|
google/basic/string2.py
|
Python
|
mit
| 2,594
|
# sudo pip install pygal
# This is just documentation/sketch code; it will be modified once we figure out how to fetch the data as an array.
import pygal
data_val = [1, 2, 3, 4]  # data values in list (array) format
camera1 = 'Nixon'  # additional info, such as the camera name, as a string
bar_chart = pygal.Bar()  # create the bar graph
bar_chart.title = "Flickr Queries"  # set the title
bar_chart.x_labels = list(map(str, range(2000, 2004)))  # x-axis labels
bar_chart.add(camera1, data_val)  # add the data series
bar_chart.render_to_file('bar_chart.svg')  # render the graph to an SVG file (open it in a browser to view)
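# --- Editor's sketch (hypothetical): once query results arrive as a list, extra
# --- series can be added the same way before rendering. "camera2"/"data_val2"
# --- are placeholder names, not part of the original script.
camera2 = 'Canon'
data_val2 = [2, 1, 4, 3]
bar_chart.add(camera2, data_val2)          # second data series on the same chart
bar_chart.render_to_file('bar_chart.svg')  # re-render the SVG with both series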
|
rayxke/Redshift-Project
|
flask/pygalgraph.py
|
Python
|
mit
| 566
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class AvailableServiceAliasesOperations(object):
"""AvailableServiceAliasesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
location, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.AvailableServiceAliasesResult"]
"""Gets all available service aliases for this subscription in this region.
:param location: The location.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AvailableServiceAliasesResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_11_01.models.AvailableServiceAliasesResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailableServiceAliasesResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AvailableServiceAliasesResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/availableServiceAliases'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
location, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.AvailableServiceAliasesResult"]
"""Gets all available service aliases for this resource group in this region.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param location: The location.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AvailableServiceAliasesResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_11_01.models.AvailableServiceAliasesResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailableServiceAliasesResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'location': self._serialize.url("location", location, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AvailableServiceAliasesResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/locations/{location}/availableServiceAliases'} # type: ignore
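    # --- Editor's usage sketch; not part of the generated SDK file. ---
    # Both operations return an azure.core.paging.ItemPaged: iterating it yields the
    # deserialized alias items and follows next_link paging behind the scenes.
    # "network_client" and the argument values below are hypothetical.
    #
    #   for alias in network_client.available_service_aliases.list(location="westus"):
    #       print(alias)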
|
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_11_01/operations/_available_service_aliases_operations.py
|
Python
|
mit
| 9,072
|
from crypto.hashes.hashinterface import HashInterface
from Crypto.Hash import SHA512 as libsha512
class SHA512(HashInterface):
def hashString(self, stringMessage):
sha512 = libsha512.new()
sha512.update(stringMessage.encode())
return sha512.digest()
def getDigestSize(self):
return 64
def isValidHash(self, stringMessage, hashBytes):
return self.hashString(stringMessage) == hashBytes
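# --- Editor's usage sketch (hypothetical), not part of the original module. ---
#   hasher = SHA512()
#   digest = hasher.hashString("hello world")    # 64-byte SHA-512 digest
#   assert len(digest) == hasher.getDigestSize()
#   assert hasher.isValidHash("hello world", digest)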
|
bensoer/pychat
|
crypto/hashes/sha512.py
|
Python
|
mit
| 444
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('todos', '0005_auto_20150126_1238'),
]
operations = [
migrations.AlterField(
model_name='todo',
name='due',
field=models.DateField(default=datetime.datetime.now),
preserve_default=True,
),
]
|
Anjali2906/lifetab
|
todos/migrations/0006_auto_20150126_1500.py
|
Python
|
mit
| 462
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "DjangoTutorial.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
pranavj1001/LearnLanguages
|
python/DjangoTutorial/manage.py
|
Python
|
mit
| 812
|
#!/usr/bin/env python
import sys
import re
import shlex
import os
import copy  # used below for copy.deepcopy() of alignments
from argparse import ArgumentParser
try:
from Bio import AlignIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import generic_dna
from Bio.Align import MultipleSeqAlignment
except ImportError:
sys.exit('Biopython needs to be installed to use this script')
from pygot.biopyutils import *
#from oryzautils import *
from pygot.utils import extract_sequences_and_stuff_from_nexus_file
#use argparse module to parse commandline input
parser = ArgumentParser()
parser.add_argument('-m', '--masterfile', type=str, default=None, required=True,
help='file containing list of gffs, toplevels, names, etc. (required)')
parser.add_argument('filenames', nargs='*', default=[],
help='series of alignment files')
parser.add_argument('-na', '--no-alignments', default=False, action='store_true',
help='don\'t actually write alignment files, just the code strings')
#now process the command line
options = parser.parse_args()
#file with lines containing short taxon identifiers, sequence files and gff files for each taxon
allTaxonInfo = get_taxon_genomic_information_dict(options.masterfile, readToplevels=False, usePickle=True)
for nfile in options.filenames:
print nfile
taxToSequenceDict = {}
beginningLinesInNexus = []
endLinesInNexus = []
extract_sequences_and_stuff_from_nexus_file(nfile, taxToSequenceDict, beginningLinesInNexus, endLinesInNexus)
print len(taxToSequenceDict), len(beginningLinesInNexus), len(endLinesInNexus)
parsedAlignment = extract_all_information_for_seqs_in_alignments(nfile)
convertedSequences = {}
intronsStrippedSequences = {}
exonsStrippedSequences = {}
parsed = parsedAlignment[0][1]
for taxon in oryza.taxon_names:
shortName = oryza.long_name_to_short(taxon)
if shortName in allTaxonInfo:
this = [x[1] for x in parsed if x[0] == taxon]
if len(this) > 0:
thisParsedSeqInfo = this[0]
feat = allTaxonInfo[shortName].gff_feature_dict[re.sub('FGT', 'FG', thisParsedSeqInfo.name)]
#D'OH! problem here with satj, in that start of gene is not start of coding due to UTRs being in exons
#THIS WOULD NEED TO BE CHANGED IF UTRs WERE INCLUDED IN SATIVA SEQS AGAIN
#adjust_feature_coords([feat], -int_feature_location(feat)[0])
if feat.strand == -1:
adjust_feature_coords([feat], -find_cds_end_coordinate(feat))
else:
adjust_feature_coords([feat], -find_cds_start_coordinate(feat))
exons = get_features_by_name(feat, "CDS")
exonPos = []
for exon in exons:
r = int_feature_location(exon)
exonPos.extend(xrange(r[0], r[1]))
if feat.strand == -1:
#this will "flip" the positions, since the first base of the gene on the neg strand
#is the one with the largest coord
n = max(exonPos)
exonPos = [ n - pos for pos in exonPos ]
exonPos.sort()
rawPos = 0
seq = taxToSequenceDict[taxon]
newSeq = [ char.upper() for char in seq ]
seqLen = len(newSeq)
#this is to mask off a terminal stop codon, which CDS sequences aligned as AA's won't have
#there is probably a better algorithm for this
stops = [ ['T', 'A', 'G'], ['T', 'A', 'A'], ['T', 'G', 'A'] ]
bases = [ b for b in newSeq if b != '-' ]
endCodon = bases[-3:]
codonPos = 2
if endCodon in stops:
for pos in xrange(len(newSeq) - 1, -1, -1):
if newSeq[pos] == endCodon[codonPos]:
newSeq[pos] = 'N'
codonPos -= 1
if codonPos == -1:
break
#noIntronSeq = copy.deepcopy(newSeq)
#noExonSeq = copy.deepcopy(newSeq)
noIntronSeq = list(newSeq)
noExonSeq = list(newSeq)
for alignedPos, char in enumerate(seq):
if char.isalpha():
if rawPos not in exonPos:
newSeq[alignedPos] = char.lower()
noIntronSeq[alignedPos] = 'N'
else:
noExonSeq[alignedPos] = 'N'
if char not in ['-', '?']:
rawPos += 1
convertedSequences[taxon] = newSeq
intronsStrippedSequences[taxon] = noIntronSeq
exonsStrippedSequences[taxon] = noExonSeq
#write an alignment that is identical except in the case of intron and exon characters
#write an alignment that only contains exon characters, with characters annotated as introns changed to N's
#write an alignment that only contains intron characters, with characters annotated as exons changed to N's
if not options.no_alignments:
for (sequences, directory) in [ (convertedSequences, "alteredCases"), (intronsStrippedSequences, "intronCharsStripped"), (exonsStrippedSequences, "exonCharsStripped") ]:
if not os.path.exists(directory):
os.makedirs(directory)
f = directory + '/' + 'aligned.blink.' + parsedAlignment[0][0] + '.nex'
outnex = open(f, 'w')
outnex.write('%s' % ''.join(beginningLinesInNexus[:-1]))
outnex.write('[this can be used to view upper and lower case as different characters]\n[format datatype=standard respectcase missing=? gap=- symbols="a c g t n A C G T N";]\n')
outnex.write('%s' % ''.join(beginningLinesInNexus[-1]))
for taxon in oryza.taxon_names:
if taxon in sequences:
outnex.write("\'%s\'\t%s\n" % (taxon, ''.join(sequences[taxon])))
outnex.write('%s' % ''.join(endLinesInNexus))
    #now write versions with all missing columns removed
for (sequences, directory) in [ (intronsStrippedSequences, "intronCharsStripped.collapsed"), (exonsStrippedSequences, "exonCharsStripped.collapsed") ]:
if not os.path.exists(directory):
os.makedirs(directory)
collapsedSequences = dict((k, []) for k in sequences.keys())
for site in xrange(seqLen):
thisCol = []
for tax, seq in sequences.items():
thisCol.append(seq[site])
if len(''.join(thisCol).translate(None, 'nN?-')):
for tax, seq in sequences.items():
collapsedSequences[tax].append(seq[site])
newLen = len(collapsedSequences.values()[0])
lenStr = 'nchar=%d;' % newLen
f = directory + '/' + 'aligned.blink.' + parsedAlignment[0][0] + '.nex'
outnex = open(f, 'w')
#outnex.write('%s' % ''.join(beginningLinesInNexus[:-1]))
outnex.write('%s' % re.sub('nchar.*;', lenStr, ''.join(beginningLinesInNexus[:-1])))
outnex.write('[this can be used to view upper and lower case as different characters]\n[format datatype=standard respectcase missing=? gap=- symbols="a c g t n A C G T N";]\n')
outnex.write('%s' % ''.join(beginningLinesInNexus[-1]))
for taxon in oryza.taxon_names:
if taxon in sequences:
#outnex.write("\'%s\'\t%s\n" % (taxon, ''.join(sequences[taxon])))
outnex.write("\'%s\'\t%s\n" % (taxon, ''.join(collapsedSequences[taxon])))
outnex.write('%s' % ''.join(endLinesInNexus))
countColumns = True
if countColumns:
align = MultipleSeqAlignment( [SeqRecord(Seq(''.join(convertedSequences[taxon]), generic_dna), id=taxon) for taxon in oryza.taxon_names if taxon in convertedSequences] )
empty_align = MultipleSeqAlignment( [SeqRecord(Seq('', generic_dna), id=taxon) for taxon in oryza.taxon_names if taxon in convertedSequences] )
mixedG = []
mixedNG = []
intronG = []
intronNG = []
exonG = []
exonNG = []
pureG = []
codeString = []
informativeCounts = []
#codeTranslation will be indexed by codeTranslation[bool(gaps)][bool(upper)][bool(lower)]
#X shouldn't be possible
codeTranslation = [['X', 'I'], ['E', 'M']], [['G', 'J'], ['F', 'N']]
'''
G = full gap
E, F = full exon (no gap, gap)
I, J = full intron (no gap, gap)
M, N = mixed intron/exon (no gap, gap)
'''
for colnum in xrange(len(align[0])):
#this should pull out individual columns
colStr = align[:, colnum]
lower = re.search("[a-z]", colStr)
upper = re.search("[A-Z]", colStr)
gaps = re.search("-", colStr)
lowerCountNoNs = len(re.findall("[a-mo-z]", colStr))
upperCountNoNs = len(re.findall("[A-MO-Z]", colStr))
informativeCount = lowerCountNoNs + upperCountNoNs
if not (lower or upper or gaps):
raise RuntimeError('WTF is up with seq column %d?: %s' % (colnum, colStr))
codeString.append(codeTranslation[bool(gaps)][bool(upper)][bool(lower)])
informativeCounts.append(informativeCount)
if gaps:
if lower and upper:
mixedG.append(colnum)
elif lower:
intronG.append(colnum)
elif upper:
exonG.append(colnum)
else:
pureG.append(colnum)
else:
if lower and upper:
mixedNG.append(colnum)
elif lower:
intronNG.append(colnum)
elif upper:
exonNG.append(colnum)
mixedIntronsAndExons = sorted(mixedG + mixedNG)
intronsOnly = sorted(intronG + intronNG)
exonsOnly = sorted(exonG + exonNG)
noFullIntrons = sorted(mixedIntronsAndExons + exonsOnly)
noFullExons = sorted(mixedIntronsAndExons + intronsOnly)
noGaps = sorted(mixedNG + exonNG + intronNG)
someGaps = sorted(mixedG + exonG + intronG)
codeStringFilename = 'aligned.blink.%s.dat' % extract_core_filename(nfile)
directory = 'codeStrings'
if not os.path.exists(directory):
os.makedirs(directory)
with open(directory + '/' + codeStringFilename, 'w') as strFile:
#strFile.write('%s\n' % '\n'.join(codeString))
strFile.write('%s\n' % '\n'.join(['%s\t%s\t%d' % (code, count, site + 1) for (code, count, site) in zip(codeString, informativeCounts, xrange(len(codeString)))]))
        if not options.no_alignments:
            outnex.write('[\nmixed %d\nmixedG %d\nmixedNG %d\nintron %d\nintronG %d\nintronNG %d\nexon %d\nexonG %d\nexonNG %d\nnoGaps %d\nsomeGaps %d\n]\n' % (
                len(mixedIntronsAndExons),
                len(mixedG),
                len(mixedNG),
                len(intronsOnly),
                len(intronG),
                len(intronNG),
                len(exonsOnly),
                len(exonG),
                len(exonNG),
                len(noGaps),
                len(someGaps),
                )
            )
newAligns = [
(mixedIntronsAndExons, 'mixedIntronsAndExons'),
(intronsOnly, 'intronsOnly'),
(exonsOnly, 'exonsOnly'),
(noFullIntrons, 'noFullIntrons'),
(noFullExons, 'noFullExons'),
(noGaps, 'noGaps'),
(someGaps, 'someGaps')
]
outputFilename = 'aligned.blink.%s.nex' % extract_core_filename(nfile)
for (sites, directory) in newAligns:
if not os.path.exists(directory):
os.makedirs(directory)
thisAln = copy.deepcopy(empty_align)
for site in sites:
thisAln += align[:, site:site + 1]
if len(thisAln[0].seq):
fname = directory + '/' + outputFilename
#outfile = open(fname, 'w')
#AlignIO.write(thisAln, outfile, 'nexus')
#outfile.close()
#I think that this will work and automatically close the file, since we need to open and read it below
AlignIO.write(thisAln, fname, 'nexus')
                #this is a little silly, but to add in this one line we need to read and rewrite the whole file
                #read the existing lines first, because opening the file with 'w' below truncates it
                nexus_lines = open(fname, 'r').readlines()
                outfile = open(fname, 'w')
                for l in nexus_lines:
                    if 'format' in l:
                        outfile.write('[this can be used to view upper and lower case as different characters]\n[format datatype=standard respectcase missing=? gap=- symbols="a c g t n A C G T N";]\n')
                    outfile.write(l)
                outfile.write("[columns:\n%s\n]\n" % ' '.join([str(c + 1) for c in sites]))
                outfile.close()
outnex.close()
|
zwickl/pygot
|
scripts/intronExonAlignments.py
|
Python
|
mit
| 13,381
|
from api.forms import ContextAwareModelForm, HashidModelChoiceField
from django import forms
from django.utils.translation import ugettext_lazy as _
from submissions.models import Submission
from voting.models import Vote
from .fields import VoteValueField
class SendVoteForm(ContextAwareModelForm):
value = VoteValueField()
submission = HashidModelChoiceField(queryset=Submission.objects.all())
def clean(self):
cleaned_data = super().clean()
submission = cleaned_data.get("submission")
if submission.conference and not submission.conference.is_voting_open:
raise forms.ValidationError(_("The voting session is not open!"))
logged_user = self.context["request"].user
if not logged_user.can_vote(submission.conference):
raise forms.ValidationError(_("You cannot vote without a ticket"))
def save(self, commit=True):
request = self.context["request"]
submission = self.cleaned_data.get("submission")
try:
self.instance = Vote.objects.get(user=request.user, submission=submission)
except Vote.DoesNotExist:
pass
self.instance.user = request.user
self.instance.value = self.cleaned_data["value"]
return super().save(commit=commit)
class Meta:
model = Vote
fields = ("submission", "value")
|
patrick91/pycon
|
backend/api/voting/forms.py
|
Python
|
mit
| 1,378
|
import os
import sys
import string
from SCons.Script import *
from utils import _make_path_relative
BuildOptions = {}
Projects = []
WD_Root = ''
Env = None
class Win32Spawn:
def spawn(self, sh, escape, cmd, args, env):
import subprocess
newargs = string.join(args[1:], ' ')
cmdline = cmd + " " + newargs
startupinfo = subprocess.STARTUPINFO()
proc = subprocess.Popen(cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, startupinfo=startupinfo, shell = False)
data, err = proc.communicate()
rv = proc.wait()
if data:
print data
if err:
print err
if rv:
return rv
return 0
def PrepareBuilding(env, root_directory, has_libcpu=False, remove_components = []):
import SCons.cpp
import wdconfig
global BuildOptions
global Projects
global Env
global WD_Root
Env = env
WD_Root = root_directory
    # add compatibility with Keil MDK 4.6 which changes the directory of armcc.exe
if wdconfig.PLATFORM == 'armcc':
if not os.path.isfile(os.path.join(wdconfig.EXEC_PATH, 'armcc.exe')):
if wdconfig.EXEC_PATH.find('bin40') > 0:
wdconfig.EXEC_PATH = wdconfig.EXEC_PATH.replace('bin40', 'armcc/bin')
# patch for win32 spawn
if env['PLATFORM'] == 'win32' and wdconfig.PLATFORM == 'gcc':
win32_spawn = Win32Spawn()
win32_spawn.env = env
env['SPAWN'] = win32_spawn.spawn
if env['PLATFORM'] == 'win32':
os.environ['PATH'] = wdconfig.EXEC_PATH + ";" + os.environ['PATH']
else:
os.environ['PATH'] = wdconfig.EXEC_PATH + ":" + os.environ['PATH']
# add program path
env.PrependENVPath('PATH', wdconfig.EXEC_PATH)
# parse wdconfig.h to get used component
PreProcessor = SCons.cpp.PreProcessor()
f = file('wdconfig.h', 'r')
contents = f.read()
f.close()
PreProcessor.process_contents(contents)
BuildOptions = PreProcessor.cpp_namespace
# add copy option
AddOption('--copy',
dest='copy',
action='store_true',
default=False,
help='copy rt-thread directory to local.')
AddOption('--copy-header',
dest='copy-header',
action='store_true',
default=False,
help='copy header of rt-thread directory to local.')
# add build library option
AddOption('--buildlib',
dest='buildlib',
type='string',
help='building library of a component')
# add target option
AddOption('--target',
dest='target',
type='string',
help='set target project: mdk')
#{target_name:(CROSS_TOOL, PLATFORM)}
tgt_dict = {'mdk':('keil', 'armcc'),
'mdk4':('keil', 'armcc'),
'iar':('iar', 'iar'),
'vs':('msvc', 'cl')}
tgt_name = GetOption('target')
if tgt_name:
SetOption('no_exec', 1)
try:
wdconfig.CROSS_TOOL, wdconfig.PLATFORM = tgt_dict[tgt_name]
except KeyError:
            print 'Unknown target: %s. Available targets: %s' % \
                  (tgt_name, ', '.join(tgt_dict.keys()))
sys.exit(1)
elif (GetDepend('RT_USING_NEWLIB') == False and GetDepend('RT_USING_NOLIBC') == False) \
and wdconfig.PLATFORM == 'gcc':
AddDepend('RT_USING_MINILIBC')
#env['CCCOMSTR'] = "CC $TARGET"
#env['ASCOMSTR'] = "AS $TARGET"
#env['LINKCOMSTR'] = "Link $TARGET"
# board build script
objs = SConscript('SConscript', variant_dir='build', duplicate=0)
Repository(WD_Root)
# include kernel
#objs.append(SConscript(WD_Root + '/src/SConscript', variant_dir='build/src', duplicate=0))
# include libcpu
#if not has_libcpu:
# objs.append(SConscript(WD_Root + '/libcpu/SConscript', variant_dir='build/libcpu', duplicate=0))
# include components
#objs.append(SConscript(WD_Root + '/components/SConscript',
# variant_dir='build/components',
# duplicate=0,
# exports='remove_components'))
return objs
def PrepareModuleBuilding(env, root_directory):
import SCons.cpp
import wdconfig
global BuildOptions
global Projects
global Env
global WD_Root
Env = env
WD_Root = root_directory
# add program path
env.PrependENVPath('PATH', wdconfig.EXEC_PATH)
def GetConfigValue(name):
assert type(name) == str, 'GetConfigValue: only string parameter is valid'
try:
return BuildOptions[name]
except:
return ''
def GetDepend(depend):
building = True
if type(depend) == type('str'):
if not BuildOptions.has_key(depend) or BuildOptions[depend] == 0:
building = False
elif BuildOptions[depend] != '':
return BuildOptions[depend]
return building
# for list type depend
for item in depend:
if item != '':
if not BuildOptions.has_key(item) or BuildOptions[item] == 0:
building = False
return building
def AddDepend(option):
BuildOptions[option] = 1
def MergeGroup(src_group, group):
src_group['src'] = src_group['src'] + group['src']
if group.has_key('CCFLAGS'):
if src_group.has_key('CCFLAGS'):
src_group['CCFLAGS'] = src_group['CCFLAGS'] + group['CCFLAGS']
else:
src_group['CCFLAGS'] = group['CCFLAGS']
if group.has_key('CPPPATH'):
if src_group.has_key('CPPPATH'):
src_group['CPPPATH'] = src_group['CPPPATH'] + group['CPPPATH']
else:
src_group['CPPPATH'] = group['CPPPATH']
if group.has_key('CPPDEFINES'):
if src_group.has_key('CPPDEFINES'):
src_group['CPPDEFINES'] = src_group['CPPDEFINES'] + group['CPPDEFINES']
else:
src_group['CPPDEFINES'] = group['CPPDEFINES']
if group.has_key('LINKFLAGS'):
if src_group.has_key('LINKFLAGS'):
src_group['LINKFLAGS'] = src_group['LINKFLAGS'] + group['LINKFLAGS']
else:
src_group['LINKFLAGS'] = group['LINKFLAGS']
if group.has_key('LIBS'):
if src_group.has_key('LIBS'):
src_group['LIBS'] = src_group['LIBS'] + group['LIBS']
else:
src_group['LIBS'] = group['LIBS']
if group.has_key('LIBPATH'):
if src_group.has_key('LIBPATH'):
src_group['LIBPATH'] = src_group['LIBPATH'] + group['LIBPATH']
else:
src_group['LIBPATH'] = group['LIBPATH']
def DefineGroup(name, src, depend, **parameters):
global Env
if not GetDepend(depend):
return []
group = parameters
group['name'] = name
if type(src) == type(['src1', 'str2']):
group['src'] = File(src)
else:
group['src'] = src
if group.has_key('CCFLAGS'):
Env.Append(CCFLAGS = group['CCFLAGS'])
if group.has_key('CPPPATH'):
Env.Append(CPPPATH = group['CPPPATH'])
if group.has_key('CPPDEFINES'):
Env.Append(CPPDEFINES = group['CPPDEFINES'])
if group.has_key('LINKFLAGS'):
Env.Append(LINKFLAGS = group['LINKFLAGS'])
if group.has_key('LIBS'):
Env.Append(LIBS = group['LIBS'])
if group.has_key('LIBPATH'):
Env.Append(LIBPATH = group['LIBPATH'])
objs = Env.Object(group['src'])
if group.has_key('LIBRARY'):
objs = Env.Library(name, objs)
# merge group
for g in Projects:
if g['name'] == name:
# merge to this group
MergeGroup(g, group)
return objs
# add a new group
Projects.append(group)
return objs
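# --- Editor's note: illustrative (hypothetical) use of DefineGroup from a component
# --- SConscript, following the usual RT-Thread convention this build script derives from:
#
#   from building import *
#   cwd = GetCurrentDir()
#   src = Glob('*.c')
#   group = DefineGroup('Kernel', src, depend = [''], CPPPATH = [cwd])
#   Return('group')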
def GetCurrentDir():
conscript = File('SConscript')
fn = conscript.rfile()
name = fn.name
path = os.path.dirname(fn.abspath)
return path
PREBUILDING = []
def RegisterPreBuildingAction(act):
global PREBUILDING
assert callable(act), 'Could only register callable objects. %s received' % repr(act)
PREBUILDING.append(act)
def PreBuilding():
global PREBUILDING
for a in PREBUILDING:
a()
def DoBuilding(target, objects):
program = None
# check whether special buildlib option
lib_name = GetOption('buildlib')
if lib_name:
print lib_name
# build library with special component
for Group in Projects:
if Group['name'] == lib_name:
objects = Env.Object(Group['src'])
program = Env.Library(lib_name, objects)
break
else:
program = Env.Program(target, objects)
EndBuilding(target, program)
def EndBuilding(target, program = None):
import wdconfig
from keil import MDKProject
from keil import MDK4Project
from iar import IARProject
from vs import VSProject
Env.AddPostAction(target, wdconfig.POST_ACTION)
if GetOption('target') == 'mdk':
template = os.path.isfile('template.Uv2')
if template:
MDKProject('project.Uv2', Projects)
else:
template = os.path.isfile('template.uvproj')
if template:
MDK4Project('project.uvproj', Projects)
else:
print 'No template project file found.'
if GetOption('target') == 'mdk4':
MDK4Project('project.uvproj', Projects)
if GetOption('target') == 'iar':
IARProject('project.ewp', Projects)
if GetOption('target') == 'vs':
VSProject('project.vcproj', Projects, program)
if GetOption('copy') and program != None:
MakeCopy(program)
if GetOption('copy-header') and program != None:
MakeCopyHeader(program)
def SrcRemove(src, remove):
if type(src[0]) == type('str'):
for item in src:
if os.path.basename(item) in remove:
src.remove(item)
return
for item in src:
if os.path.basename(item.rstr()) in remove:
src.remove(item)
def GetVersion():
import SCons.cpp
import string
rtdef = os.path.join(WD_Root, 'include', 'rtdef.h')
# parse rtdef.h to get RT-Thread version
    preprocessor = SCons.cpp.PreProcessor()
    f = file(rtdef, 'r')
    contents = f.read()
    f.close()
    preprocessor.process_contents(contents)
    def_ns = preprocessor.cpp_namespace
version = int(filter(lambda ch: ch in '0123456789.', def_ns['RT_VERSION']))
subversion = int(filter(lambda ch: ch in '0123456789.', def_ns['RT_SUBVERSION']))
if def_ns.has_key('RT_REVISION'):
revision = int(filter(lambda ch: ch in '0123456789.', def_ns['RT_REVISION']))
return '%d.%d.%d' % (version, subversion, revision)
return '0.%d.%d' % (version, subversion)
def GlobSubDir(sub_dir, ext_name):
import os
import glob
def glob_source(sub_dir, ext_name):
list = os.listdir(sub_dir)
src = glob.glob(os.path.join(sub_dir, ext_name))
for item in list:
full_subdir = os.path.join(sub_dir, item)
if os.path.isdir(full_subdir):
src += glob_source(full_subdir, ext_name)
return src
dst = []
src = glob_source(sub_dir, ext_name)
for item in src:
dst.append(os.path.relpath(item, sub_dir))
return dst
def do_copy_file(src, dst):
import shutil
# check source file
if not os.path.exists(src):
return
path = os.path.dirname(dst)
# mkdir if path not exist
if not os.path.exists(path):
os.makedirs(path)
shutil.copy2(src, dst)
def do_copy_folder(src_dir, dst_dir):
import shutil
# check source directory
if not os.path.exists(src_dir):
return
if os.path.exists(dst_dir):
shutil.rmtree(dst_dir)
shutil.copytree(src_dir, dst_dir)
source_ext = ["c", "h", "s", "S", "cpp", "xpm"]
source_list = []
def walk_children(child):
global source_list
global source_ext
# print child
full_path = child.rfile().abspath
file_type = full_path.rsplit('.',1)[1]
#print file_type
if file_type in source_ext:
if full_path not in source_list:
source_list.append(full_path)
children = child.all_children()
if children != []:
for item in children:
walk_children(item)
def MakeCopy(program):
global source_list
global WD_Root
global Env
target_path = os.path.join(Dir('#').abspath, 'rt-thread')
if Env['PLATFORM'] == 'win32':
WD_Root = WD_Root.lower()
else:
WD_Root = WD_Root
if target_path.startswith(WD_Root):
return
for item in program:
walk_children(item)
source_list.sort()
    # filter source files in RT-Thread
target_list = []
for src in source_list:
if Env['PLATFORM'] == 'win32':
src = src.lower()
if src.startswith(WD_Root):
target_list.append(src)
source_list = target_list
# get source path
src_dir = []
for src in source_list:
src = src.replace(WD_Root, '')
if src[0] == os.sep or src[0] == '/':
src = src[1:]
path = os.path.dirname(src)
sub_path = path.split(os.sep)
full_path = WD_Root
for item in sub_path:
full_path = os.path.join(full_path, item)
if full_path not in src_dir:
src_dir.append(full_path)
for item in src_dir:
source_list.append(os.path.join(item, 'SConscript'))
for src in source_list:
dst = src.replace(WD_Root, '')
if dst[0] == os.sep or dst[0] == '/':
dst = dst[1:]
print '=> ', dst
dst = os.path.join(target_path, dst)
do_copy_file(src, dst)
# copy tools directory
print "=> tools"
do_copy_folder(os.path.join(WD_Root, "tools"), os.path.join(target_path, "tools"))
do_copy_file(os.path.join(WD_Root, 'AUTHORS'), os.path.join(target_path, 'AUTHORS'))
do_copy_file(os.path.join(WD_Root, 'COPYING'), os.path.join(target_path, 'COPYING'))
def MakeCopyHeader(program):
global source_ext
source_ext = []
source_ext = ["h", "xpm"]
global source_list
global WD_Root
global Env
target_path = os.path.join(Dir('#').abspath, 'rt-thread')
if Env['PLATFORM'] == 'win32':
WD_Root = WD_Root.lower()
else:
WD_Root = WD_Root
if target_path.startswith(WD_Root):
return
for item in program:
walk_children(item)
source_list.sort()
    # filter source files in RT-Thread
target_list = []
for src in source_list:
if Env['PLATFORM'] == 'win32':
src = src.lower()
if src.startswith(WD_Root):
target_list.append(src)
source_list = target_list
for src in source_list:
dst = src.replace(WD_Root, '')
if dst[0] == os.sep or dst[0] == '/':
dst = dst[1:]
print '=> ', dst
dst = os.path.join(target_path, dst)
do_copy_file(src, dst)
# copy tools directory
print "=> tools"
do_copy_folder(os.path.join(WD_Root, "tools"), os.path.join(target_path, "tools"))
do_copy_file(os.path.join(WD_Root, 'AUTHORS'), os.path.join(target_path, 'AUTHORS'))
do_copy_file(os.path.join(WD_Root, 'COPYING'), os.path.join(target_path, 'COPYING'))
|
fwindpeak/lava-emu-c
|
tools/building.py
|
Python
|
mit
| 15,516
|
#!/usr/bin/python
# coding: utf-8
class Solution(object):
    def findShortestSubArray(self, nums):
        # left/right record the first/last index at which each value appears;
        # count records how many times each value appears.
        left, right, count = {}, {}, {}
        for i, x in enumerate(nums):
            if x not in left:
                left[x] = i
            right[x] = i
            count[x] = count.get(x, 0) + 1
        ans = len(nums)
        # The degree of the array is the highest frequency of any value; the answer is
        # the shortest window spanning all occurrences of some value with that frequency.
        degree = max(count.values())
        for x in count:
            if count[x] == degree:
                ans = min(ans, right[x] - left[x] + 1)
        return ans
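# --- Editor's example, not part of the original file. ---
if __name__ == "__main__":
    # [1, 2, 2, 3, 1] has degree 2 (1 and 2 both appear twice); the shortest
    # subarray with that degree is [2, 2], so the expected answer is 2.
    print(Solution().findShortestSubArray([1, 2, 2, 3, 1]))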
|
Lanceolata/code-problems
|
python/leetcode_easy/Question_196_Degree_of_an_Array.py
|
Python
|
mit
| 483
|
from uuid import uuid4
from apollo.choices import STATION_TYPE_CHOICES, STATION_RIG, RENTAL_STATUS_TYPES, RENTAL_DELIVERY_REQUESTED
from django.core.validators import RegexValidator
from django.db import models
class Station(models.Model):
"""
Object model for determining where physical equipment would be delivered to and which business manages the station.
    Also known as a rig in most cases; however, the terminology is now more general (in case we start renting
    out equipment to other places in the future, such as a drilling pit).
"""
type = models.CharField(
max_length=2, help_text="What type of station is this?", choices=STATION_TYPE_CHOICES, default=STATION_RIG
)
name = models.CharField(
max_length=255, help_text="What is the name of this station?"
)
description = models.TextField(
blank=True, help_text="What is the description for this station? How does a user get to this station?"
)
uuid = models.CharField(
max_length=36, default=uuid4, unique=True,
help_text="What is the universally unique identifier for this station?",
validators=[RegexValidator(regex="^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$")]
)
def __str__(self):
return "{type}: {name}".format(type=self.get_type_display(), name=self.name)
def __unicode__(self):
return u"{type}: {name}".format(type=self.get_type_display(), name=self.name)
class StationBusiness(models.Model):
"""
Object model for linking Stations and Businesses. Determines permissions and purchases for stations.
Station administrators can invite other businesses into the station.
"""
business = models.ForeignKey(
'business.Business', help_text="Which business comprises this station to business membership?"
)
station = models.ForeignKey(
'Station', help_text="Which station comprises this station to business membership?"
)
class Meta:
unique_together = ('station', 'business')
index_together = ('station', 'business')
def __str__(self):
return "{uname}: {business}".format(uname=self.station, business=self.business.name)
def __unicode__(self):
return u"{uname}: {business}".format(uname=self.station, business=self.business.name)
class StationRental(models.Model):
"""
    Object model for linking equipment and stations. Created when a user purchases a charge type; it pulls the
    various price list items into the station and associates state.
"""
station = models.ForeignKey(
'Station', help_text="Which station is this rental located at?"
)
equipment = models.ForeignKey(
'assets.Equipment', help_text="Which equipment is this rental representing?"
)
status = models.CharField(
max_length=2, help_text="What is the status of this rental?", choices=RENTAL_STATUS_TYPES,
default=RENTAL_DELIVERY_REQUESTED
)
last_modified = models.DateTimeField(auto_now=True, help_text="When was this rental last modified?")
def __str__(self):
return "{status}: {equipment}".format(status=self.get_status_display(), equipment=self.equipment)
def __unicode__(self):
return u"{status}: {equipment}".format(status=self.get_status_display(), equipment=self.equipment)
|
awwong1/apollo
|
applications/station/models.py
|
Python
|
mit
| 3,350
|
import os
usage = "usage : program LearningMethod Feature PositivePath NegativPath SavePath"
base_learner_path = "../build-Learner-Desktop_Qt_5_5_0_MSVC2013_64bit-Release/release/Learner.exe"
#base_learner_path = "\""+base_learner_path+"\""
learning_method = str(1)
learning_feature = str(0)
positive_data_path = "../../BinPicking Images/positive/set3"
negative_data_path = "../../BinPicking Images/negative/all"
save_path = "./MLP.txt"
# Quote the paths only for the printable command line; the exec call below
# receives the raw, unquoted arguments.
learn_cmd = base_learner_path + " " \
            + learning_method + " " \
            + learning_feature + " " \
            + "\"" + positive_data_path + "\"" + " " \
            + "\"" + negative_data_path + "\"" + " " \
            + "\"" + save_path + "\""
print(learn_cmd)
# os.execl replaces the current process image, so nothing after this call runs.
# By convention, the first argument after the executable path is argv[0].
os.execl(base_learner_path, base_learner_path, learning_method, learning_feature,
         positive_data_path, negative_data_path, save_path)
|
goddoe/Learner
|
Learner/LearningTool/Learning.py
|
Python
|
mit
| 954
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-04-18 20:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lotes', '0015_lote_local'),
]
operations = [
migrations.AlterField(
model_name='lote',
name='local',
field=models.CharField(default=None, max_length=3),
),
]
|
anselmobd/fo2
|
src/lotes/migrations/0016_auto_20180418_1740.py
|
Python
|
mit
| 451
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# See: http://effbot.org/tkinterbook/panedwindow.htm
# http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/panedwindow.html
import tkinter as tk
root = tk.Tk()
paned_window = tk.PanedWindow(root, orient=tk.HORIZONTAL, sashwidth=2, sashrelief=tk.RIDGE)
paned_window.pack(fill=tk.BOTH, expand=1)
top = tk.Label(paned_window, text="left pane")
paned_window.add(top, minsize=100, height=200, width=200)
bottom = tk.Label(paned_window, text="right pane")
paned_window.add(bottom, minsize=100, height=200, width=200)
root.mainloop()
|
jeremiedecock/snippets
|
python/tkinter/python3/paned_window_horizontal.py
|
Python
|
mit
| 1,700
|
# Copyright (c) Andrew Helge Cox 2016-2019.
# All rights reserved worldwide.
#
# Parse the vulkan XML specifiction using Python XML APIs to generate
# a file of C++ functions to initialize standard Vulkan API structs.
#
# Usage, from project root:
#
# wget https://raw.githubusercontent.com/KhronosGroup/Vulkan-Docs/main/xml/vk.xml
# python3 tools/scripts/gen_info_struct_wrappers.py > krust/public-api/vulkan_struct_init.h
#
# If the library fails to compile due to unknown vulkan symbols, try downloading
# the tagged xml file that matches the vulkan headers installed on your system
# that you are building against. E.g. on Ubuntu, check the package version of
# vulkan-headers and use that for the tag. Tagged versions live on paths like:
# https://github.com/KhronosGroup/Vulkan-Docs/releases/tag/v1.2.176
# from where you can navigate to find the raw file blob.
#
# wget https://raw.githubusercontent.com/KhronosGroup/Vulkan-Docs/67f599afee77b0e598e7a325f13b9878edcacdfd/xml/vk.xml
#
# The API used:
# https://docs.python.org/3/library/xml.etree.elementtree.html
#
import xml.etree.ElementTree
import re
path_to_spec='./vk.xml'
# Structures to not generate for:
IGNORED_STRUCTS = {
'VkBaseInStructure': True, # < Because we never make one: it is an "abstract base class".
'VkBaseOutStructure': True, # < Because we never make one: it is an "abstract base class".
'VkPhysicalDeviceFeatures': True, # < Because it is big and we will typically query the implementation, change a few fields, and send back the diff.
'VkTransformMatrixKHR' : True # < Because it holds a 2D matrix which our generated code mishandles [todo: fix this]
}
# Structures to not generate the longer function with parameters for:
IGNORED_STRUCTS_ALL_PARAMS = {
'VkPhysicalDeviceProperties2': True, # < Because it is big and we will typically query the implementation, change a few fields, and send back the diff
'VkPhysicalDeviceDescriptorIndexingFeaturesEXT': True,
}
ARRAY_LEN = "array_len"
root = xml.etree.ElementTree.parse(path_to_spec).getroot()
# Print to standard error for printf debugging.
import sys
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
# Convert a structure name into the ALL_UPPER code for it:
# The RE is fragile and may require updating with new spec versions.
structNameSplitter = re.compile(r'(ID|8Bit|16Bit|Float16|Int8|Int64|Uint8|[1-9][1-9]*|[A-Z][a-z]+|[A-Z][^A-Z\d]+|[A-Z][A-Z]+)')
def StructToCode(name):
name = name[2:len(name)] # Nibble off Vk prefix.
pieces = structNameSplitter.findall(name)
code = "VK_STRUCTURE_TYPE_"
for piece in pieces:
upper = piece.upper()
code = code + upper + "_"
code = code[0:-1]
# Fixup any special cases that violate the general convention:
if code == 'VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT':
code = 'VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT'
elif code == 'VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_WS_STATE_CREATE_INFO_NV':
code = 'VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV'
elif code == 'VK_STRUCTURE_TYPE_TEXTURE_LODG_FORMAT_PROPERTIES_AMD':
code = 'VK_STRUCTURE_TYPE_TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD'
elif code == 'VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCIB_INFO_PROPERTIES_EXT':
code = 'VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT'
elif code == 'VK_STRUCTURE_TYPE_IMAGE_VIEW_ASTCD_MODE_EXT':
code = 'VK_STRUCTURE_TYPE_IMAGE_VIEW_ASTC_DECODE_MODE_EXT'
elif code == 'VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTCD_FEATURES_EXT':
code = 'VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT'
elif code == 'VK_STRUCTURE_TYPE_GEOMETRY_AABBNV':
code = 'VK_STRUCTURE_TYPE_GEOMETRY_AABB_NV'
elif code == 'VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTCHDRF_EXT':
code = 'VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES_EXT'
elif code == 'VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SMB_PROPERTIES_NV':
code = 'VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_PROPERTIES_NV'
elif code == 'VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SMB_FEATURES_NV':
code = 'VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_FEATURES_NV'
elif code == 'VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_11_FEATURES':
code = 'VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES'
elif code == 'VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_12_FEATURES':
code = 'VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES'
elif code == 'VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_11_PROPERTIES':
code = 'VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES'
elif code == 'VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_12_PROPERTIES':
code = 'VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES'
return code
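# --- Editor's note (worked example, not in the original script): following the
# --- convention above, StructToCode('VkImageCreateInfo') produces
# --- 'VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO'.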
# Capture the info we need out of the DOM:
arrayCapture = re.compile(r"\s*\[(\d+)\]\s*")
nonWhitespace = re.compile(r"[^\s]")
constKeyword = re.compile(r"\bconst\b")
structs = [] # < For each struct we generate wrappers for, the data captured from the xml tree to let us generate the wrapper for it.
platform_to_macro = {} # < For each platform, the associated macro for ifdefs
type_macros = {} # < For each type the macro which ifdefs its use, else None.
# Find all the platforms so we can use the macros defined in here for ifdefing
# rather than hard-coding them:
platforms = root.find('platforms')
for platform in platforms.findall('platform'):
name = platform.get('name')
protect = platform.get('protect')
platform_to_macro[name] = protect
# Find all the extensions which are platform-specific so we can wrap their
# struct creation functions in platform macro ifdefs:
extensions = root.find('extensions')
for extension in extensions.findall('extension'):
platform = extension.get('platform')
if(platform != None):
for atype in extension.iter('type'):
type_name = atype.get('name')
type_macros[type_name] = platform_to_macro[platform]
# Grab the structs we want to wrap:
for atype in root.iter('type'):
name = atype.get('name')
if (atype.get('category') == 'struct') & (name != None):
if (name not in IGNORED_STRUCTS) and (atype.get('alias') == None) and (atype.get('returnedonly') != 'true'):
members = []
member_names = {}
for amember in atype.findall('member'):
member = {}
for aelem in list(amember):
member[aelem.tag] = aelem.text # < this includes name and type nested elements
if(aelem.tag == 'type'):
# Add const to front of type if it is floating in member text:
if(amember.text != None):
if(constKeyword.search(amember.text) != None):
member[aelem.tag] = "const " + member[aelem.tag]
if (aelem.tail != None):
if (nonWhitespace.search(aelem.tail) != None):
type_suffix = aelem.tail.strip()
member[aelem.tag] += type_suffix
elif(aelem.tag == 'name'):
member_names[aelem.text] = True
                        # Look for an array size in the tail text of this name element inside the member element:
if(aelem.tail != None):
array_size_enum = amember.find('enum')
if(array_size_enum == None):
                                m = arrayCapture.search(aelem.tail)
if(m != None):
member[ARRAY_LEN] = int(m.group(1))
else:
member[ARRAY_LEN] = array_size_enum.text
#eprint(name, "member", aelem.text, "array size =", array_size_enum.text)
members.append(member)
#print(members)
struct = {}
STRUCT_TYPE = StructToCode(name)
struct['name'] = name
struct['STRUCT_TYPE'] = STRUCT_TYPE
struct['members'] = members
if ('sType' in member_names) & ('pNext' in member_names):
struct['tagged'] = True
else:
struct['tagged'] = False
structs.append(struct)
#else:
#eprint("Ignoring", name, ", alias", atype.get('alias'))
# Free memory of the DOM ASAP:
root = None
# Big strings to include in generated code:
COPYRIGHT = '''// Copyright (c) 2016-2020 Andrew Helge Cox
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
'''
GENERATED_WARNING='''/// @note Do not edit this file. It is generated from the Vulkan XML
/// specification by the script at `tools/scripts/gen_info_struct_wrappers.py`.
'''
FILE_COMMENT='''/**
* @file Three sets of functions used to initialize Vulkan API structs.
* 1. Functions to initialize the type ID enum and pNext extension pointer
* of Vulkan API structures which require those as their first two fields.
* 2. Functions to initialize all members of Vulkan API structures, automatically
* supplying the type ID and pNext extension pointer while requiring all other
* fields to be supplied by the user.
 * 3. Functions to initialize all members of small Vulkan structures from parameters
* supplied by the user.
*
* @see VulkanTaggedStructSimpleInit, VulkanTaggedStructParamsInit,
* VulkanUntaggedStructParamsInit
*/
'''
INCLUDES='''// External includes:
#include <vulkan/vulkan.h>
'''
FILE_TOP = '''#ifndef KRUST_STRUCT_INIT_H_INCLUDED_E26EF
#define KRUST_STRUCT_INIT_H_INCLUDED_E26EF
''' + COPYRIGHT + GENERATED_WARNING + FILE_COMMENT + INCLUDES + '''namespace Krust
{
'''
SIMPLE_TOP='''/**
* @name VulkanTaggedStructSimpleInit For each Vulkan API struct tagged with a
* type enum and possessing an extension pointer, a function to initialize the
* first two fields of that struct.
*
* The use of these functions saves some code and makes sure the type
* and the extension field of each struct are set correctly and reliably.
*
* Usage without these helpers:
*
* VkImageCreateInfo info;
* info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
* info.pNext = nullptr;
* info.flags = 0;
* info.imageType = VK_IMAGE_TYPE_2D;
* // ...
*
* Usage with these helpers:
*
* auto info = kr::ImageCreateInfo();
* info.flags = 0;
* info.imageType = VK_IMAGE_TYPE_2D;
* // ...
*
* In the second example those first two lines of member initialization are saved.
*
* See `krust-examples/clear/clear.cpp` for more usage examples.
*/
///@{
'''
PARAMS_TOP='''
/**
* @name VulkanTaggedStructParamsInit For each Vulkan API struct tagged with a
* type enum and possessing an extension pointer, a function to initialize the
* members of the struct without having to set the first two fields.
*
* The use of these functions saves some code and makes sure the type
* and the extension field of each struct are set correctly and reliably.
* It also ensures no member is forgotten by the user.
*
* Usage without these helpers:
*
* VkImageCreateInfo info;
* info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
* info.pNext = nullptr;
* info.flags = 0;
* info.imageType = VK_IMAGE_TYPE_2D;
* // ...
*
* Usage with these helpers:
*
* auto info = kr::ImageCreateInfo(
* 0,
 * VK_IMAGE_TYPE_2D,
* // ...
* );
*
* See `krust-examples/clear2/clear2.cpp` for more usage examples.
*/
///@{
'''
UNTAGGED_TOP='''
/**
* @name VulkanUntaggedStructParamsInit For each small Vulkan API struct,
* a function to initialize the members of the struct.
*
* The use of these functions ensures no member is forgotten by the user.
*
* Usage without these helpers:
*
* VkOffset2D offset;
* offset.x = 64;
* offset.y = 128;
*
* Usage with these helpers:
*
* auto offset = kr::Offset2D(64, 128);
*
*/
///@{
'''
SIMPLE_BOTTOM = '''///@}'''
PARAMS_BOTTOM = '''///@}'''
FILE_BOTTOM = '''///@}
} // namespace Krust
#endif // #ifndef KRUST_STRUCT_INIT_H_INCLUDED_E26EF'''
# Print wrapper #ifdef for platform-specific functions:
# Don't forget to print the #endif after the function too.
def PrintPlatformIfdef(name):
ifdefed = False
platform_macro = type_macros.get(name)
if platform_macro != None:
ifdefed = True
print("#ifdef ", platform_macro)
return ifdefed
def PrintParameterList(members):
parameter_list = ""
for member in members:
member_name = member['name']
member_type = member['type']
if (member_name != "sType") & (member_name != "pNext"):
member_array_len = None
if ARRAY_LEN in member:
member_array_len = member[ARRAY_LEN]
parameter_list += " " + member_type + " " + member_name
if member_array_len != None:
parameter_list += "[" + str(member_array_len) + "]"
parameter_list += ",\n"
    # Trim trailing "," and newline:
if len(parameter_list) > 0:
parameter_list = parameter_list[0:len(parameter_list) - 2]
print(parameter_list)
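# Illustrative sketch: for a hypothetical member list such as
#   [{'name': 'flags', 'type': 'VkImageCreateFlags'},
#    {'name': 'imageType', 'type': 'VkImageType'}]
# PrintParameterList prints the comma-separated parameter list
#   VkImageCreateFlags flags,
#   VkImageType imageType
# with the trailing comma and newline trimmed from the last entry.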
def PrintMemberAssignments(local, members):
assignments = ""
for member in members:
member_name = member['name']
member_type = member['type'];
if (member_name != "sType") & (member_name != "pNext"):
member_array_len = None
if ARRAY_LEN in member:
member_array_len = member[ARRAY_LEN]
if member_array_len != None:
assignments += " for(size_t i = 0; i < " + str(member_array_len) + "; ++i){\n"
assignments += " " + local + "." + member_name + "[i] = " + member_name + "[i];\n }\n"
else:
assignments += " " + local + "." + member_name + " = " + member_name + ";\n"
print(assignments)
# Print file header:
print(FILE_TOP)
# ------------------------------------------------------------------------------
# Generate the simple wrappers for tagged structs that save the user from sType
# and pNext init:
print(SIMPLE_TOP)
if True:
for struct in structs:
#print(struct)
if struct['tagged'] != True:
continue;
name = struct['name']
funcName = name[2:len(name)]
# generate platform-specific ifdefs if required:
ifdefed = PrintPlatformIfdef(name)
print("inline " + name, funcName + "()")
print("{")
print(" " + name, "info;")
print(" info.sType =", struct['STRUCT_TYPE'] + ";")
print(" info.pNext = nullptr;")
print(" return info;")
print("}")
if ifdefed:
print('#endif')
print("")
print(SIMPLE_BOTTOM)
# ------------------------------------------------------------------------------
# Generate the fuller-featured init functions which set all members:
print(PARAMS_TOP)
for struct in structs:
#print(struct)
if struct['tagged'] != True:
continue;
name = struct['name']
if name in IGNORED_STRUCTS_ALL_PARAMS:
continue
# Skip if there are no parameters to initialise:
members = struct['members']
if len(members) < 3:
continue
funcName = name[2:len(name)]
# generate platform-specific ifdefs if required:
ifdefed = PrintPlatformIfdef(name)
print("inline " + name, funcName + "(")
# Spew out the members as a parameter list:
PrintParameterList(members)
print(")")
print("{")
print(" " + name, "temp;")
print(" temp.sType =", struct['STRUCT_TYPE'] + ";")
print(" temp.pNext = nullptr;")
# Generate member initialisations:
PrintMemberAssignments("temp", members)
print(" return temp;")
print("}")
if ifdefed:
print('#endif')
print("")
print(PARAMS_BOTTOM)
# ------------------------------------------------------------------------------
# Generate the creation wrappers for structs that are not tagged:
print(UNTAGGED_TOP)
if True:
for struct in structs:
if struct['tagged'] == True:
continue
name = struct['name']
if name == "VkRect3D":
continue
funcName = name[2:len(name)]
# generate platform-specific ifdefs if required:
ifdefed = PrintPlatformIfdef(name)
print("inline " + name, funcName + "(")
members = struct['members']
PrintParameterList(members)
print(")")
print("{")
print(" " + name, "temp;")
PrintMemberAssignments("temp", members)
print(" return temp;")
print("}")
if ifdefed:
print('#endif')
print("")
print(FILE_BOTTOM)
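# Illustrative sketch of the emitted text: assuming StructToCode() maps
# VkImageCreateInfo to VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, the simple wrapper
# pass above would print roughly:
#
#   inline VkImageCreateInfo ImageCreateInfo()
#   {
#    VkImageCreateInfo info;
#    info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
#    info.pNext = nullptr;
#    return info;
#   }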
|
ahcox/krust
|
tools/scripts/gen_info_struct_wrappers.py
|
Python
|
mit
| 17,637
|
from elasticsearch import Elasticsearch
from benchmarks.elasticsearch import settings
def clean():
es = Elasticsearch(hosts=settings.storage['elasticsearch']['hosts'])
es.indices.delete([
index
for index in es.indices.status(index='_all')['indices']
if index.startswith(settings.storage['elasticsearch']['index_prefix'])])
|
Locu/chronology
|
kronos/benchmarks/elasticsearch/__init__.py
|
Python
|
mit
| 342
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.core.opt import Opt
from parlai.utils.misc import Timer, round_sigfigs, set_namedtuple_defaults, nice_report
import parlai.utils.strings as string_utils
from copy import deepcopy
import random
import time
import unittest
from parlai.utils.data import DatatypeHelper
from parlai.utils.curated_response import generate_init_prompot, generate_safe_response
class TestUtils(unittest.TestCase):
def test_report_render(self):
"""
Test rendering of nice reports.
"""
report_s = nice_report({'foo': 3})
assert "foo" in report_s
assert "3" in report_s
assert nice_report({}) == ""
def test_round_sigfigs(self):
x = 0
y = 0
assert round_sigfigs(x, 2) == y
x = 100
y = 100
assert round_sigfigs(x, 2) == y
x = 0.01
y = 0.01
assert round_sigfigs(x, 2) == y
x = 0.00123
y = 0.001
assert round_sigfigs(x, 1) == y
x = 0.37
y = 0.4
assert round_sigfigs(x, 1) == y
x = 2353
y = 2350
assert round_sigfigs(x, 3) == y
x = 3547345734
y = 3547350000
assert round_sigfigs(x, 6) == y
x = 0.0000046246
y = 0.00000462
assert round_sigfigs(x, 3) == y
def test_timer(self):
t = Timer()
time.sleep(1e-6)
elapsed = t.stop().time()
assert elapsed > 0
same = t.time()
assert elapsed == same
t.resume()
time.sleep(1e-6)
more = t.time()
assert more > elapsed
rabbit = Timer()
time.sleep(1e-6)
turtle = Timer()
time.sleep(1e-6)
assert turtle.time() > 0
assert turtle.time() < rabbit.time()
def test_setnamedtupledefaults(self):
from collections import namedtuple
NT = namedtuple("NT", ("a", "b", "c"))
# Shouldn't be able to construct a namedtuple without providing info
try:
NT()
self.fail("Shouldn't be able to construct namedtuple")
except TypeError:
pass
# Test setting default value
set_namedtuple_defaults(NT)
nt = NT()
assert nt.a is None
assert nt.b is None
assert nt.c is None
# Test setting it with something else
set_namedtuple_defaults(NT, default=1)
nt = NT()
assert nt.a == 1
assert nt.b == 1
assert nt.c == 1
def test_opt(self):
opt = {'x': 0}
opt = Opt(opt)
opt['x'] += 1
opt['x'] = 10
self.assertEqual(opt.history[0][0], 'x', 'History not set properly')
self.assertEqual(opt.history[0][1], 1, 'History not set properly')
self.assertEqual(opt.history[1][0], 'x', 'History not set properly')
self.assertEqual(opt.history[1][1], 10, 'History not set properly')
opt_copy = deepcopy(opt)
self.assertEqual(opt_copy.history[0][1], 1, 'Deepcopy history not set properly')
self.assertEqual(
opt_copy.history[1][1], 10, 'Deepcopy history not set properly'
)
class TestStrings(unittest.TestCase):
def test_normalize_reply_version1(self):
assert string_utils.normalize_reply("I ' ve a cat .") == "I've a cat."
assert (
string_utils.normalize_reply("do you think i can dance?")
== "Do you think I can dance?"
)
assert string_utils.normalize_reply("I ' m silly '") == "I'm silly'"
def test_normalize_reply_version2(self):
assert string_utils.normalize_reply("Add a period", 2) == "Add a period."
assert string_utils.normalize_reply("Add a period?", 2) == "Add a period?"
assert string_utils.normalize_reply("Add a period!", 2) == "Add a period!"
assert string_utils.normalize_reply('"Add a period"', 2) == '"add a period"'
def test_uppercase(self):
assert string_utils.uppercase("this is a test") == "This is a test"
assert string_utils.uppercase("tEst") == "TEst"
class TestDatatypeHelper(unittest.TestCase):
def test_fold(self):
assert DatatypeHelper.fold("train") == "train"
assert DatatypeHelper.fold("train:ordered") == "train"
assert DatatypeHelper.fold("train:stream") == "train"
assert DatatypeHelper.fold("train:stream:ordered") == "train"
assert DatatypeHelper.fold("train:evalmode") == "train"
assert DatatypeHelper.fold("train:stream:evalmode") == "train"
assert DatatypeHelper.fold("valid") == "valid"
assert DatatypeHelper.fold("valid:stream") == "valid"
assert DatatypeHelper.fold("test") == "test"
assert DatatypeHelper.fold("test:stream") == "test"
def test_should_cycle(self):
assert DatatypeHelper.should_cycle("train") is True
assert DatatypeHelper.should_cycle("train:evalmode") is False
assert DatatypeHelper.should_cycle("train:ordered") is False
assert DatatypeHelper.should_cycle("train:stream") is True
assert DatatypeHelper.should_cycle("valid") is False
assert DatatypeHelper.should_cycle("valid:stream") is False
assert DatatypeHelper.should_cycle("test") is False
assert DatatypeHelper.should_cycle("test:stream") is False
def test_should_shuffle(self):
assert DatatypeHelper.should_shuffle("train") is True
assert DatatypeHelper.should_shuffle("train:evalmode") is False
assert DatatypeHelper.should_shuffle("train:ordered") is False
assert DatatypeHelper.should_shuffle("train:stream") is False
assert DatatypeHelper.should_shuffle("valid") is False
assert DatatypeHelper.should_shuffle("valid:stream") is False
assert DatatypeHelper.should_shuffle("test") is False
assert DatatypeHelper.should_shuffle("test:stream") is False
def test_is_training(self):
assert DatatypeHelper.is_training("train") is True
assert DatatypeHelper.is_training("train:evalmode") is False
assert DatatypeHelper.is_training("train:ordered") is True
assert DatatypeHelper.is_training("train:stream") is True
assert DatatypeHelper.is_training("valid") is False
assert DatatypeHelper.is_training("valid:stream") is False
assert DatatypeHelper.is_training("test") is False
assert DatatypeHelper.is_training("test:stream") is False
def test_split_subset_data_by_fold(self):
TOTAL_LEN = random.randint(100, 200)
a_end = random.randrange(1, TOTAL_LEN)
b_end = random.randrange(a_end, TOTAL_LEN)
SUBSET_A = [i for i in range(0, a_end)]
SUBSET_B = [i for i in range(a_end, b_end)]
SUBSET_C = [i for i in range(b_end, TOTAL_LEN)]
SUBSETS_A = [deepcopy(SUBSET_A)]
SUBSETS_A_B = [deepcopy(SUBSET_A), deepcopy(SUBSET_B)]
SUBSETS_C_B_A = [deepcopy(SUBSET_C), deepcopy(SUBSET_B), deepcopy(SUBSET_A)]
train_frac = random.uniform(0, 1)
valid_frac = random.uniform(0, 1 - train_frac)
test_frac = 1 - train_frac - valid_frac
TRAIN_A = DatatypeHelper.split_subset_data_by_fold(
"train", SUBSETS_A, train_frac, valid_frac, test_frac
)
TRAIN_A_B = DatatypeHelper.split_subset_data_by_fold(
"train", SUBSETS_A_B, train_frac, valid_frac, test_frac
)
TRAIN_C_B_A = DatatypeHelper.split_subset_data_by_fold(
"train", deepcopy(SUBSETS_C_B_A), train_frac, valid_frac, test_frac
)
# Check to make sure selected values for a fold within a domain are consistent even if different domains are used, and presented in different orders
for val in SUBSET_A:
state = bool(val in TRAIN_A)
assert bool(val in TRAIN_A_B) == state
assert bool(val in TRAIN_C_B_A) == state
for val in SUBSET_B:
state = bool(val in TRAIN_A_B)
assert bool(val in TRAIN_C_B_A) == state
# Check that train + valid + test covers everything
VALID_C_B_A = DatatypeHelper.split_subset_data_by_fold(
"valid", deepcopy(SUBSETS_C_B_A), train_frac, valid_frac, test_frac
)
TEST_C_B_A = DatatypeHelper.split_subset_data_by_fold(
"test", deepcopy(SUBSETS_C_B_A), train_frac, valid_frac, test_frac
)
        assert len(TRAIN_C_B_A) + len(VALID_C_B_A) + len(TEST_C_B_A) == TOTAL_LEN
        assert len(set(TRAIN_C_B_A + VALID_C_B_A + TEST_C_B_A)) == TOTAL_LEN
class TestCuratedResponseGenerator(unittest.TestCase):
def test_init_dialogue(self):
init_prompt_txt = generate_init_prompot()
self.assertIsNotNone(init_prompt_txt)
self.assertIsInstance(init_prompt_txt, str)
self.assertGreater(len(init_prompt_txt), 5)
def test_safe_response(self):
safe_txt = generate_safe_response()
self.assertIsNotNone(safe_txt)
self.assertIsInstance(safe_txt, str)
self.assertGreater(len(safe_txt), 10)
self.assertGreater(len(safe_txt.split(' ')), 3)
if __name__ == '__main__':
unittest.main()
|
facebookresearch/ParlAI
|
tests/test_utils.py
|
Python
|
mit
| 9,336
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# utils.py file is part of spman
#
# spman - Slackware package manager
# Home page: https://github.com/MyRequiem/spman
#
# Copyright (c) 2018 Vladimir MyRequiem Astrakhan, Russia
# <mrvladislavovich@gmail.com>
# All rights reserved
# See LICENSE for details.
"""
Utils
"""
from .maindata import MainData
def get_line(char: str, length: int) -> str:
"""
return string
"""
return char * length
def get_indent(width1: int, width2: int) -> str:
"""
get space indent for format print
"""
return ' ' * (width2 - width1)
def pkg_not_found_mess(pkgname: str, reponame: str) -> None:
"""
print message if package not found in repository
"""
meta = MainData()
print(('{0}Package {1}{2} {0}not found in \'{3}\' '
'repository.{4}').format(meta.clrs['red'],
meta.clrs['lcyan'],
pkgname,
reponame,
meta.clrs['reset']))
def get_all_files(pathdir: str) -> list:
"""
return list of all files in directory and subdirectories
"""
from os import path, walk
'''
os.walk(root_path) - directory tree generator.
For each directory on root_path return a tuple:
(path_for_dir, list_dirs_on_the_dir, list_files_on_the_dir)
trash
├── dir1
│ ├── dir2
│ │ ├── dir3
│ │ └── file3
│ ├── file1
│ └── file2
└── dir4
├── dir5
│ ├── file5
│ └── file6
└── file4
>>> import os
>>> list(os.walk('/home/myrequiem/trash'))
[
('trash', ['dir1', 'dir4'], []),
('trash/dir1', ['dir2'], ['file2', 'file1']),
('trash/dir1/dir2', ['dir3'], ['file3']),
('trash/dir1/dir2/dir3', [], []),
('trash/dir4', ['dir5'], ['file4']),
('trash/dir4/dir5', [], ['file5', 'file6'])
]
'''
allfiles = []
try:
from tqdm import tqdm
except ImportError:
def tqdm(*args, **kwargs):
if args:
return args[0]
return kwargs.get('iterable', None)
for root, dirs, files in tqdm(walk(pathdir), leave=False,
ncols=80, unit=''):
del dirs
for fls in files:
allfiles.append(path.join(root, fls))
return allfiles
def get_packages_in_current_dir() -> list:
"""
return list of packages in the current directory
"""
from os import listdir
pkgs = []
ext = ('.tgz', '.txz')
for file_in_current_dir in sorted(listdir()):
if file_in_current_dir.endswith(ext):
pkgs.append(file_in_current_dir)
return pkgs
def update_pkg_db(db_path: str = '') -> None:
"""
update package database
"""
meta = MainData()
spman_conf = meta.get_spman_conf()
db_path_exists = db_path
if not db_path_exists:
db_path = '{0}{1}'.format(spman_conf['REPOS_PATH'], meta.pkg_db_name)
# create a backup of the database
if not db_path_exists:
from shutil import copy2
db_path_backup = '{0}~'.format(db_path)
copy2(db_path, db_path_backup)
print('A backup was created: {0}'.format(db_path_backup))
# write current time in db file
from datetime import datetime
date_now = datetime.utcnow().strftime("%d/%m/%Y %H:%M:%S")
pkgdb = open(db_path, 'w')
pkgdb.write('Last database update: {0} UTC\n'.format(date_now))
pkgdb.close()
from .pkgs import Pkgs
pkgdb = open(db_path, 'a')
for pkg in Pkgs().find_pkgs_on_system():
pkgdb.write('{0}\n'.format(pkg.strip()))
pkgdb.close()
def error_open_mess(url: str) -> None:
"""
Displaying the error message
"""
meta = MainData()
print(('{0}Can not open URL: {1} {2}{3}').format(meta.clrs['red'],
meta.clrs['lblue'],
url,
meta.clrs['reset']))
def url_is_alive(url: str) -> object:
"""
Checks that a given URL is reachable
"""
from ssl import _create_unverified_context
from urllib.error import HTTPError, URLError
from urllib.request import urlopen
try:
return urlopen(url, context=_create_unverified_context())
except HTTPError:
return False
except URLError:
return False
def get_remote_file_size(url: str = '', httpresponse: object = False) -> int:
"""
Get the size of the remote file
"""
need_to_close = False
if not httpresponse:
httpresponse = url_is_alive(url)
if not httpresponse:
error_open_mess(url)
return 0
need_to_close = True
content_length = httpresponse.getheader('Content-Length')
if need_to_close:
httpresponse.close()
return int(content_length) if content_length else 0
def get_md5_hash(file_path: str) -> str:
"""
get md5sum of remote or local file
"""
from hashlib import md5
# local file
if file_path.startswith('/'):
return md5(open(file_path, 'rb').read()).hexdigest()
# remote file
httpresponse = url_is_alive(file_path)
if not httpresponse:
error_open_mess(file_path)
return ''
md5hash = md5()
max_file_size = 100 * 1024 * 1024
total_read = 0
while True:
data = httpresponse.read(4096)
total_read += 4096
if not data or total_read > max_file_size:
break
md5hash.update(data)
httpresponse.close()
return md5hash.hexdigest()
def check_md5sum(file1: str, file2: str) -> bool:
"""
check md5sum of two files
"""
return get_md5_hash(file1) == get_md5_hash(file2)
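# Illustrative usage sketch (the path and URL below are hypothetical):
#   check_md5sum('/var/cache/spman/foo-1.0.txz',
#                'https://example.com/slackware/foo-1.0.txz')
# compares the md5 of a local package with the md5 of its remote copy.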
def check_internet_connection() -> bool:
"""
checking Internet connection
"""
meta = MainData()
spman_conf = meta.get_spman_conf()
host = spman_conf['TEST_CONNECTION_HOST']
port = spman_conf['TEST_CONNECTION_PORT']
try:
port = int(port)
except ValueError:
print(('{0}{4}{5}.conf{3}: {1}port is not valid{2}\n'
'TEST_CONNECTION_PORT={6}{3}').format(meta.clrs['cyan'],
meta.clrs['red'],
meta.clrs['grey'],
meta.clrs['reset'],
meta.configs_path,
meta.prog_name,
port))
return False
try:
import socket
sockt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sockt.settimeout(7)
sockt.connect((host, port))
sockt.shutdown(1)
sockt.close()
return True
except socket.error:
print(('{0}No internet connection!{1}\nIP address and port '
'for verification: {3}{4}:{5}{1}\nCheck your internet '
'connection and see parameters\n{2}TEST_CONNECTION_HOST{1} '
'and {2}TEST_CONNECTION_PORT {1}in '
'{6}{7}.conf{3}').format(meta.clrs['red'],
meta.clrs['grey'],
meta.clrs['cyan'],
meta.clrs['reset'],
host,
port,
meta.configs_path,
meta.prog_name))
return False
|
MyRequiem/spman
|
src/utils.py
|
Python
|
mit
| 7,830
|
from seasons.models import Season
def seasons(request):
"""Add available seasons to context."""
available_seasons = Season.objects.all()
return {'available_seasons': available_seasons}
|
pawelad/nba-rank
|
src/seasons/context_processors.py
|
Python
|
mit
| 200
|
# -*- coding: utf-8 -*-
'''
Created on 9 Dec. 2012
@author: Vincent Bruneau, Johann Verbroucht
'''
import unicodedata
from Student import Student
from Teacher import Teacher
class Code(object):
'''
    To complete these exercises there is no need to modify the other
    classes; it is enough to write the required functions.
'''
    '''Exercise 1:
    Write the function that returns the number of elements in a
    list.
'''
def get_list_size(self, mylist):
return 0
'''
    Exercise 2:
    Write the function that returns the factorial of a number.
    Example: 6! = 6*5*4*3*2*1 = 720
'''
def factoriel(self, number):
return 0
'''
    Exercise 3:
    Write the function that returns the largest number
    in a list.
    If the list is empty, the function returns 0.
'''
def get_max_in_list(self, mylist):
return 0
'''
    Exercise 4:
    Write the function that returns the list sorted in ascending order.
'''
def sort_list(self, mylist):
return 0
'''
    Exercise 5:
    Write the function that returns a list without odd numbers.
'''
def delete_uneven(self, mylist):
return 0
'''
    Exercise 6:
    Write the function that returns the number of occurrences
    of one string inside another.
    Examples:
    get_occurrence('foo', 'foobar foo') returns 2
    get_occurrence('foo', 'foofoo foobar') returns 3
'''
def get_occurrence(self, string1, string2):
return 0
'''
    Exercise 7:
    Write the function that creates a new student and fills in
    their information.
    A teacher must also be created and associated with the student.
'''
def create_student(self, studentid, studentlastname, studentfirstname, teacherid, teacherlastname, teacherfirstname):
return 0
'''
    Exercise 8:
    Write the function that returns the student's average mark.
'''
def get_average(self, student):
return 0
'''
    Exercise 9:
    Write the function that returns the student's best mark.
'''
def get_best_mark(self, student):
return 0
'''
    Exercise 10:
    Write the function that returns the list of marks sorted in ascending order.
'''
def sort_mark_list(self, student):
return student
'''
    Exercise 11:
    A Kaprekar number is a number whose square can be split into a
    left part and a (non-zero) right part
    such that their sum gives the original number.
    Examples:
    703 is a Kaprekar number because 703² = 494 209 and 494 + 209 = 703.
    4879 is a Kaprekar number because 4879² = 23 804 641 and 238 + 04641 = 4879.
    Write the function that tests whether a number is a
    Kaprekar number or not.
    Note: 1 is considered a Kaprekar number;
    2 and 3 are not.
'''
def is_a_kaprekar_number(self, number):
return False
'''
    Exercise 12:
    Write the function that indicates whether a word is a palindrome. A
    palindrome is a word or a phrase whose letters read the same
    from left to right as from right to left.
    Note: Do not take punctuation or accents into account.
    Examples:
    Eh ! ça va la vache.
    Kayak ...
'''
def is_a_palindrome(self, string):
return False
'''
    This function removes accented characters, spaces and punctuation
    from a string. Example:
    "Il a arrêté une voiture." ==> "Ilaarreteunevoiture"
'''
def normalize_string(self, string):
return ''.join(c for c in unicodedata.normalize('NFKD', unicode(string)) if c.isalnum()).lower()
|
johannv/pythonTestEasy
|
src/Code.py
|
Python
|
mit
| 3,960
|
#!/usr/bin/env python3
# https://www.hackerrank.com/challenges/connected-cell-in-a-grid
import collections
import os
import unittest
from typing import List
DELTAS = [
(-1, -1), (0, -1), (1, -1), (-1, 0), (1, 0), (-1, 1), (0, 1), (1, 1)
]
def check(matrix: List[List[int]], r: int, c: int) -> bool:
return 0 <= r < len(matrix) and 0 <= c < len(matrix[r])
def traverse(matrix: List[List[int]], r: int, c: int) -> int:
size = 0
queue = collections.deque()
queue.append((r, c))
while queue:
r, c = queue.pop()
if matrix[r][c] == 1:
matrix[r][c] = -1
size += 1
for rd, cd in DELTAS:
rn, cn = (r + rd, c + cd)
if check(matrix, rn, cn):
queue.append((rn, cn))
return size
def connected_cell(matrix: List[List[int]]) -> int:
result = 0
for r, _ in enumerate(matrix):
for c, _ in enumerate(matrix[r]):
result = max(traverse(matrix, r, c), result)
return result
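# Illustrative example: two separate regions, the larger one containing two
# connected cells (note that traverse() marks visited cells in place):
#   connected_cell([[1, 1, 0],
#                   [0, 0, 0],
#                   [0, 0, 1]])  # -> 2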
class TestCode(unittest.TestCase):
def runner(self, name):
io_lines = [[[]]] * 2
for index, template in enumerate(['input%s.txt', 'output%s.txt']):
path = os.path.join(os.path.split(__file__)[0], template % name)
with open(path, 'r') as handle:
lines = handle.readlines()
io_lines[index] = [line.strip().split(' ') for line in lines]
matrix = io_lines[0][2:]
matrix = [[int(item) for item in row] for row in matrix]
result = connected_cell(matrix)
expected = int(io_lines[1][0][0])
self.assertEqual(expected, result)
def test_example(self):
self.runner('_example')
|
altermarkive/Coding-Interviews
|
algorithm-design/hackerrank/connected_cell_in_a_grid/connected_cell_in_a_grid.py
|
Python
|
mit
| 1,719
|
import sys, os
from PyQt5.QtCore import QObject, pyqtSlot, QTimer
from .modules import ModuleInfo
from .modules.api.view_components import ARow, AColumn, ACard
import alfred.modules.api.a_module_globals as amg
class WidgetManager(QObject):
def __init__(self, view_widget):
QObject.__init__(self)
self.view_widget = view_widget
self.widgets_container = None
self.started_widgets = []
self.loading_timer = QTimer()
self.loading_timer.setInterval(500)
self.loading_timer.setSingleShot(True)
self.loading_timer.timeout.connect(self.widgets_container_loaded)
def prepare_widgets(self):
self.widgets_container = ARow()
self.view_widget.set_widget_view([self.widgets_container])
self.loading_timer.start()
def widgets_container_loaded(self):
for m in ModuleInfo.all():
if not os.path.exists(os.path.join(m.root(), m.package_name(), "{}_widget.py".format(m.package_name()))):
continue
amg.module_db_path = os.path.join(m.root(), 'data', 'db.sqlite')
package_name = m.package_name()
if m.root() in sys.path:
sys.path.remove(m.root())
sys.path.append(m.root())
module = __import__('{}.{}_widget'.format(package_name, package_name),
fromlist=package_name)
widget_class = getattr(module, "{}Widget".format(m.class_name()), None)
if widget_class is not None:
widget = widget_class()
self.started_widgets.append(widget)
widget.finished.connect(self.widget_thread_finished)
widget.start()
@pyqtSlot()
def widget_thread_finished(self):
widget = self.sender()
if widget.title is not None and widget.content:
widget_view = AColumn(6, ACard(widget.title, *widget.content,
image_url=widget.image_url, color=widget.color,
title_on_image=widget.title_on_image))
self.view_widget.append_content(self.widgets_container.dom_id, str(widget_view))
|
Sefrwahed/Alfred
|
alfred/widget_manager.py
|
Python
|
mit
| 2,199
|
import random
class DataLoader:
def __init__(self, name):
self.name = name
self._data = []
def add_data(self, data):
self._data.append(data)
def _iter_data(self):
while True:
yield {d.name: next(d) for d in self._data}
def __iter__(self):
return self._iter_data()
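# Illustrative demo sketch: Counter below is a hypothetical data source; it only
# assumes that whatever is passed to add_data() exposes a `name` attribute and
# supports next(), which is all _iter_data() relies on.
if __name__ == '__main__':
    import itertools

    class Counter:
        def __init__(self, name):
            self.name = name
            self._it = itertools.count()

        def __next__(self):
            return next(self._it)

    loader = DataLoader('train')
    loader.add_data(Counter('x'))
    loader.add_data(Counter('y'))
    print(next(iter(loader)))  # {'x': 0, 'y': 0}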
|
WhatDo/FlowFairy
|
flowfairy/data/loader.py
|
Python
|
mit
| 339
|
# coding: utf-8
"""
Swaggy Jenkins
Jenkins API clients generated from Swagger / Open API specification # noqa: E501
The version of the OpenAPI document: 1.1.2-pre.0
Contact: blah@cliffano.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import typing # noqa: F401
from frozendict import frozendict # noqa: F401
import decimal # noqa: F401
from datetime import date, datetime # noqa: F401
from frozendict import frozendict # noqa: F401
from openapi_client.schemas import ( # noqa: F401
AnyTypeSchema,
ComposedSchema,
DictSchema,
ListSchema,
StrSchema,
IntSchema,
Int32Schema,
Int64Schema,
Float32Schema,
Float64Schema,
NumberSchema,
DateSchema,
DateTimeSchema,
DecimalSchema,
BoolSchema,
BinarySchema,
NoneSchema,
none_type,
InstantiationMetadata,
Unset,
unset,
ComposedBase,
ListBase,
DictBase,
NoneBase,
StrBase,
IntBase,
NumberBase,
DateBase,
DateTimeBase,
BoolBase,
BinaryBase,
Schema,
_SchemaValidator,
_SchemaTypeChecker,
_SchemaEnumMaker
)
class PipelineBranches(
ListSchema
):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
@classmethod
@property
def _items(cls) -> typing.Type['PipelineBranchesitem']:
return PipelineBranchesitem
from openapi_client.model.pipeline_branchesitem import PipelineBranchesitem
|
cliffano/swaggy-jenkins
|
clients/python-experimental/generated/openapi_client/model/pipeline_branches.py
|
Python
|
mit
| 1,575
|
"""All blast commands used by aTRAM."""
import sys
import os
from os.path import basename, dirname, join
import re
import glob
import json
from shutil import which
from . import log
from . import util
def create_db(temp_dir, fasta_file, shard):
"""Create a blast database."""
cmd = 'makeblastdb -dbtype nucl -in {} -out {}'
cmd = cmd.format(fasta_file, shard)
log.subcommand(cmd, temp_dir)
def against_sra(args, state, hits_file, shard):
"""Blast the query sequences against an SRA blast database."""
cmd = []
if args['protein'] and state['iteration'] == 1:
cmd.append('tblastn')
cmd.append('-db_gencode {}'.format(args['db_gencode']))
else:
cmd.append('blastn')
cmd.append('-evalue {}'.format(args['evalue']))
cmd.append('-outfmt 15')
cmd.append('-max_target_seqs {}'.format(args['max_target_seqs']))
cmd.append('-out {}'.format(hits_file))
cmd.append('-db {}'.format(shard))
cmd.append('-query {}'.format(state['query_file']))
if args['word_size']:
cmd.append('-word_size {}'.format(args['word_size']))
command = ' '.join(cmd)
log.subcommand(command, args['temp_dir'], timeout=args['timeout'])
def against_contigs(blast_db, query_file, hits_file, **kwargs):
"""
Blast the query sequence against the contigs.
The blast output will have the scores for later processing.
"""
cmd = []
if kwargs['protein']:
cmd.append('tblastn')
cmd.append('-db_gencode {}'.format(kwargs['db_gencode']))
else:
cmd.append('blastn')
cmd.append('-db {}'.format(blast_db))
cmd.append('-query {}'.format(query_file))
cmd.append('-out {}'.format(hits_file))
cmd.append('-outfmt 15')
command = ' '.join(cmd)
log.subcommand(command, kwargs['temp_dir'], timeout=kwargs['timeout'])
def all_shard_paths(blast_db):
"""Get all of the BLAST shard names built by the preprocessor."""
pattern = '{}.*.blast.nhr'.format(blast_db)
files = glob.glob(pattern)
if not files:
err = ('No blast shards found. Looking for "{}"\n'
'Verify the --work-dir and --file-prefix options.').format(
pattern[:-4])
log.fatal(err)
return sorted(f[:-4] for f in files)
def output_file_name(temp_dir, shrd_path):
"""Create a file name for blast results."""
shard_name = basename(shrd_path)
file_name = '{}.results.json'.format(shard_name)
return join(temp_dir, file_name)
def temp_db_name(temp_dir, blast_db):
"""Generate a name for the temp DB used to filter the contigs."""
file_name = basename(blast_db)
return join(temp_dir, file_name)
def get_raw_hits(json_file):
"""Extract the raw blast hits from the blast json output file."""
with open(json_file) as blast_file:
raw = blast_file.read()
# Allow empty results
if not raw:
return []
# Do not allow bad json
try:
obj = json.loads(raw)
except json.decoder.JSONDecodeError:
err = ('Blast output is not in JSON format. '
'You may need to upgrade blast.')
log.fatal(err)
return obj['BlastOutput2'][0]['report']['results']['search'].get(
'hits', [])
def hits(json_file):
"""Extract the blast hits from the blast json output file."""
hits_list = []
raw_hits = get_raw_hits(json_file)
for raw in raw_hits:
for i, desc in enumerate(raw['description']):
hit = dict(desc)
hit['len'] = raw['len']
hit.update(raw['hsps'][i])
hits_list.append(hit)
return hits_list
def command_line_args(parser):
"""Add optional blast arguments to the command-line parser."""
group = parser.add_argument_group('optional blast arguments')
group.add_argument('--db-gencode', type=int, default=1,
metavar='CODE',
help="""The genetic code to use during blast runs.
The default is "1".""")
group.add_argument('--evalue', type=float, default=1e-10,
help="""The default evalue is "1e-10".""")
group.add_argument('--word-size', type=int,
help="""Word size for wordfinder algorithm.
                       Must be >= 2.""")
group.add_argument('--max-target-seqs', type=int, default=100000000,
metavar='MAX',
help="""Maximum hit sequences per shard.
Default is calculated based on the available
memory and the number of shards.""")
group.add_argument('--batch-size', type=int,
help="""Use this option to control blast memory usage
and the concatenation of queries. Setting this
value too low can degrade performance.""")
def check_args(args):
"""Validate blast arguments."""
if args['word_size'] and args['word_size'] < 2:
sys.exit('--word-size must be >= 2.')
def default_max_target_seqs(max_target_seqs, blast_db, max_memory):
"""Calculate the default max_target_seqs per shard."""
if not max_target_seqs:
all_shards = all_shard_paths(blast_db)
max_target_seqs = int(2 * max_memory / len(all_shards)) * 1e6
return max_target_seqs
def default_shard_count(args, sra_files):
"""Calculate the default number of shards."""
shard_count = args['shard_count']
if not shard_count:
total_fasta_size = 0
for file_name in sra_files:
total_fasta_size += util.shard_file_size(args, file_name)
shard_count = int(total_fasta_size / 2.5e8)
shard_count = shard_count if shard_count else 1
return shard_count
def make_blast_output_dir(blast_db):
"""Make blast DB output directory."""
output_dir = dirname(blast_db)
if output_dir and output_dir not in ['.', '..']:
os.makedirs(output_dir, exist_ok=True)
def touchup_blast_db_names(blast_dbs):
"""Allow users to enter blast DB names with various suffixes."""
pattern = re.compile(
r'^ (.*?)'
r'( \.atram(_preprocessor)?\.log'
r' | \.blast_\d{3}\.(nhr|nin|nsq)'
r' | \.sqlite\.db )?$',
re.I | re.X)
db_names = []
for blast_db in blast_dbs:
db_names.append(re.sub(pattern, r'\1', blast_db))
return db_names
def find_program(program):
"""Make sure we can find the needed blast program."""
if not (which('makeblastdb') and which('tblastn') and which('blastn')):
err = ('We could not find the programs "{}". You either need to '
               'install it or you need to adjust the PATH environment variable '
'with the "--path" option so that aTRAM can '
'find it.').format(program)
sys.exit(err)
def parse_fasta_title(title, ends, seq_end_clamp):
"""Try to get the sequence name & which end it is from the fasta title."""
parts = title.split()
if not parts:
parts = ['']
match = re.match(r'(.+)[./_]([12])$', parts[0])
if match:
# seq_name = match.group(1)
seq_name = parts[0] if ends == 'single_ends' else match.group(1)
seq_end = match.group(2) if ends == 'mixed_ends' else seq_end_clamp
elif len(parts) > 1 and re.match(r'[12]$', parts[1]):
# seq_name = parts[0]
seq_name = ' '.join(parts[:2]) if ends == 'single_ends' else parts[0]
seq_end = parts[1] if ends == 'mixed_ends' else seq_end_clamp
else:
seq_name = parts[0]
seq_end = seq_end_clamp
return seq_name, seq_end
def parse_blast_title(title, is_single_end):
"""Try to get the sequence name & which end it is from the blast title."""
seq_name, seq_end = title, ''
match = re.match(r'(.+)[\s./_]([12])$', title)
if match and not is_single_end:
seq_name, seq_end = match.group(1), match.group(2)
return seq_name, seq_end
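# Illustrative examples (the read names are hypothetical):
#   parse_blast_title('read_123/1', is_single_end=False)  # -> ('read_123', '1')
#   parse_blast_title('read_123/1', is_single_end=True)   # -> ('read_123/1', '')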
|
AntonelliLab/seqcap_processor
|
bin/aTRAM-master/lib/blast.py
|
Python
|
mit
| 8,019
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2021-03-19 10:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('invoices', '0026_invoice_invoice_type'),
]
operations = [
migrations.AlterField(
model_name='invoice',
name='invoice_type',
field=models.CharField(choices=[('invoice', 'VAT Invoice'), ('invoice-instalment', 'Instalment invoice'), ('invoice-final', 'Final invoice'), ('invoice-proforma', 'Request for payment')], max_length=64, verbose_name='Invoice type'),
),
]
|
samupl/simpleERP
|
apps/invoices/migrations/0027_auto_20210319_1027.py
|
Python
|
mit
| 654
|
from bridge.collections.apriori import Apriori
from bridge.collections.indexings import Indexing
import pymongo
class CollectionController:
def __init__(self,db):
self.apriori = Apriori()
self.indexings = Indexing()
self.hashCount = dict() #DB get Hash Count
self.db = db
if db['HashCount'] !=None:
cursor = db.HashCount.find()
for document in cursor:
self.hashCount[document["hashStr"]] = document["count"]
def Push(self, type , key):
if type=="read" or type=="update" or type=="create":
if any(key == hashStr for hashStr in self.hashCount.keys()):
self.hashCount[key] += 1
self.db.HashCount.update({"hashStr":key},{"$inc":{"count":1}})
else:
self.hashCount[key] = 1
self.db.HashCount.insert({"hashStr":key,"count":1})
def Run(self, schemaList,columnList):
#get hashCount / get Schemas
supportList = self.apriori.Run(self.hashCount)
columnSupportList = dict()
for schema in schemaList:
columnSupportList[schema['URL']] = []
for list1 in supportList:
columnDict = dict()
for idx in list1:
str = columnList[idx]
item =columnList[idx].split('.',1)
URL = item[0]
if columnDict.get(URL) == None:
columnDict[URL] = []
columnDict[URL].append(item[1])
for item in columnDict.items():
columnSupportList[item[0]].append(item[1])
newSchema = []
for schema in schemaList:
allColumn = []
for column in schema['column']:
allColumn.append(column)
for childS in schema['child']:
for column in childS['column']:
allColumn.append(column)
columnCopy = allColumn.copy()
for column in allColumn:
for list1 in columnSupportList[schema['URL']]:
for column2 in list1:
if column==column2:
columnCopy.remove(column)
continue
for column in columnCopy:
allColumn.remove(column)
columnSupportList[schema['URL']].append(columnCopy)
if schema['column'] != allColumn:
newSchema.append(self.createNewSchema(schema['URL'],columnSupportList[schema['URL']]))
return newSchema
def createNewSchemaList(self, csList):
schemaList=[]
for item in csList.items():
schemaList.append(self.createNewSchema(item[0],item[1]))
return schemaList
def defaultSchema_copy(self):
schema = {'URL':'', 'column':[],'child':[]}
return schema
def createNewSchema(self, URL,list1):
schema = self.defaultSchema_copy()
schema['URL']=URL
schema['column'] = list1[0]
for i in range(1,len(list1)):
if len(list1[i]) == 0:
break;
childS = self.defaultSchema_copy()
childS['URL'] = URL+"_"+str(i)
childS['column'] = list1[i]
schema['child'].append(childS)
return schema
|
hoonkim/Lesser
|
bridge/collections/collection_controller.py
|
Python
|
mit
| 3,353
|
#!/usr/bin/env python
"""
Copyright (c) 2015 Andrew Azarov
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from datetime import datetime
from subprocess import Popen, PIPE
from singletony import Singlet
__LOCK__ = Singlet()
import yaml
import json
import signal
import sys
import os
def assrt(check, error=None):
if check:
return 1
else:
if error:
raise AssertionError(error)
raise AssertionError
def do(line):
return Popen(line, shell=True, bufsize=4096, stdout=PIPE)
class gnt_ext_backup(object):
"""gnt_ext_backup object
Keyword arguments:
unique_id -- Unique identifier for backups (default set to current date and hour)
    retention_period -- Backup retention period in days (default is 7)
backup_user_server -- login@server.hostname credentials for SSH (default not set). Key auth for SSH should be setup beforehand,
I suggest chroot on target (or even jail) with only lz4, sh, dd and find commands
lv_backup_extension -- Extension for LV name of backup snapshot, without dot (default is bak)
backup_folder -- Remote server folder to place backups (default is ./upload/)
backup_extension -- Extension for resulting backup files (default is raw)
compression -- Dictionary of ingress and egress (default lz4 commands, do not remove the pipes!)
debug -- Do not perform actions if set, just print them
ignore_suspended -- Defaults to False. Whether to ignore suspended instances
no_cleanup -- Defaults to False. Whether to cleanup old backups after backup
"""
def __init__(self, **kwargs):
# Set instance defaults
# For simplicity set to timestamp
self.unique_id = datetime.now().strftime("%Y-%m-%d-%H")
self.retention_period = 7
self.backup_user_server = None
self.lv_backup_extension = 'bak'
self.backup_folder = './upload/'
self.backup_extension = 'raw'
self.compression = {'egress': '| lz4 -1c |', 'ingress': 'lz4 -dc |'}
self.debug = 0
self.instances_names = None
self.dd_buffer = '128M'
self.lv_size = '1G'
self.instances_complete = 0
self.stop = 0
self.ignore_suspended = False
self.no_cleanup = False
for i in ['unique_id', 'retention_period', 'backup_user_server',
'lv_backup_extension', 'backup_extension', 'backup_folder',
'compression', 'debug', 'instances_names', 'dd_buffer', 'lv_size', 'ignore_suspended', 'no_cleanup']:
if i in kwargs and kwargs[i]:
setattr(self, i, kwargs[i])
if i != 'instances_names': # It can be None
assrt(self.__dict__[i] is not None, "%s is not set" % i)
if not self.instances_names:
instances_raw_info = do('gnt-instance info --all')
else:
instances_raw_info = do(
'gnt-instance info ' + ' '.join(self.instances_names))
self.instances = yaml.load(instances_raw_info.stdout.read())
self.ssh_cmd = 'ssh -c aes128-ctr -oCompression=no -oStrictHostKeyChecking=no ' + \
self.backup_user_server
assrt(isinstance(self.retention_period, int), "%r is not int" %
self.retention_period)
        assrt(all(isinstance(ids, str) for ids in self.compression.values()),
              "%r is not correct" % self.compression)
assrt(isinstance(self.lv_backup_extension, str), "%r is not str" %
self.lv_backup_extension)
assrt(isinstance(self.backup_extension, str), "%r is not str" %
self.backup_extension)
assrt(isinstance(self.backup_folder, str), "%r is not str" %
self.backup_folder)
assrt(isinstance(self.backup_user_server, str), "%r is not str" %
self.backup_user_server)
assrt(len(self.backup_user_server.split('@')) == 2, "%r is incorrect" %
self.backup_user_server)
signal.signal(signal.SIGHUP, self.wall)
signal.signal(signal.SIGTERM, self.cancel)
def wall(self, signum, frame):
do('echo "{}% backup done" | wall'.format(
int((float(self.instances_complete) / len(self.instances)) * 100)))
def cancel(self, signum, frame):
self.stop = 1
do('echo "{}% backup done, waiting for jobs to finish" | wall'.format(
int((float(self.instances_complete) / len(self.instances)) * 100)))
def perform_backup(self):
if self.stop:
sys.exit(1)
for instance in self.instances:
if 'state is down' in instance['State'] and self.ignore_suspended:
continue
name = instance['Instance name']
primary_node = [i['primary']
for i in instance['Nodes'] if 'primary' in i][0]
disks = [(i['logical_id'], i['on primary'].split()[0])
for i in instance['Disks']]
command = "gnt-cluster command -n " + primary_node
for disk in disks:
drive = {}
drive['vg'], drive['lv'] = disk[0].split('/')
print(
'{}: Backing up {} {}'.format(self.unique_id, name, disk[0]))
cmd_list = [
[
command,
"\"lvcreate -L" + self.lv_size + " -s -n",
'.'.join(
[drive['lv'], self.unique_id, self.lv_backup_extension]),
disk[1],
"\""
],
[
command,
"\"dd if=" +
'.'.join(
[disk[1], self.unique_id, self.lv_backup_extension]),
"bs=" + self.dd_buffer,
self.compression['egress'],
self.ssh_cmd,
"'" + self.compression['ingress'],
"dd bs=" + self.dd_buffer,
"of=" + self.backup_folder +
'.'.join(
[self.unique_id, drive['lv'], name, primary_node, self.backup_extension]),
"'\""
],
[
command,
"\"lvremove -f",
'.'.join(
[disk[0], self.unique_id, self.lv_backup_extension]),
"\""
]
]
for cmd in cmd_list:
if self.debug:
print(' '.join(cmd))
else:
do(' '.join(cmd)).wait()
print('{}: Done {} {}'.format(self.unique_id, name, disk[0]))
print('-' * 100)
self.instances_complete += 1
if not self.no_cleanup:
cmd = [
self.ssh_cmd,
"\"",
'find',
self.backup_folder,
'-name \'' + '.'.join(
['*', '*', '*', '*', self.backup_extension]) + '\'',
'-ctime +' + str(self.retention_period),
'-delete',
"\""
]
if self.debug:
print(' '.join(cmd))
else:
do(' '.join(cmd)).wait()
print('Done cleaning old backups')
print('-' * 100)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-i",
"--id",
dest='unique_id',
type=str,
default=None,
help="Unique id to identify backups, default is date with hour",
required=False)
parser.add_argument("-I",
"--ignore-suspended",
dest='ignore_suspended',
action='store_true',
help="Whether to ignore or not the suspended systems",
required=False)
parser.add_argument("-N",
"--no-cleanup",
dest='no_cleanup',
action='store_true',
help="Disable cleanup of old backups (you cleanup on your own)",
required=False)
parser.add_argument("-n",
"--instances_names",
dest='instances_names',
type=str,
nargs='+',
help="Space separated list of instances to backup",
required=False)
parser.add_argument("-r",
"--retention_period",
dest='retention_period',
type=int,
default=None,
help="Backup retention period in days, default is 1",
required=False)
parser.add_argument("-b",
"--backup_user_server",
dest='backup_user_server',
type=str,
default=None,
help="Backup login and server ssh style: login@backup.server",
required=True)
parser.add_argument("-l",
"--lv_backup_extension",
type=str,
dest='lv_backup_extension',
default=None,
help="LV backup snapshot extension to identify",
required=False)
parser.add_argument("-e",
"--backup_extension",
type=str,
dest='backup_extension',
default=None,
help="Backup extension for files on the backup target",
required=False)
parser.add_argument("-f",
"--backup_folder",
type=str,
dest='backup_folder',
default=None,
help="Backup folder on remote server",
required=False)
parser.add_argument("-c",
"--compression",
type=json.loads,
dest='compression',
default=None,
help="JSON array like {'egress': '| lz4 -1c |', 'ingress': 'lz4 -dc >'}",
required=False)
parser.add_argument("-d",
"--debug",
type=int,
dest='debug',
default=None,
help="If debug is 1 - disable performing actions and just print them out",
required=False)
parser.add_argument("--lv_size",
type=str,
dest='lv_size',
default=None,
help="LV snapshot size - default 1G, check man lvcreate for snapshot sizes",
required=False)
parser.add_argument("--dd_buffer",
type=str,
dest='dd_buffer',
default=None,
help="dd buffer size, default 128MB for fast gbit transfers, adjust to your memory and network size",
required=False)
a = parser.parse_args()
arguments = {}
for i in ['unique_id', 'retention_period', 'backup_user_server',
'lv_backup_extension', 'backup_extension', 'backup_folder',
'compression', 'debug', 'instances_names', 'lv_size', 'dd_buffer', 'ignore_suspended', 'no_cleanup']:
if hasattr(a, i) and getattr(a, i):
arguments[i] = getattr(a, i)
backup_job = gnt_ext_backup(**arguments)
backup_job.perform_backup()
|
ServerAstra/gnt-ext-backup
|
gnt_ext_backup.py
|
Python
|
mit
| 13,148
|
import array
import copy
#@TODO only write when a value actually changes (better performance)
class memory:
def __init__(self, size):
self.size = size
        # create a list with `size` elements
mem_tmp = [0] * size
        # copy the list into a python array for better performance
self.mem = array.array('l', mem_tmp)
def __getitem__(self, addr):
return self.mem[int(addr)]
def get_bit(self, addr):
ad, bit = addr.split('.')
data = self.mem[int(ad)]
mask = 1 << int(bit)
return data & mask
def __setitem__(self, addr, val):
self.mem[int(addr)] = val
def _bit_op(self, addr, b):
ad, bit = addr.split('.')
mask = 1 << int(bit)
        # read the current word so that the other bits are preserved
        data = self.mem[int(ad)]
        if b:
            data = data | mask
        else:
            data = data & ~mask
self.mem[int(ad)] = data
def set_bit(self, addr):
self._bit_op(addr, True)
def reset_bit(self, addr):
self._bit_op(addr, False)
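# Illustrative demo sketch (hypothetical word/bit values); it assumes the
# read-modify-write behaviour of _bit_op described above.
if __name__ == '__main__':
    m = memory(16)
    m.set_bit('3.2')                 # set bit 2 of word 3
    print(m[3])                      # 4
    print(bool(m.get_bit('3.2')))    # True
    m.reset_bit('3.2')
    print(bool(m.get_bit('3.2')))    # False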
|
schroeder-/pyssps
|
src/memory.py
|
Python
|
mit
| 1,015
|
import contextlib
from six import StringIO
from ExtractMsg import Message
from fulltext.util import BaseBackend
class Backend(BaseBackend):
def handle_path(self, path):
text = StringIO()
with contextlib.closing(Message(path)) as m:
text.write(m.subject)
text.write(u'\n\n')
text.write(m.body)
return text.getvalue()
|
btimby/fulltext
|
fulltext/backends/__msg.py
|
Python
|
mit
| 384
|
from django.conf.urls.defaults import patterns, url
from webdnd.player.views.account import UserSearchApi
urlpatterns = patterns('webdnd.player.views',
# Account
url(r'^account/search/(?P<text>.*)/?$', UserSearchApi.as_view(), name="account_api_search"),
)
|
Saevon/webdnd
|
player/urls/api.py
|
Python
|
mit
| 267
|
# main.py
# Scott M. Phillips
# 31 December 2015
import sys
import argparse
from directoryconversiongui import directoryconversiongui
from propresenterconverter import propresenterconverter
def parsecommandline():
parser = argparse.ArgumentParser(
description='Convert Propresenter6 files from single to triple-wide configurations.',
prog='propresenter-conversion', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--inputfile', type=str, default=None, help='Input file to convert')
parser.add_argument('--outputfile', type=str, default=None, help='Output file to write to')
parser.add_argument('--inputdir', type=str, default=None, help='Input directory to use for conversion')
parser.add_argument('--outputdir', type=str, default=None, help='Output directory to use for conversion')
# Parse all the arguments.
cmdargs = parser.parse_args()
return cmdargs
if __name__ == "__main__":
args = parsecommandline()
p6conv = propresenterconverter()
if args.inputfile is not None and args.outputfile is not None:
p6conv.processfile(args.inputfile, args.outputfile)
if args.inputdir is not None and args.outputdir is not None:
p6conv.processdirectory(args.inputdir, args.outputdir)
|
fundthmcalculus/propresenter-conversion
|
main.py
|
Python
|
mit
| 1,289
|
from __future__ import unicode_literals
import json
import os
import mock
from django.contrib import admin
from django.contrib.admin.utils import quote
from django.contrib.auth import get_permission_codename
from django.contrib.auth.models import User, Permission
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase, RequestFactory
from django.urls import reverse
from django.utils.html import format_html
from django.utils.text import capfirst
from .admin import ThreadAdmin, PostAdmin
from .models import Thread, Post
from disqus_interface import DisqusQuery, send_request_to_disqus, DISQUSAPIError
from .utils import cache_clearer, query_cache
TEST_DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'test_data')
THREADS_LIST_RESPONSE = json.load(open(os.path.join(TEST_DATA_DIR, 'threads_list.json'), 'r'))
POSTS_LIST_RESPONSE = json.load(open(os.path.join(TEST_DATA_DIR, 'posts_list.json'), 'r'))
POST_DETAIL_RESPONSE = json.load(open(os.path.join(TEST_DATA_DIR, 'post_detail.json'), 'r'))
SINGLE_THREAD_LIST_RESPONSE = json.load(open(os.path.join(TEST_DATA_DIR, 'single_thread_list.json'), 'r'))
def get_perm(Model, perm):
"""Return the permission object, for the Model"""
ct = ContentType.objects.get_for_model(Model)
return Permission.objects.get(content_type=ct, codename=perm)
def thread_factory(thread_data):
thread = Thread.objects.create(
id=int(thread_data.get('id')),
forum=thread_data.get('forum'),
is_closed=thread_data.get('isClosed'),
title=thread_data.get('title'),
)
return thread
def post_factory(post_data):
post = Post.objects.create(
id=int(post_data.get('id')),
forum=post_data.get('forum'),
is_approved=post_data.get('isApproved'),
message=post_data.get('raw_message'),
)
return post
class MockSuperUser(object):
is_active = True
is_staff = True
def has_perm(self, perm):
return True
def has_module_perms(self, app_label):
return True
class DisqusAdminTest(TestCase):
@mock.patch.object(DisqusQuery, 'get_threads_list', return_value=THREADS_LIST_RESPONSE)
def test_thread_change_list_view__normal_case__correct_template_response(self, _):
changelist_url = reverse(
'{admin_site_name}:{app_label}_{model_name}_changelist'.format(
admin_site_name=admin.site.name,
app_label=Thread._meta.app_label,
model_name=Thread._meta.model_name
))
request = RequestFactory().get(changelist_url, follow=True)
request.user = MockSuperUser()
response = ThreadAdmin(Thread, admin.site).changelist_view(request)
# what to test:
# 1. template is admin/change_list.html and its subclass template
# 2. status code 200
# 3. thread objects == response
# They should be tested together
# All objects
qs = Thread.objects.filter()
template_names = set([
'admin/change_list.html',
'admin/disqus_backstore/change_list.html',
'admin/disqus_backstore/thread/change_list.html',
])
self.assertEqual(response.status_code, 200)
self.assertEqual(set(response.template_name), template_names)
self.assertEqual(list(response.context_data['cl'].result_list), list(qs))
@mock.patch.object(DisqusQuery, 'get_threads_list', return_value=THREADS_LIST_RESPONSE)
@mock.patch.object(DisqusQuery, 'get_posts_list', return_value=POSTS_LIST_RESPONSE)
def test_post_change_list_view__normal_case__correct_template_response(self, _, __):
changelist_url = reverse(
'{admin_site_name}:{app_label}_{model_name}_changelist'.format(
admin_site_name=admin.site.name,
app_label=Post._meta.app_label,
model_name=Post._meta.model_name
))
request = RequestFactory().get(changelist_url, follow=True)
request.user = MockSuperUser()
response = PostAdmin(Post, admin.site).changelist_view(request)
# what to test:
# 1. template is admin/change_list.html and its subclass template
# 2. status code 200
# 3. thread objects == response
# They should be tested together
# All objects
qs = Post.objects.filter()
template_names = set([
'admin/change_list.html',
'admin/disqus_backstore/change_list.html',
'admin/disqus_backstore/post/change_list.html',
])
self.assertEqual(response.status_code, 200)
self.assertEqual(set(response.template_name), template_names)
self.assertEqual(list(response.context_data['cl'].result_list), list(qs))
@mock.patch.object(DisqusQuery, 'get_threads_list', return_value=SINGLE_THREAD_LIST_RESPONSE)
def test_thread_change_form_view__normal_case__correct_template_response(self, _):
thread_data = SINGLE_THREAD_LIST_RESPONSE['response'][0]
thread_object = thread_factory(thread_data)
change_url = reverse('{admin_site_name}:{app_label}_{model_name}_change'.format(
admin_site_name=admin.site.name,
app_label=Thread._meta.app_label,
model_name=Thread._meta.model_name
), args=[thread_object.id])
request = RequestFactory().get(change_url, follow=True)
request.user = MockSuperUser()
response = ThreadAdmin(Thread, admin.site).change_view(request, thread_data['id'])
# what to test:
# 1. template is admin/change_form.html and its subclass template
# 2. status code 200
# 3. thread object id is equal to the form bounded value
# (So they are the same one.)
# They should be tested together
# All objects
template_names = set([
'admin/change_form.html',
'admin/disqus_backstore/change_form.html',
'admin/disqus_backstore/thread/change_form.html',
])
self.assertEqual(response.status_code, 200)
self.assertEqual(set(response.template_name), template_names)
self.assertEqual(
response.context_data['adminform'].form['id'].value(),
thread_object.id
)
@mock.patch.object(DisqusQuery, 'get_threads_list', return_value=THREADS_LIST_RESPONSE)
@mock.patch.object(DisqusQuery, 'get_posts_list', return_value=POSTS_LIST_RESPONSE)
def test_post_change_form_view__normal_case__correct_template_response(self, _, __):
post_data = POST_DETAIL_RESPONSE['response']
post_object = post_factory(post_data)
change_url = reverse('{admin_site_name}:{app_label}_{model_name}_change'.format(
admin_site_name=admin.site.name,
app_label=Post._meta.app_label,
model_name=Post._meta.model_name
), args=[post_object.id])
request = RequestFactory().get(change_url, follow=True)
request.user = MockSuperUser()
response = PostAdmin(Post, admin.site).change_view(request, post_data['id'])
template_names = set([
'admin/change_form.html',
'admin/disqus_backstore/change_form.html',
'admin/disqus_backstore/post/change_form.html',
])
self.assertEqual(response.status_code, 200)
self.assertEqual(set(response.template_name), template_names)
self.assertEqual(
response.context_data['adminform'].form['id'].value(),
post_object.id
)
@mock.patch.object(DisqusQuery, 'get_threads_list', return_value=THREADS_LIST_RESPONSE)
@mock.patch.object(DisqusQuery, 'get_posts_list', return_value=POSTS_LIST_RESPONSE)
def test_thread_delete_view__get__success(self, _, __):
thread_data = THREADS_LIST_RESPONSE['response'][0]
post_data = POSTS_LIST_RESPONSE['response'][0]
thread_object = thread_factory(thread_data)
related_post_object = post_factory(post_data)
related_post_object.thread = thread_object
delete_url = reverse('{admin_site_name}:{app_label}_{model_name}_delete'.format(
admin_site_name=admin.site.name,
app_label=Thread._meta.app_label,
model_name=Thread._meta.model_name
), args=[thread_object.id])
request = RequestFactory().get(delete_url, follow=True)
request.user = MockSuperUser()
response = ThreadAdmin(Thread, admin.site).delete_view(request, str(thread_object.id))
template_names = set([
'admin/delete_confirmation.html',
'admin/disqus_backstore/delete_confirmation.html',
'admin/disqus_backstore/thread/delete_confirmation.html',
])
self.assertEqual(response.status_code, 200)
self.assertEqual(set(response.template_name), template_names)
        # Build the expected deleted_objects context using the same formatting
        # as django.contrib.admin.utils.get_deleted_objects.
        # The related post objects come back as a nested list, so the post
        # entry is wrapped in a list below.
deleted_objects = [format_html('{}: <a href="{}">{}</a>',
capfirst(obj.__class__._meta.verbose_name),
reverse('%s:%s_%s_change' % (
admin.site.name,
obj._meta.app_label,
obj._meta.model_name
), None, (quote(obj._get_pk_val()),)),
obj) for obj in [thread_object, related_post_object]]
deleted_objects[1] = [deleted_objects[1]]
self.assertEqual(sorted(response.context_data['deleted_objects']),
sorted(deleted_objects))
@mock.patch.object(DisqusQuery, 'get_threads_list', return_value=THREADS_LIST_RESPONSE)
@mock.patch.object(DisqusQuery, 'get_posts_list', return_value=POSTS_LIST_RESPONSE)
def test_thread_delete_action__post__success(self, _, __):
thread_data = THREADS_LIST_RESPONSE['response'][0]
post_data = POSTS_LIST_RESPONSE['response'][0]
thread_object = thread_factory(thread_data)
related_post_object = post_factory(post_data)
related_post_object.thread = thread_object
        # The user is created the same way as in
        # `django.tests.admin_view.tests.AdminViewPermissionTests.test_delete_view`.
        # RequestFactory can't run middleware: even if we set
        # `request._dont_enforce_csrf_checks` to skip the CSRF check, we would
        # still have to fake the message storage used by MessageMiddleware in
        # the admin view. Working around that would make the test hard to
        # follow, so this test goes through `self.client` directly.
deleteuser = User.objects.create_user(
username='deleteuser',
password='secret',
is_staff=True
)
deleteuser.user_permissions.add(
get_perm(
Thread,
get_permission_codename('delete', Thread._meta)
)
)
deleteuser.user_permissions.add(
get_perm(
Post,
get_permission_codename('delete', Post._meta)
)
)
deleteuser.user_permissions.add(
get_perm(
Thread,
get_permission_codename('change', Thread._meta)
)
)
deleteuser.user_permissions.add(
get_perm(
Post,
get_permission_codename('change', Post._meta)
)
)
self.client.force_login(deleteuser)
delete_action_url = reverse('{admin_site_name}:{app_label}_{model_name}_changelist'.format(
admin_site_name=admin.site.name,
app_label=Thread._meta.app_label,
model_name=Thread._meta.model_name
))
delete_action_dict = {
'action': 'delete_selected',
'select_across': 0,
'index': 0,
'_selected_action': thread_object.id
}
response = self.client.post(delete_action_url, delete_action_dict)
template_names = set([
'admin/delete_selected_confirmation.html',
'admin/disqus_backstore/delete_selected_confirmation.html',
'admin/disqus_backstore/thread/delete_selected_confirmation.html',
])
self.assertEqual(response.status_code, 200)
self.assertEqual(set(response.template_name), template_names)
        # Build the expected deleted_objects context using the same formatting
        # as django.contrib.admin.utils.get_deleted_objects.
deleted_objects = [format_html('{}: <a href="{}">{}</a>',
capfirst(obj.__class__._meta.verbose_name),
reverse('%s:%s_%s_change' % (
admin.site.name,
obj._meta.app_label,
obj._meta.model_name
), None, (quote(obj._get_pk_val()),)),
obj) for obj in [thread_object, related_post_object]]
        # The related post objects come back as a nested list, so wrap the
        # post entry in a list.
        deleted_objects[1] = [deleted_objects[1]]
        # Because this is the delete_selected action, the whole group is
        # nested in one more list.
deleted_objects = [deleted_objects]
self.assertEqual(sorted(response.context_data['deletable_objects']),
sorted(deleted_objects))
@mock.patch.object(DisqusQuery, 'delete_thread')
@mock.patch.object(DisqusQuery, 'delete_posts')
@mock.patch.object(DisqusQuery, 'get_threads_list', return_value=THREADS_LIST_RESPONSE)
@mock.patch.object(DisqusQuery, 'get_posts_list', return_value=POSTS_LIST_RESPONSE)
def test_thread_delete_view__post__success(self, _, __, delete_posts_mock, delete_thread_mock):
thread_data = THREADS_LIST_RESPONSE['response'][0]
post_data = POSTS_LIST_RESPONSE['response'][0]
thread_object = thread_factory(thread_data)
related_post_object = post_factory(post_data)
related_post_object.thread = thread_object
        # See the comment in test_thread_delete_action__post__success above for
        # why this test uses `self.client` instead of RequestFactory.
deleteuser = User.objects.create_user(
username='deleteuser',
password='secret',
is_staff=True
)
deleteuser.user_permissions.add(
get_perm(
Thread,
get_permission_codename('delete', Thread._meta)
)
)
deleteuser.user_permissions.add(
get_perm(
Post,
get_permission_codename('delete', Post._meta)
)
)
self.client.force_login(deleteuser)
delete_url = reverse('{admin_site_name}:{app_label}_{model_name}_delete'.format(
admin_site_name=admin.site.name,
app_label=Thread._meta.app_label,
model_name=Thread._meta.model_name
), args=[thread_object.id])
delete_dict = {'post': 'yes'}
response = self.client.post(delete_url, delete_dict)
self.assertEqual(response.status_code, 302)
delete_thread_mock.assert_called_once_with(thread_object.id)
delete_posts_mock.assert_called_once()
@mock.patch.object(DisqusQuery, 'get_threads_list', return_value=THREADS_LIST_RESPONSE)
@mock.patch.object(DisqusQuery, 'get_posts_list', return_value=POSTS_LIST_RESPONSE)
def test_post_delete_view__get__success(self, _, __):
thread_data = THREADS_LIST_RESPONSE['response'][0]
post_data = POSTS_LIST_RESPONSE['response'][0]
thread_object = thread_factory(thread_data)
post_object = post_factory(post_data)
post_object.thread = thread_object
delete_url = reverse('{admin_site_name}:{app_label}_{model_name}_delete'.format(
admin_site_name=admin.site.name,
app_label=Post._meta.app_label,
model_name=Post._meta.model_name
), args=[post_data['id']])
request = RequestFactory().get(delete_url, follow=True)
request.user = MockSuperUser()
response = PostAdmin(Post, admin.site).delete_view(request, post_data['id'])
template_names = set([
'admin/delete_confirmation.html',
'admin/disqus_backstore/delete_confirmation.html',
'admin/disqus_backstore/post/delete_confirmation.html',
])
self.assertEqual(response.status_code, 200)
self.assertEqual(set(response.template_name), template_names)
        # Build the expected deleted_objects context using the same formatting
        # as django.contrib.admin.utils.get_deleted_objects.
deleted_objects = [format_html('{}: <a href="{}">{}</a>',
capfirst(obj.__class__._meta.verbose_name),
reverse('%s:%s_%s_change' % (
admin.site.name,
obj._meta.app_label,
obj._meta.model_name
), None, (quote(obj._get_pk_val()),)),
obj) for obj in [post_object]]
self.assertEqual(response.context_data['deleted_objects'],
deleted_objects)
@mock.patch.object(DisqusQuery, 'delete_post')
@mock.patch.object(DisqusQuery, 'get_threads_list', return_value=THREADS_LIST_RESPONSE)
@mock.patch.object(DisqusQuery, 'get_posts_list', return_value=POSTS_LIST_RESPONSE)
def test_post_delete_view__post__success(self, _, __, delete_post_mock):
thread_data = THREADS_LIST_RESPONSE['response'][0]
post_data = POSTS_LIST_RESPONSE['response'][0]
thread_object = thread_factory(thread_data)
post_object = post_factory(post_data)
post_object.thread = thread_object
deleteuser = User.objects.create_user(
username='deleteuser',
password='secret',
is_staff=True
)
deleteuser.user_permissions.add(
get_perm(
Thread,
get_permission_codename('delete', Thread._meta)
)
)
deleteuser.user_permissions.add(
get_perm(
Post,
get_permission_codename('delete', Post._meta)
)
)
self.client.force_login(deleteuser)
delete_url = reverse('{admin_site_name}:{app_label}_{model_name}_delete'.format(
admin_site_name=admin.site.name,
app_label=Post._meta.app_label,
model_name=Post._meta.model_name
), args=[post_data['id']])
delete_dict = {'post': 'yes'}
response = self.client.post(delete_url, delete_dict)
self.assertEqual(response.status_code, 302)
delete_post_mock.assert_called_once_with(post_object.id)
class DisqusThreadQuerySetTest(TestCase):
def test_get__normal_case__get_object_successfully(self):
thread_data = THREADS_LIST_RESPONSE['response'][0]
thread_id = int(thread_data.get('id'))
with mock.patch.object(DisqusQuery, 'get_threads_list', return_value={
'response': [thread_data]
}):
obj = Thread.objects.get(id=thread_id)
self.assertEqual(obj.id, thread_id)
class DisqusQueryTest(TestCase):
def test_call_disqus_api__response_code_not_zero__raise_exception(self):
class Error(object):
def json(self):
error_response = {
"code": 7,
"response": "It's a DISQUS api error response mock."
}
return error_response
with mock.patch('requests.get', return_value=Error()):
with self.assertRaises(DISQUSAPIError):
send_request_to_disqus("threads", "list", "get", {})
class UtilsTest(TestCase):
def test_query_cache__no_parameter__works(self):
class A(object):
x = 0
@query_cache('f')
def f1(self):
self.x += 1
return self.x
a = A()
y1 = a.f1()
self.assertEqual(y1, 1)
# Result is cached so it should still return 1
y2 = a.f1()
self.assertEqual(y2, 1)
def test_query_cache__with_kwargs__works(self):
class A(object):
x = 0
@query_cache('f')
def f1(self, kw=0):
self.x += kw
return self.x
a = A()
y1 = a.f1(kw=1)
self.assertEqual(y1, 1)
# Result is cached since kwarg is the same
y2 = a.f1(kw=1)
self.assertEqual(y2, 1)
# Result is calculated since kwarg is different
y3 = a.f1(kw=2)
self.assertEqual(y3, a.x)
def test_query_cache__with_args_and_kwargs__works(self):
class A(object):
x = 0
@query_cache('f')
def f1(self, arg, kw=0):
self.x += (arg + kw)
return self.x
a = A()
y1 = a.f1(1, kw=1)
self.assertEqual(y1, 2)
# Result is cached since all args are the same
y2 = a.f1(1, kw=1)
self.assertEqual(y2, 2)
# Result is calculated since arg is different
y3 = a.f1(2, kw=1)
self.assertEqual(y3, a.x)
def test_cache_clearer__update_operation__cache_cleared(self):
class API(object):
version = 0
@query_cache('thread')
def get_thread(self, thread_id):
return 'thread_{id}_{version}'.format(
id=thread_id,
version=self.version
)
@query_cache('post')
def get_post(self, post_id):
return 'post_{id}_{version}'.format(
id=post_id,
version=self.version
)
@cache_clearer(['thread'])
def update_thread(self, thread_id):
self.version += 1
api = API()
y1 = api.get_thread(1)
# Result is cached since all args are the same
y2 = api.update_thread(1)
y3 = api.get_thread(1)
self.assertEqual(y3, "thread_1_1")
def test_query_cache__after_n_seconds__will_be_cleared(self):
# No cached time
refreshed_seconds = -1
class API(object):
version = 0
categories = dict()
@query_cache('thread',refreshed_seconds=refreshed_seconds)
def get_thread(self, thread_id):
self.version += 1
return 'thread_{id}_{version}'.format(
id=thread_id,
version=self.version
)
api = API()
y1 = api.get_thread(1)
y2 = api.get_thread(1)
self.assertEqual(y2, "thread_1_2")
|
chenesan/django-disqus-backstore
|
disqus_backstore/tests.py
|
Python
|
mit
| 23,738
|
#http://doc.aldebaran.com/2-5/naoqi/peopleperception/alfacedetection.html
import os
import qi
import argparse
import sys
import time
import threading
def onDetection(value):
#
# print "onDetection ::value=",value
if(len(value) > 0):
detectionTimestamp=value[0]
cameraPose_InTorsoFrame=value[2]
cameraPose_InRobotFrame=value[3]
cameraId=value[4]
        if(len(value[1]) > 0): # just in case the ALValue is in the wrong format
#Detecting changes
# get the ALValue returned by the time filtered recognition:
# - [] when nothing new.
# - [4] when a face has been detected but not recognized during the first 8s.
# - [2, [faceName]] when one face has been recognized.
# - [3, [faceName1, faceName2, ...]] when several faces have been recognized.
timeFilteredResult = value[1][len(value[1]) -1]
if( len(timeFilteredResult) == 1 ):
# If a face has been detected for more than 8s but not recognized # TODO: Try to learn face??
if(timeFilteredResult[0] == 4):
pass
elif( len(timeFilteredResult) == 2 ):
# If one or several faces have been recognized
if(timeFilteredResult[0] in [2, 3]):
for s in timeFilteredResult[1]:
print 'persons recognized : ',s
# TODO: extract data for each face detected value[1][..]
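# Illustrative sketch (field contents are made up, not taken from the NAOqi
# docs linked above) of the FaceDetected memory value that onDetection() unpacks:
#   value = [ detectionTimestamp,
#             [ faceInfo0, ..., faceInfoN, [2, ["Alice"]] ],   # time-filtered result is the last element
#             cameraPose_InTorsoFrame,
#             cameraPose_InRobotFrame,
#             cameraId ]
# Here [2, ["Alice"]] means exactly one known face ("Alice") was recognized.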
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--pip", type=str, default=os.environ['PEPPER_IP'],
help="Robot IP address. On robot or Local Naoqi: use '127.0.0.1'.")
parser.add_argument("--pport", type=int, default=9559,
help="Naoqi port number")
args = parser.parse_args()
pip = args.pip
pport = args.pport
#Starting application
try:
connection_url = "tcp://" + pip + ":" + str(pport)
app = qi.Application(["PeopleDetection", "--qi-url=" + connection_url ])
except RuntimeError:
print ("Can't connect to Naoqi at ip \"" + pip + "\" on port " + str(pport) +".\n"
"Please check your script arguments. Run with -h option for help.")
sys.exit(1)
app.start()
session = app.session
#Starting services
memory_service = session.service("ALMemory")
    #Testing some functions from the ALFaceDetection module
service = session.service("ALFaceDetection")
# list of faces currently trained
learnedlist = service.getLearnedFacesList()
print 'learnedlist=',learnedlist
print 'RecognitionEnabled=',service.isRecognitionEnabled()
service.setRecognitionEnabled(True)
# Connect the event callback.
anyDetection = memory_service.subscriber("FaceDetected")
idAnyDetection = anyDetection.signal.connect(onDetection)
#Program stays at this point until we stop it
app.run()
    #Disconnecting callbacks
    anyDetection.signal.disconnect(idAnyDetection)
print "Finished"
if __name__ == "__main__":
main()
|
LCAS/spqrel_tools
|
face_detection/face_detection.py
|
Python
|
mit
| 3,260
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import unittest
from oaxmlapi import datatypes
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
class TestDatatypesClass(unittest.TestCase):
def test_str(self):
self.assertEqual(
str(datatypes.Datatype('Date', {})),
'<Datatype type=Date>'
)
def test_getDatatype(self):
self.assertIsInstance(
datatypes.Datatype('Date', {}).getDatatype(),
ET.Element
)
def test_tostring(self):
self.assertEqual(
datatypes.Datatype(
'Date',
{
'month': '03'
}
).tostring(),
b'<Date><month>03</month></Date>'
)
def test_tostring_filter(self):
cust_filter = datatypes.Datatype('Filter', {'id': '1'})
self.assertEqual(
cust_filter.tostring(),
b'<Filter type="customer"><id>1</id></Filter>'
)
def test_tostring_addr(self):
contact = datatypes.Datatype('Contact', {'name': 'John Doe', 'email': 'john.doe@email.com'})
self.assertEqual(
contact.tostring(),
(
b'<Contact><name>John Doe</name><addr><Address>'
b'<email>john.doe@email.com</email></Address></addr></Contact>'
)
)
def test_tostring_embedded(self):
date = datatypes.Datatype(
'Date',
{
'month': '03'
}
)
task = datatypes.Datatype(
'Task',
{
'date': date
}
)
self.assertEqual(
task.tostring(),
b'<Task><date><Date><month>03</month></Date></date></Task>'
)
def test_prettify(self):
self.assertEqual(
datatypes.Datatype(
'Date',
{
'month': '03'
}
).prettify(),
(
b'<?xml version="1.0" encoding="utf-8"?>\n'
b'<Date>\n'
b' <month>03</month>\n'
b'</Date>\n'
)
)
suite = unittest.TestLoader().loadTestsFromTestCase(TestDatatypesClass)
unittest.TextTestRunner(verbosity=2).run(suite)
|
23maverick23/oaxmlapi
|
tests/datatypes/test_datatypes.py
|
Python
|
mit
| 2,369
|
__author__ = 'novokonst'
import ply.lex as lex
def check_comment(fn):
def wrapped(self, t):
if self.nested_comment:
t.type = 'COMMENT'
return t
else:
return fn(self, t)
wrapped.__doc__ = fn.__doc__
return wrapped
class DummyLexer:
"""
Need to set MY_KEYWORDS and implement comment policy
"""
MY_KEYWORDS = []
t_ignore = ' \t'
def __init__(self):
self.__class__.RESERVED = {kw: kw for kw in self.__class__.MY_KEYWORDS}
        self.__class__.tokens = ['COMMENT'] + list(self.__class__.RESERVED.values()) + ['ID']
self.refresh()
def t_error(self, t):
t.lexer.skip(1)
def t_newline(self, t):
r'\n+'
t.lexer.lineno += len(t.value)
    @check_comment
    def t_ID(self, t):
        r'[A-Za-z_][A-Za-z_0-9]*'  # assumed identifier regex; ply reads the rule pattern from the docstring
        t.type = self.__class__.RESERVED.get(t.value, 'ID')
        return t
def build(self, **kwargs):
self.lexer = lex.lex(module=self, **kwargs)
def refresh(self):
self.skipped = []
self.nested_comment = 0
self.out_token_dict = {}
def tokenize(self, data):
self.lexer.input(data)
while True:
tok = self.lexer.token()
if not tok: break
self.out_token_dict[tok.type] = self.out_token_dict.get(tok.type, [])
self.out_token_dict[tok.type].append(tok)
return self.out_token_dict
def keywords_ex_stats(self, extra_type_list=[]):
keys = self.__class__.MY_KEYWORDS + extra_type_list
return {k: self.out_token_dict.get(k, []) for k in keys}
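# Hedged usage sketch (not part of the original module): assuming ply is
# installed and that t_ID carries the identifier regex added above, a subclass
# only needs to declare MY_KEYWORDS before building the lexer. The class name
# TinyLexer and the sample input are illustrative only.
if __name__ == "__main__":
    class TinyLexer(DummyLexer):
        MY_KEYWORDS = ['begin', 'end']
    tiny = TinyLexer()
    tiny.build()
    print(tiny.tokenize("begin foo bar end"))   # tokens grouped by type
    print(tiny.keywords_ex_stats(['ID']))       # keyword/ID occurrence lists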
|
sayon/ignoreme
|
tokenizers/general/dummylex.py
|
Python
|
mit
| 1,584
|
from .spacynlp import string_id
SUBJECT = string_id('nsubj')
SUBJECTPASS = string_id('nsubjpass')
CLAUSAL_SUBJECT = string_id('csubj')
ATTRIBUTE = string_id('attr')
RELCL = string_id('relcl')
ADJMOD = string_id('amod')
NPADVMOD = string_id('npadvmod')
NOUNMOD = string_id('nmod')
NUMMOD = string_id('nummod')
COMPOUND = string_id('compound')
CONJUNCT = string_id('conj')
APPOS = string_id('appos')
PREPOSITION = string_id('prep')
POBJ = string_id('pobj')
DETERMINER = string_id('det')
def is_subject(token):
return token.dep in [SUBJECT, SUBJECTPASS]
def is_direct_subject(token):
return token.dep == SUBJECT
def is_clausal_subject(token):
return token.dep == CLAUSAL_SUBJECT
def is_attribute(token):
return token.dep == ATTRIBUTE
def is_relative_clause(token):
return token.dep == RELCL
def is_compound(token):
return token.dep == COMPOUND
def is_modifier(token):
return token.dep in [ADJMOD, NOUNMOD, NPADVMOD]
def is_conjunct(token):
return token.dep == CONJUNCT
def is_apposition(token):
return token.dep == APPOS
def is_preposition(token, preposition=None):
return token.dep == PREPOSITION and \
(preposition is None or token.text.lower() == preposition.lower())
def is_prepositional_object(token):
return token.dep == POBJ
def is_hyphen_mod(token, head):
"""Check if token and head are connected by a hyphen"""
return token.i == head.i - 2 and token.nbor(1).text == u'-'
def is_numeric_modifier(token):
return token.dep == NUMMOD
def is_determiner(token):
return token.dep == DETERMINER
|
infolab-csail/whoami
|
whoami/spacydep.py
|
Python
|
mit
| 1,590
|
class Solution:
    def findKthNumber(self, m: int, n: int, k: int) -> int:
        # Binary search on the answer value rather than on table positions.
        if m > n:
            m, n = n, m
        low = 1
        high = m * n
        while low < high:
            mid = (low + high) // 2
            # Count how many entries of the m x n multiplication table are <= mid:
            # row i contributes min(mid // i, n) such entries.
            count = 0
            for i in range(1, m + 1):
                count += min(mid // i, n)
            if count < k:
                low = mid + 1
            else:
                high = mid
        # low is the smallest value with at least k table entries <= it,
        # i.e. the kth smallest entry.
        return low
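# Hedged usage sketch (not part of the original file): in a 3 x 3
# multiplication table the sorted values are 1,2,2,3,3,4,6,6,9, so the
# 5th smallest entry is 3.
if __name__ == "__main__":
    assert Solution().findKthNumber(3, 3, 5) == 3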
|
jiadaizhao/LeetCode
|
0601-0700/0668-Kth Smallest Number in Multiplication Table/0668-Kth Smallest Number in Multiplication Table.py
|
Python
|
mit
| 439
|
from django.contrib import admin
# Register your models here.
from models import *
class LogAdmin(admin.ModelAdmin):
list_display = ('id', 'date', 'ttype', 'message', 'user')
list_filter = ('ttype',)
class CityAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'name_gde', 'slug', 'timediffk', 'wunderground', 'point')
class BusAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'ttype', 'murl', 'napr_a', 'napr_b', 'city')
search_fields = ['name']
list_filter = ('ttype','city')
class RouteAdmin(admin.ModelAdmin):
list_display = ('id', 'bus', 'busstop', 'direction', 'order', 'endpoint')
search_fields = ['bus__name']
list_filter = ('direction', 'bus__ttype', 'bus__city', 'bus')
class NBusStopAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'moveto', 'city')
search_fields = ['name']
list_filter = ('city',)
class UserTimerAdmin(admin.ModelAdmin):
list_display = ('id', 'user', 'date', 'minutes')
search_fields = ['user']
list_filter = ('date',)
    raw_id_fields = ("user",)
class TransactionAdmin(admin.ModelAdmin):
list_display = ('id', 'ctime', 'end_time', 'user', 'value', 'fiat', 'phone', 'notified', 'comment')
search_fields = ['user', 'comment']
list_filter = ('ctime','end_time')
    raw_id_fields = ("user",)
date_hierarchy = 'ctime'
class TimetableAdmin(admin.ModelAdmin):
list_display = ('id', 'bus', 'busstop', 'direction', 'time', 'holiday', 'xeno_title')
search_fields = ['bus', 'busstop']
list_filter = ('bus', 'holiday')
class BonusAdmin(admin.ModelAdmin):
list_display = ('id', 'ctime', 'mtime', 'pin', 'comment', 'days', 'activated')
class SpecialIconAdmin(admin.ModelAdmin):
list_display = ('id', 'ctime', 'gosnum', 'img', 'active')
class SongAdmin(admin.ModelAdmin):
list_display = ('id', 'ctime', 'url', 'name_short', 'active')
list_filter = ('active',)
admin.site.register(Log, LogAdmin)
admin.site.register(City, CityAdmin)
admin.site.register(Bus, BusAdmin)
admin.site.register(Route, RouteAdmin)
admin.site.register(NBusStop, NBusStopAdmin)
admin.site.register(Sound)
admin.site.register(UserTimer, UserTimerAdmin)
admin.site.register(Transaction, TransactionAdmin)
admin.site.register(Timetable, TimetableAdmin)
admin.site.register(Bonus, BonusAdmin)
admin.site.register(Song, SongAdmin)
admin.site.register(SpecialIcon, SpecialIconAdmin)
|
norn/bustime
|
bustime/admin.py
|
Python
|
mit
| 2,399
|
__author__ = 'lewuathe'
import numpy as np
import hashlib
import common
def __calc_with_hash(vec, m, target):
for v in vec:
m.update(v)
if target == 'hex':
return m.hexdigest()
else:
return common.hex2dec(m.hexdigest())
def md5_for_vec(vec, target = 'dec'):
"""
Calculate hexdigest with md5 for given vector
:param vec:
:return string:
"""
m = hashlib.md5()
return __calc_with_hash(vec, m, target)
def sha1_for_vec(vec, target = 'dec'):
"""
Calculate hexdigest with sha1 for given vector
:param vec:
:return string:
"""
m = hashlib.sha1()
return __calc_with_hash(vec, m, target)
def sha224_for_vec(vec, target = 'dec'):
"""
Calculate hexdigest with sha224 for given vector
:param vec:
:return string:
"""
m = hashlib.sha224()
return __calc_with_hash(vec, m, target)
def sha256_for_vec(vec, target = 'dec'):
"""
Calculate hexdigest with sha256 for given vector
:param vec:
:return string:
"""
m = hashlib.sha256()
return __calc_with_hash(vec, m, target)
def sha384_for_vec(vec, target = 'dec'):
"""
Calculate hexdigest with sha384 for given vector
:param vec:
:return string:
"""
m = hashlib.sha384()
return __calc_with_hash(vec, m, target)
def sha512_for_vec(vec, target = 'dec'):
"""
Calculate hexdigest with sha512 for given vector
:param vec:
:return string:
"""
    m = hashlib.sha512()
return __calc_with_hash(vec, m, target)
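# Hedged self-check sketch (not part of the original module): feeding the
# vector element by element is equivalent to hashing the concatenation of its
# elements; target='hex' avoids the common.hex2dec conversion used by the
# default 'dec' target.
if __name__ == "__main__":
    assert md5_for_vec([b'a', b'b', b'c'], target='hex') == hashlib.md5(b'abc').hexdigest()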
|
PhysicsEngine/kHLL
|
kHLL/hash/image.py
|
Python
|
mit
| 1,542
|
import os
username = os.environ['ttiUsername']
password = os.environ['ttiPassword']
from pandac.PandaModules import *
accountServerEndpoint = ConfigVariableString('account-server-endpoint', 'https://toontowninfinite.net/api/').getValue()
http = HTTPClient()
http.setVerifySsl(0)
def executeHttpRequest(url, message):
channel = http.makeChannel(True)
rf = Ramfile()
spec = DocumentSpec(accountServerEndpoint + '/' + url)
if channel.getDocument(spec) and channel.downloadToRam(rf):
return rf.getData()
response = executeHttpRequest(
'login?n={0}&p={1}'.format(username, password),
username + password)
import json
try:
    response = json.loads(response)
except ValueError:
    print 'Invalid username and/or password. Please try again.'
    # Bail out here instead of falling through with a non-JSON response.
    raise SystemExit(1)
if not response['success']:
print response['reason']
else:
os.environ['TTI_PLAYCOOKIE'] = response['token']
# Start the game:
import toontown.toonbase.ToontownStart
|
ToonTownInfiniteRepo/ToontownInfinite
|
toontown/toonbase/ToontownStartRemote.py
|
Python
|
mit
| 967
|
from pprint import pprint
def _compute_coefs(la, nu_avg, N):
coefs = []
addings = [1]
for x in xrange(1, N+1):
coefs.append(nu_avg[-x]/(x * la))
addings.append(coefs[-1] * addings[-1])
return coefs, addings
def solve(la, nu_avg, N):
"""
:param la: lambda
:type la: float
:param nu_avg: nu vector
:type nu_avg: tuple
:param N: N
:type N int
"""
p = []
coefs, addings = _compute_coefs(la, nu_avg, N)
p.append(1.0 / sum(addings))
for i in xrange(0, N):
p_i = p[-1] * coefs[i]
p.append(p_i)
p = list(reversed(p))
return p
if __name__ == "__main__":
la = 0.1
nu_avg = (1, 3, 1, 2)
N = len(nu_avg)
p = solve(la, nu_avg, N)
pprint(p)
print "Sum(p) = %f" % sum(p)
|
tech-team/OpResearch
|
model1_right.py
|
Python
|
mit
| 800
|
"""link plate with analysis
Revision ID: 21ef5ce15822
Revises: 88fa93c68dab
Create Date: 2016-12-05 15:12:49.067536
"""
# revision identifiers, used by Alembic.
revision = "21ef5ce15822"
down_revision = "88fa93c68dab"
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column("analysis", sa.Column("plate_id", sa.Integer(), nullable=True))
op.create_foreign_key(None, "analysis", "plate", ["plate_id"], ["id"])
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, "analysis", type_="foreignkey")
op.drop_column("analysis", "plate_id")
### end Alembic commands ###
|
Clinical-Genomics/taboo
|
alembic/versions/21ef5ce15822_link_plate_with_analysis.py
|
Python
|
mit
| 799
|
import time
import numpy as np
import keras
import tensorflow as tf
import keras.backend as K
from keras import optimizers
from keras.models import load_model
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, Callback
from keras.models import Model
from keras.layers.recurrent import LSTM, GRU
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.advanced_activations import LeakyReLU
from keras.layers import Input, Masking  # the unused Merge import has been dropped
from keras.layers.wrappers import Bidirectional
from sklearn.utils import shuffle
from ModelHistoryCheckpointer import ModelHistoryCheckpointer
from PeriodicValidation import PeriodicValidation
from functions import calculate_top_k_new_only
class RecurrentModel:
"""
time_dim -> None or timesteps dimension size
output_length -> number of classes
input_dim -> X dataset length
attr_dim -> A dataset length
a_hidden_length -> hidden layer size for a_model branch
a_output_length -> output layer size for a_model branch
recurrent_dim -> recurrent layer size (for x_model branch)
rnn_architecture -> "lstm" or "gru"
go_direction -> 1 (forward), -1 (backward), 2 (bidirectional)
dropout_rate -> dropout rate
x_output_length -> output layer size for x_model branch
merged_data_dim -> last hidden layer size
"""
def __init__(self, time_dim,
output_length, input_dim, attr_dim,
a_hidden_length, a_output_length,
recurrent_dim, rnn_architecture, go_direction, dropout_rate, x_output_length,
merged_data_dim):
a_input = Input(shape=(attr_dim,))
a_model = Dense(a_hidden_length)(a_input)
a_model = LeakyReLU()(a_model)
a_model = Dense(a_output_length)(a_model)
a_model = LeakyReLU()(a_model)
x_input = Input(shape=(time_dim, input_dim))
x_model = Masking(mask_value=-1.0)(x_input)
RNN_Architecture = GRU if rnn_architecture == "gru" else LSTM
if go_direction in [-1, 1]:
x_model = RNN_Architecture(recurrent_dim, activation='tanh', recurrent_activation='hard_sigmoid',
return_sequences=False, go_backwards=(go_direction == -1))(x_model)
else: # go_direction == 2
x_model = Bidirectional(
RNN_Architecture(recurrent_dim, activation='tanh', recurrent_activation='hard_sigmoid', return_sequences=False),
merge_mode='concat')(x_model)
x_model = Dropout(dropout_rate)(x_model)
x_model = Dense(x_output_length)(x_model)
x_model = LeakyReLU()(x_model)
self.model = keras.layers.concatenate([a_model, x_model])
self.model = Dropout(dropout_rate)(self.model)
if merged_data_dim > 0:
self.model = Dense(merged_data_dim)(self.model)
self.model = LeakyReLU()(self.model)
self.model = Dense(output_length, activation='sigmoid')(self.model)
self.model = Model(inputs=[a_input, x_input], outputs=self.model)
self._a_model = a_model
self._x_model = x_model
def load_model_from_file(self, filename, custom_objects):
self.model = load_model(filename, custom_objects=custom_objects)
self._a_model = None
self._x_model = None
def compile(self, loss, optimizer, metrics):
self.model.compile(loss=loss, optimizer=optimizer, metrics=metrics)
def train_on_batch(self, A_buckets, X_buckets, y_buckets, num_epochs, batch_size, save_models=True):
print("model.train_on_batch")
self.checkpointer = ModelHistoryCheckpointer(self.model) if save_models else None
for epoch in range(num_epochs):
stats_all = np.zeros(len(self.model.metrics)+1)
batches_count = 0
print("epoch: " + str(epoch) + " // " + time.strftime("%H:%M:%S", time.localtime()))
for A_train, X_train, y_train in zip(A_buckets, X_buckets, y_buckets):
A_train, X_train, y_train = shuffle(A_train, X_train, y_train)
batch_indices_or_sections = [i * batch_size for i in range(1, len(X_train) // batch_size)]
A_train_batches = np.array_split(A_train, batch_indices_or_sections)
X_train_batches = np.array_split(X_train, batch_indices_or_sections)
y_train_batches = np.array_split(y_train, batch_indices_or_sections)
for A_batch, X_batch, y_batch in zip(A_train_batches, X_train_batches, y_train_batches):
stats = self.model.train_on_batch(x=[A_batch, X_batch], y=y_batch)
stats_all = stats_all + stats
batches_count += 1
if(epoch % 10 == 9):
print(stats)
print(stats_all, batches_count)
if save_models:
self.checkpointer.save_on_epoch(self.model, epoch, stats_all, batches_count)
if save_models:
self.checkpointer.save_last(self.model, epoch, stats_all, batches_count)
def predict_on_batch(self, A_test_buckets, X_test_buckets, batch_size):
print("model.predict_on_batch")
y_test_pred = np.array([]).reshape(0, 24)
for A_test, X_test in zip(A_test_buckets, X_test_buckets):
if A_test.size > 0 and X_test.size > 0:
y_pred = self.model.predict([A_test, X_test], batch_size=batch_size)
y_test_pred = np.concatenate((y_test_pred, y_pred), axis=0)
return y_test_pred
def test_on_batch(self, A_test_buckets, X_test_buckets, y_test_buckets):
print("model.test_on_batch")
stats_all = np.zeros(len(self.model.metrics)+1)
examples_count = 0
for A_test, X_test, y_test in zip(A_test_buckets, X_test_buckets, y_test_buckets):
for A, X, y in zip(A_test, X_test, y_test):
stats = self.model.test_on_batch(x=[np.array([A]), np.array([X])], y=np.array([y])) # batch of 1
stats_all = stats_all + stats
examples_count += 1
print("--> ", stats_all, examples_count)
print(stats_all, examples_count)
print(stats_all / examples_count)
def train(self, A_train, X_train, y_train, num_epochs, batch_size, validation_data=None, save_models=True):
if type(X_train).__name__ == "list":
self.train_on_batch(A_train, X_train, y_train, num_epochs, batch_size, save_models)
else: # X_train is NumPy array
checkpoint_callback = ModelCheckpoint("./models/model_"+time.strftime("%m-%d_%H-%M", time.localtime())+".h5",
monitor="loss", save_best_only=True, verbose=1)
# lr_callback = ReduceLROnPlateau(monitor="loss",
# factor=0.5, patience=5, verbose=1, mode="auto", epsilon=0.0001, cooldown=0, min_lr=0.0001)
periodic_val_callback = PeriodicValidation(validation_data, batch_size,
("./models/model_val_"+time.strftime("%m-%d_%H-%M", time.localtime())+".h5") if save_models else None)
# callbacks = [lr_callback] + ([checkpoint_callback] if save_models else []) + ([periodic_val_callback] if validation_data else [])
callbacks = ([checkpoint_callback] if save_models else []) + ([periodic_val_callback] if validation_data else [])
h = self.model.fit([A_train, X_train], y_train, batch_size, num_epochs, validation_data=None, callbacks=callbacks, verbose=2)
print(h.params)
# print("training history: ", h.params, h.history)
def predict(self, A_test, X_test, batch_size):
if type(X_test).__name__ == "list":
return self.predict_on_batch(A_test, X_test, batch_size)
# X_test is NumPy array
return self.model.predict([A_test, X_test], batch_size)
def test(self, A_test, X_test, y_test, batch_size):
if type(X_test).__name__ == "list":
h = self.test_on_batch(A_test, X_test, y_test)
print("test_on_batch history: ", h)
else: # X_test is NumPy array
h = self.model.evaluate([A_test, X_test], y_test, batch_size=batch_size, verbose=1)
print("testing history: ", h)
#
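# Hedged construction sketch (not part of the original module); the
# hyper-parameter values below are illustrative only, and running it assumes
# a Keras installation compatible with the imports at the top of this file.
if __name__ == "__main__":
    net = RecurrentModel(
        time_dim=10, output_length=24, input_dim=24, attr_dim=16,
        a_hidden_length=32, a_output_length=16,
        recurrent_dim=64, rnn_architecture="gru", go_direction=1,
        dropout_rate=0.2, x_output_length=32, merged_data_dim=48)
    net.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
    net.model.summary()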
|
DimiterM/santander
|
RecurrentModel.py
|
Python
|
mit
| 8,420
|
"""
Homework 3
Data Description:
Read the training and testing data and store each in an array; both are 284x31.
Each row consists of a class label ('1' for malignant, '-1' for benign) followed
by 30 breast cancer diagnostic measurements.
"""
import numpy as np
import os
raw_training_data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'knn_data/knn_train.csv')
raw_testing_data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'knn_data/knn_test.csv')
raw_training_data = np.loadtxt(raw_training_data_path, dtype=float, delimiter=',')
raw_testing_data = np.loadtxt(raw_testing_data_path, dtype=float, delimiter=',')
def get_data_answers(input_array):
"""Split off the first column answers and put in a dummy attribute of 1's in the first
column.
"""
input_array_split = np.split(input_array, [1], axis=1)
data_features = np.array(input_array_split[1])
length = data_features.shape[0]
data_features = np.insert(data_features, 0, np.ones((1, length)), axis=1)
answers = np.array(input_array_split[0])
return data_features, answers
def get_by_row(answers, input_data):
"""Takes a column of input answers and an array of arrays of data and returns each answer,
data set pair.
"""
for row in np.concatenate((answers, input_data), axis=1):
split_row = np.split(row, [1])
answer = split_row[0][0]
features = split_row[1]
yield answer, features
def distance(vector_one, vector_two):
"""Calculates the distance between two numpy arrays."""
return np.linalg.norm(vector_one - vector_two)
def _get_k_nearest(test_entry, training_answers, training_data):
yield '_', float("infinity"), '_'
for current_answer, current_data in get_by_row(training_answers, training_data):
yield current_data, distance(test_entry, current_data), current_answer
def get_k_nearest(k, test_entry, training_answers, training_data):
all_nearest = list(_get_k_nearest(test_entry, training_answers, training_data))
all_nearest.sort(key=lambda l: l[1])
return all_nearest[:k]
def knn_classifier(k, test_entry, training_answers, training_data):
"""Classifies a test entry using k nearest neighbor classification against training data."""
k_nearest_so_far = get_k_nearest(k, test_entry, training_answers, training_data)
class_sum = 0
for entry in k_nearest_so_far:
class_sum += entry[2]
if class_sum > 0:
return 1
else:
return -1
def pull_one_out(answers, input_data):
"""Takes a column of input answers and an array of arrays of data and returns each answer,
data set pair along with the answer, data set arrays that result from having pulled those
single entries out.
"""
number_of_rows = input_data.shape[0]
for index in range(number_of_rows):
remaining_answers = np.concatenate((answers[:index], answers[index + 1:]), axis=0)
remaining_data = np.concatenate((input_data[:index], input_data[index + 1:]), axis=0)
yield answers[index], input_data[index], remaining_answers, remaining_data
def testing_k_values(training_answers, training_data, testing_data, testing_answers):
"""Measures performance using percentage error on leave-one-out cross validation using k
nearest neighbors classification.
"""
for k in range(1, 17, 2):
training_errors = 0
leave_one_out_errors = 0
test_data_errors = 0
# Training Error
for data, answer in zip(training_data, training_answers):
estimation = knn_classifier(k, data, training_answers, training_data)
if estimation != answer:
training_errors += 1
# Cross Validation Error
for verify_answer, verify_data, remaining_answers, remaining_data in pull_one_out(
training_answers, training_data):
estimation = knn_classifier(k, verify_data, remaining_answers, remaining_data)
if estimation != verify_answer:
leave_one_out_errors += 1
# Test data error
for data, answer in zip(testing_data, testing_answers):
estimation = knn_classifier(k, data, training_answers, training_data)
if estimation != answer:
test_data_errors += 1
yield k, training_errors, leave_one_out_errors, test_data_errors
def main():
training_data, training_answers = get_data_answers(raw_training_data)
testing_data, testing_answers = get_data_answers(raw_testing_data)
for line in testing_k_values(training_answers, training_data, testing_data, testing_answers):
print("{},{},{},{}".format(*line))
if __name__ == "__main__":
main()
|
andychase/classwork
|
machine_learning/hmw3/HW3.py
|
Python
|
mit
| 4,807
|
import os
import platform
import unittest
import pytest
from conans.model.ref import ConanFileReference
from conans.test.utils.tools import TestClient, GenConanfile
class RMdirFailTest(unittest.TestCase):
@pytest.mark.skipif(platform.system() != "Windows", reason="needs windows")
def test_fail_rmdir(self):
client = TestClient()
client.save({"conanfile.py": GenConanfile()})
client.run("create . MyPkg/0.1@lasote/testing")
ref = ConanFileReference.loads("MyPkg/0.1@lasote/testing")
builds = client.cache.package_layout(ref).builds()
build_folder = os.listdir(builds)[0]
build_folder = os.path.join(builds, build_folder)
f = open(os.path.join(build_folder, "myfile"), "wb")
f.write(b"Hello world")
client.run("install MyPkg/0.1@lasote/testing --build", assert_error=True)
self.assertIn("Couldn't remove folder, might be busy or open", client.out)
|
conan-io/conan
|
conans/test/integration/cache/rmdir_fail_test.py
|
Python
|
mit
| 948
|
"""Example isosurface visualiser.
press 't' to toggle isosurface on and off
"""
import sys
import logging
import types
from renderer import BaseGlutWindow, IsosurfaceVolumeRenderer
from parser.tiff_parser import open_tiff
class ExampleIsosurfaceVisualiser(BaseGlutWindow):
def load_image(self, fpath, spacing):
self.renderer = IsosurfaceVolumeRenderer()
stack = open_tiff(fpath)
self.renderer.make_volume_obj(stack, spacing)
def draw_hook(self):
self.renderer.render(self.width, self.height, self.VMatrix, self.PMatrix)
def reshape_hook(self):
self.renderer.init_back_texture(self.width, self.height)
pass
def toggle_volume(self, x, y):
self.renderer.volume_objects[0].active = not self.renderer.volume_objects[0].active
def main():
logging.basicConfig(level=logging.DEBUG)
r = ExampleIsosurfaceVisualiser("Example Isosurface Visualiser", 800, 600)
if len(sys.argv) >= 5:
spacing = map(float, sys.argv[2:5])
else:
spacing = (1.0, 1.0, 1.0)
    r.load_image(sys.argv[1], spacing)
    # toggle_volume is already a method on the class, so bind it to the key directly.
    r.key_bindings["t"] = r.toggle_volume
r.start()
if __name__ == '__main__':
main()
|
jfozard/pyvol
|
pyvol/example_isosurface_visualiser.py
|
Python
|
mit
| 1,241
|
#!/usr/bin/env python3
import numpy as np
from numpy.core.umath_tests import matrix_multiply as _matrix_multiply
# TODO: Write unit tests for all of these helper functions.
def SphPosToCart(vectors, radians=False):
"""Convert a spherical position vector into Cartesian position.
Arguments:
vector -- A 3-element NumPy array representing, in order,
the spherical r, theta, and phi position coordinates.
Returns a 3-element NumPy array representing, in order, the
representative Cartesian x, y, and z coordinates of the input vector.
"""
if vectors.ndim == 1:
if len(vectors) != 3:
print("ERROR - SphPosToCart(): Vector not a 3-dimensional vector! Aborting.")
return
elif vectors.ndim > 2:
print("ERROR - SphPosToCart(): Only handles a list of 3D vectors \
(2-dimensional array). Aborting.")
return
if vectors.ndim == 1:
r, theta, phi = vectors
elif vectors.ndim == 2:
r = vectors[:,0]
theta = vectors[:,1]
phi = vectors[:,2]
if not radians:
theta = np.radians(theta % 360)
phi = np.radians(phi % 360)
result = np.array([r * np.sin(theta) * np.cos(phi),
r * np.sin(theta) * np.sin(phi),
r * np.cos(theta)])
# Transpose only has an effect for arrays of vectors, but puts vectors into
# rows not columns, the way it should be.
return result.T
def CartPosToSph(vectors, radians=False):
"""Convert a Cartesian position vector into spherical coordinate space.
NOTE: Largely untested...
Arguments:
vectors -- A 3-element NumPy array representing, in order,
Cartesian x, y, and z position coordinates.
Returns a 3-element NumPy array representing, in order, the
spherical r, theta, and phi position coordinates of the input vector.
"""
if vectors.ndim == 1:
if len(vectors) != 3:
print("ERROR - CartPosToSph(): Vector not a 3-dimensional vector! Aborting.")
return
elif vectors.ndim > 2:
print("ERROR - CartPosToSph(): Only handles a list of 3D vectors \
(2-dimensional array). Aborting.")
return
if vectors.ndim == 1:
x, y, z = vectors
elif vectors.ndim == 2:
x = vectors[:,0]
y = vectors[:,1]
z = vectors[:,2]
r = np.sqrt(x**2+y**2+z**2)
theta = np.arctan2(np.sqrt(x**2+y**2), z)
phi = np.arctan2(y, x)
if not radians:
theta = np.degrees(theta % (2*np.pi))
phi = np.degrees(phi % (2*np.pi))
result = np.array([r, theta, phi])
# Transpose only has an effect for arrays of vectors, but puts vectors into
# rows not columns, the way it should be.
return result.T
def SphVecToCart(position, vector, radians=False):
"""Convert spherical vectors into Cartesian vector space.
Takes spherical-space vectors and their corresponding spherical-
space positions and returns the magnitude of the vectors in x, y,
and z Cartesian directions.
Arguments:
position -- A 3-element NumPy array, or array of arrays, representing
the position of the vector(s) in spherical space.
vector -- A 3-element NumPy array, or array of arrays, representing
the vector(s) to be converted, also in spherical space.
Returns a 3-element NumPy array representing the magnitude of
the input vector in Cartesian vector space.
"""
if len(position) != len(vector):
print("ERROR - SphVecToCart(): \
Vector and position arrays must have the same length! Aborting.")
return
if position.ndim == 1 and vector.ndim == 1:
if len(position) == 3:
r, theta, phi = position
else:
print("ERROR - SphVecToCart(): \
Vectors and positions must each have three elements! Aborting.")
return
elif position.ndim == 2 and vector.ndim == 2:
# Maybe an error-checking thing for 3-element vectors like above?
r, theta, phi = position[:,[0,1,2]].T
else:
print("ERROR - SphVecToCart(): \
Vector and position arrays must have the same dimensions, or must \
be either 1D or 2D arrays! Aborting.")
return
if not radians:
theta = np.radians(theta % 360)
phi = np.radians(phi % 360)
# Calculating x-hat, y-hat, and z-hat from r-hat, theta-hat, and phi-hat
transform_matrix = np.array([
[np.sin(theta) * np.cos(phi), np.sin(theta) * np.sin(phi), np.cos(theta)],
[np.cos(theta) * np.cos(phi), np.cos(theta) * np.sin(phi),-np.sin(theta)],
[-np.sin(phi), np.cos(phi), np.zeros_like(theta)]
])
# Do the dot products!
return np.squeeze(_matrix_multiply(transform_matrix.T, vector[...,None]))
def RotX(vector, angle, radians=False):
"""Rotate a Cartesian vector by a given angle about the +x axis.
NOTE: Probably needs to be re-written (to be like Rot()) for accepting
arrays of vectors.
This function rotates a given vector about the Cartesian +x axis.
The rotation is in a right-handed sense; positive angles rotate
from the +y axis toward the +z axis.
Arguments:
vector -- A 3-element NumPy array to be rotated.
angle -- The angle by which the input vector will be rotated.
Returns a 3-element NumPy array representing the rotated vector.
"""
if not radians:
angle = np.radians(angle % 360)
R_X = np.array([[1, 0, 0],
[0, np.cos(angle), -np.sin(angle)],
[0, np.sin(angle), np.cos(angle)]])
return R_X.dot(vector)
def RotY(vector, angle, radians=False):
"""Rotate a Cartesian vector by a given angle about the +y axis.
NOTE: Probably needs to be re-written (to be like Rot()) for accepting
arrays of vectors.
This function rotates a given vector about the Cartesian +y axis.
The rotation is in a right-handed sense; positive angles rotate
from the +z axis toward the +x axis.
Arguments:
vector -- A 3-element NumPy array to be rotated.
angle -- The angle by which the input vector will be rotated.
Keywords:
radians -- Whether 'angle' is in radians (True) or degrees (False; default).
Returns a 3-element NumPy array representing the rotated vector.
"""
if not radians:
angle = np.radians(angle % 360)
R_Y = np.array([[ np.cos(angle), 0, np.sin(angle)],
[ 0, 1, 0],
[-np.sin(angle), 0, np.cos(angle)]])
return R_Y.dot(vector)
def RotZ(vector, angle, radians=False):
"""Rotate a Cartesian vector by a given angle about the +z axis.
NOTE: Probably needs to be re-written (to be like Rot()) for accepting
arrays of vectors.
This function rotates a given vector about the Cartesian +z axis.
The rotation is in a right-handed sense; positive angles rotate
from the +x axis toward the +y axis.
Arguments:
vector -- A 3-element NumPy array to be rotated.
angle -- The angle by which the input vector will be rotated.
Keywords:
radians -- Whether 'angle' is in radians (True) or degrees (False; default).
Returns a 3-element NumPy array representing the rotated vector.
"""
if not radians:
angle = np.radians(angle % 360)
R_Z = np.array([[ np.cos(angle), -np.sin(angle), 0],
[ np.sin(angle), np.cos(angle), 0],
[ 0, 0, 1]])
return R_Z.dot(vector)
def Rot(vectors, x=0., y=0., z=0., radians=False):
"""Rotate Cartesian vectors.
This function rotates input vectors about the Cartesian axes.
The rotation is in a right-handed sense.
Arguments:
vector -- A NumPy array of vectors to be rotated.
Keywords:
x -- The angle to rotate about the x-axis.
y -- The angle to rotate about the y-axis.
z -- The angle to rotate about the z-axis.
radians -- Whether the above angles are given in radians (True)
or degrees (False; default).
Returns a NumPy array representing the rotated vectors.
"""
if vectors.ndim == 1:
if len(vectors) != 3:
print("ERROR - Rot(): Vector not a 3-dimensional vector! Aborting.")
return
elif vectors.ndim > 2:
print("ERROR - Rot(): Only handles a list of 3D vectors (2-dimensional array). \
Aborting.")
return
if not radians:
x = np.radians(x % 360)
y = np.radians(y % 360)
z = np.radians(z % 360)
R_X = np.matrix([[ 1, 0, 0],
[ 0, np.cos(x), -np.sin(x)],
[ 0, np.sin(x), np.cos(x)]])
R_Y = np.matrix([[ np.cos(y), 0, np.sin(y)],
[ 0, 1, 0],
[-np.sin(y), 0, np.cos(y)]])
R_Z = np.matrix([[ np.cos(z), -np.sin(z), 0],
[ np.sin(z), np.cos(z), 0],
[ 0, 0, 1]])
R = R_Z * R_Y * R_X
if vectors.ndim == 1: # A single vector
result = R.dot(vectors).A1 # Return result as flattened array.
elif vectors.ndim == 2: # A list of vectors
result = R.dot(vectors.T).T.A
return result
def CylPosToCart(vector):
"""Convert a cylindrical position vector into Cartesian position.
NOTE: Haven't really used this, so it might not be great.
Arguments:
vector -- A 3-element NumPy array representing, in order,
the cylindrical r, theta, and z position coordinates.
Returns a 3-element NumPy array representing, in order, the
representative Cartesian x, y, and z coordinates of the input vector.
"""
if len(vector) != 3:
print("WARNING - CylPosToCart(): Not a 3-dimensional vector!")
r, phi, z = vector
phi = np.radians(phi)
return np.array([r * np.cos(phi),
r * np.sin(phi),
z])
def CartPosToCyl(vector):
"""Convert a Cartesian position vector into cylindrical coordinates.
NOTE: Haven't really used this, so it might not be great.
Arguments:
vector -- A 3-element NumPy array representing, in order,
Cartesian x, y, and z position coordinates.
Returns a 3-element NumPy array representing, in order, the
cylindrical r, theta, and z position coordinates of the input vector.
"""
if len(vector) != 3:
print("WARNING - CartPosToCyl(): Not a 3-dimensional vector!")
x, y, z = vector[:3]
r = np.sqrt(x**2+y**2)
phi = np.degrees(np.arctan2(y, x))
return np.array([r, phi, z])
def CylVecToCart(position, vector):
"""Convert a cylindrical vector into Cartesian vector space.
N.B.: Not optimized! See SphVecToCart() for a better way to do this.
Takes a cylindrical-space vector and its corresponding cylindrical-
space position and returns the magnitude of the vector in x, y,
and z Cartesian directions.
Arguments:
position -- A 3-element NumPy array, representing the position
of the vector in cylindrical space.
vector -- A 3-element NumPy array, representing the vector to
be converted, also in cylindrical space.
Returns a 3-element NumPy array representing the magnitude of
the input vector in Cartesian vector space.
"""
if len(position) != 3:
print("WARNING - CylVecToCart(): Position not a 3-dimensional vector!")
if len(vector) != 3:
print("WARNING - CylVecToCart(): Vector not a 3-dimensional vector!")
r, phi, z = position
phi = np.radians(phi % 360)
r_hat = np.array([np.cos(phi), #x_hat
np.sin(phi), #y_hat
0]) #z_hat
phi_hat = np.array([-np.sin(phi), #x_hat
np.cos(phi), #y_hat
0]) #z_hat
z_hat = np.array([0, #x_hat
0, #y_hat
1]) #z_hat
transform_matrix = np.array([r_hat, phi_hat, z_hat])
return vector.dot(transform_matrix)
def RotCurve(vel, radius, C=0.3, p=1.35):
"""Create an analytic disk galaxy rotation curve.
Arguments:
vel -- The approximate maximum circular velocity.
radius -- The radius (or radii) at which to calculate the
rotation curve.
Keywords:
C -- Controls the radius at which the curve turns over,
in the same units as 'radius'.
p -- Controls the fall-off of the curve after the turn-over;
values expected to be between 1 and 1.5 for disks.
Returns the value of the rotation curve at the given radius.
See Bertola et al. 1991, ApJ, 373, 369 for more information.
"""
C_ = C # kpc
p_ = p
return vel * radius / ((radius**2 + C_**2)**(p_/2.))
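# Hedged self-check sketch (not part of the original module): spot-checks two
# of the conversions above with easy angles.
if __name__ == "__main__":
    # r = 1, theta = 90 deg, phi = 0 deg lies on the +x axis.
    print(SphPosToCart(np.array([1.0, 90.0, 0.0])))   # ~ [1, 0, 0]
    # Rotating +x by 90 degrees about +z lands on +y.
    print(Rot(np.array([1.0, 0.0, 0.0]), z=90.0))     # ~ [0, 1, 0]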
|
fourwood/OutflowCone
|
Helpers.py
|
Python
|
mit
| 13,229
|
"""Python API for talking to Bondora.com.
Bondora API Docs:
https://api.bondora.com/Intro
"""
import sys
import requests
import bondoraapi.account
import json
import logging
import datetime
import time
class Api(object):
def __init__(self, storage):
self.bondora_base_url = "https://api.bondora.com"
self.a = bondoraapi.account.Account()
self.token = self.a.token
self.storage = storage
def translate_status_code_to_string(self, status_code):
"""Convert status code from integer to a human readable string."""
known_status_codes = {0: "Pending",
1: "Open",
2: "Successful",
3: "Failed",
4: "Cancelled",
5: "Accepted"}
if status_code in known_status_codes:
return known_status_codes[status_code]
else:
return "Unknown code: {}".format(status_code)
def make_post_request(self, request_url, content):
full_url = "{}/{}".format(self.bondora_base_url, request_url)
headers = {"Authorization": "Bearer {}".format(self.token),
"Content-Type": "application/json"}
return requests.post(full_url,
headers=headers,
data=json.dumps(content))
def make_bid(self, auction_id, bid_size=5):
url = "/api/v1/bid"
logging.debug("Making bid for auction: {}, bid size: {}".format(
auction_id, bid_size))
# Create a bid JSON payload
bid = '''
{{
"Bids":
[
{{
"AuctionId": "{auction_id}",
"Amount": {bid_size},
"MinAmount": 5.0
}}
]
}}
'''
bid = json.loads(bid.format(auction_id=auction_id,
bid_size=float(bid_size)))
response = self.make_post_request(url, bid)
if response.status_code == 202:
response_json = response.json()
if response_json["Success"]:
return response_json["Payload"]
else:
print "Request was not successfull"
print response_json
else:
print "Unexpected status code {}".format(response.status_code)
return False
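    # For example (illustrative ids/amounts), make_bid("abc123", 10) posts this
    # JSON body to /api/v1/bid:
    #   {"Bids": [{"AuctionId": "abc123", "Amount": 10.0, "MinAmount": 5.0}]}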
def make_get_request(self, request_url):
full_url = "{}/{}".format(self.bondora_base_url, request_url)
headers = {"Authorization": "Bearer {}".format(self.token)}
while True:
try:
response = requests.get(full_url, headers=headers)
except Exception, e:
self.storage.save("last_failure", datetime.datetime.now())
if "Connection reset by peer" in e:
logging.warning("Connection reset by peer. Will sleep and retry")
time.sleep(10)
else:
logging.critical("Exception, while making a GET request.")
logging.critical(e)
print "Request failed. Check logs for details."
sys.exit(1)
# Handle bad responses. Sort of.
if not response.ok:
logging.warning("Bad response")
logging.warning(response.json())
time.sleep(1)
else:
logging.debug("Request to %s was successfull", request_url)
break # exit the loop after success response
# At this point we have OK response
response_json = response.json()
if not response_json['Success']:
print "We have response, but it's not a success"
sys.exit(1)
return response_json['Payload']
def get_balance(self):
url = "/api/v1/account/balance"
return self.make_get_request(url)
def get_auctions(self):
url = "/api/v1/auctions"
return self.make_get_request(url)
def get_auction(self, auction_id):
url = "/api/v1/auction/{auction_id}".format(auction_id=auction_id)
return self.make_get_request(url)
def get_bids(self, count=10):
""" Return a list of bids, sorted by bid date"""
url = "/api/v1/bids"
bids = self.make_get_request(url)
# sort bids by 'BidRequestedDate'
sorted_bids = sorted(bids, key=lambda item: item['BidRequestedDate'])
return sorted_bids[-count:]
def get_investments(self, count=10):
url = "/api/v1/account/investments"
investments = self.make_get_request(url)
sorted_investments = sorted(investments,
key=lambda item: item['PurchaseDate'])
return sorted_investments[-count:]
|
fxlv/bondora
|
bondoraapi/api.py
|
Python
|
mit
| 4,882
|
from __future__ import division, print_function, absolute_import
import pkg_resources
from turgles.geometry import SHAPES
from turgles.gl.api import (
GL_STATIC_DRAW,
GL_TRIANGLES,
GLfloat,
glGetAttribLocation,
glDrawArrays,
)
from turgles.renderer import Renderer
from turgles.gl.buffer import VertexBuffer
from turgles.util import measure
from turgles import memory
BATCH_SIZE = 35 # see shader
class ESTurtleShapeRenderer(object):
"""A Renderer for rendering mutliple versions of a specific turtle shape.
Creates vertex/index/model arrays, and can render them given turtle
data."""
def __init__(self, name, program, geometry):
self.name = name
self.program = program
self.geometry = geometry
# size of batched draw calls
self.batch = BATCH_SIZE
self.vertex_attr = glGetAttribLocation(self.program.id, b"vertex")
self.edge_attr = glGetAttribLocation(self.program.id, b"edge")
self.index_attr = glGetAttribLocation(self.program.id, b"index")
# load/bind/configure vertex buffer
self.vertex_buffer = VertexBuffer(GLfloat, GL_STATIC_DRAW)
batched_edges = list(geometry.edges) * self.batch
self.vertex_buffer.load(memory.create_vertex_buffer(batched_edges))
self.vertex_buffer.partition(
[(self.vertex_attr, 4), (self.edge_attr, 3)]
)
uniform_indicies = []
for i in range(self.batch):
uniform_indicies.extend([i] * geometry.num_vertex)
indices_buffer = memory.create_vertex_buffer(uniform_indicies)
self.indices_buffer = VertexBuffer(GLfloat, GL_STATIC_DRAW)
self.indices_buffer.load(indices_buffer)
self.indices_buffer.set(self.index_attr, 1)
def render(self, model, color, num_turtles):
self.program.bind()
# no VAOs so have to set manually
self.vertex_buffer.partition(
[(self.vertex_attr, 4), (self.edge_attr, 3)]
)
self.indices_buffer.set(self.index_attr, 1)
model_uniform = self.program.uniforms['turtle_model_array[0]']
color_uniform = self.program.uniforms['turtle_color_array[0]']
model_iter = model.slice(self.batch)
color_iter = color.slice(self.batch)
slices = zip(model_iter, color_iter)
with measure("loop"):
for (msize, model_slice), (csize, color_slice) in slices:
assert msize == csize
# load batch of turtle data
with measure('load'):
model_uniform.set(model_slice, size=msize)
color_uniform.set(color_slice, size=msize)
with measure('draw'):
glDrawArrays(
GL_TRIANGLES,
0,
len(self.geometry.edges) // 7 * msize,
)
self.vertex_buffer.unbind()
self.program.unbind()
class ES2Renderer(Renderer):
vertex_shader = pkg_resources.resource_string(
'turgles', 'shaders/turtles_es.vert').decode('utf8')
fragment_shader = pkg_resources.resource_string(
'turgles', 'shaders/turtles.frag').decode('utf8')
def setup_vaos(self):
self.program.bind()
self.vao = {}
for shape, geom in SHAPES.items():
self.vao[shape] = ESTurtleShapeRenderer(shape, self.program, geom)
|
AllTheWayDown/turgles
|
turgles/es_renderer.py
|
Python
|
mit
| 3,423
|
# Reads a date as "dd/mm/yyyy" and prints it back with the month spelled out.
# The output string is Portuguese: "Você nasceu em <day> de <month> de <year>"
# ("You were born on <day> <month> <year>").
dia, mes, ano = input("data: ").split('/')
ms = '''Janeiro Fevereiro Março Abril Maio Junho Julho Agosto Setembro Outubro Novembro Dezembro'''.split()
print('Você nasceu em %s de %s de %s' % (dia, ms[int(mes)-1], ano))
|
andersonsilvade/python_C
|
Python32/aulas/data.py
|
Python
|
mit
| 219
|
"""
Module to set default parameters for the TOBSY experiment.
W.T. Franks, FMP Berlin
"""
import de.bruker.nmr.mfw.root as root
import math
#import os
import sys
import TopCmds
import IntShape
import CPDtools
ret=u"\u000D"
spc=u"\u0020"
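# carriage-return and space characters, passed to the TopCmds.INPUT_DIALOG call below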
def name_confirm():
adbname=pul.GetPar('sCadb',"")
if adbname == "gauss" : adbname = "TanhTan"
SP=pul.pulDict['sCadb']
    Wave = TopCmds.INPUT_DIALOG("Adiabatic TOBSY", "",\
    ["SP File = ","SP Wave =",],\
    [adbname,SP],["",""],["1","1"],["Accept","Close"], [spc,ret], 30)
    if Wave == None: TopCmds.EXIT()
    Wave = str(Wave)
Files = Wave[8:len(Wave)-21] #get rid of Java formatting
i = Files.find(",")
File = Files[0:i-1]
SP = Files[i+3:]
pul.SetPar(SP,str(File),"")
pul.SetPar('sCadb',File,"")
def CalC931_TOBSY(units):
p90C=pul.GetPar('pC90',"")
ampC=pul.GetPar('aC',units)
MAS =pul.GetPar('MAS',"")
Loop=pul.GetPar('lTOBSY',"")
if Loop == 0: Loop = 25
if units == "W":
ampC=WtodB(ampC)
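    # maximum B1 field in Hz from the 90-degree pulse length (in us); this C9 condition sets B1 = 6 x the MAS rate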
MaxB1 = 1000000./4./p90C
C9B1 = 6.0*MAS
adjust=20*(math.log10(C9B1/MaxB1))
condition=ampC-adjust
if units == "W":
condition=dBtoW(condition)
    pul.SetPar('aCc9',condition,units)
Loop=pul.GetPar('lTOBSY',Loop,"")
def CalC961_TOBSY(units):
p90C=pul.GetPar('pC90',"")
ampC=pul.GetPar('aC',units)
MAS =pul.GetPar('MAS',"")
Loop=pul.GetPar('lTOBSY',"")
if Loop == 0: Loop = 25
if units == "W":
ampC=WtodB(ampC)
MaxB1 = 1000000./4./p90C
C9B1 = 3.0*MAS
adjust=20*(math.log10(C9B1/MaxB1))
condition=ampC-adjust
if units == "W":
condition=dBtoW(condition)
    pul.SetPar('aCc9',condition,units)
Loop=pul.GetPar('lTOBSY',Loop,"")
def CalC542_adb_TOBSY(units):
p90C=pul.GetPar('pC90',"")
ampC=pul.GetPar('aC',units)
MAS =pul.GetPar('MAS',"")
    SP = pul.GetPar('sCadb',"")
Loop=pul.GetPar('lTOBSY',"")
    AvgAmp=IntShape.Integrate(SP)/100.  # average amplitude (%) of the shaped pulse
if Loop == 0: Loop = 25
if units == "W":
ampC=WtodB(ampC)
MaxB1 = 1000000./4./p90C
C542B1= MAS*0.8/AvgAmp
adjust=20*(math.log10(C542B1/MaxB1))
condition=ampC-adjust
if units == "W":
condition=dBtoW(condition)
    pul.SetPar('aCadb',condition,units)
Loop=pul.GetPar('lTOBSY',Loop,"")
|
TrentFranks/ssNMR-Topspin-Python
|
modules/TOBSY.py
|
Python
|
mit
| 2,225
|
#!/usr/bin/python
import urllib
def main():
# url = 'https://screener.finance.yahoo.com/stocks.html'
url = 'https://screener.finance.yahoo.com/b?sc=&im=&prmin=0&prmax=&mcmin=&mcmax=&dvymin=0&dvymax=&betamin=&betamax=&remin=&remax=&pmmin=&pmmax=&pemin=&pemax=&pbmin=&pbmax=&psmin=&psmax=&pegmin=&pegmax=&gr=&grfy=&ar=&vw=1&db=stocks'
html = urllib.urlopen(url)
print "%s" % html.read()
main()
|
jtraver/dev
|
python/urllib/urllib1.py
|
Python
|
mit
| 411
|
import os
import numpy
import meshplex
from pynosh import magnetic_vector_potentials as mvp
def _run(filename, control_values):
    r"""Test $\int_{\Omega} A^2$."""
# read the mesh
mesh, _, _, _ = meshplex.read(filename)
if mesh.control_volumes is None:
mesh.compute_control_volumes()
tol = 1.0e-10
A = mvp.constant_field(mesh.node_coords, numpy.array([0, 0, 1]))
integral = numpy.sum(mesh.control_volumes * numpy.sum(A ** 2, axis=1))
assert numpy.all(numpy.abs(control_values["z"] - integral) < tol)
# If this is a 2D mesh, append the z-component 0 to each node
# to make sure that the magnetic vector potentials can be
# calculated.
points = mesh.node_coords.copy()
if points.shape[1] == 2:
points = numpy.column_stack((points, numpy.zeros(len(points))))
A = mvp.magnetic_dipole(
points, x0=numpy.array([0, 0, 10]), m=numpy.array([0, 0, 1])
)
integral = numpy.sum(mesh.control_volumes * numpy.sum(A ** 2, axis=1))
assert numpy.all(numpy.abs(control_values["dipole"] - integral) < tol)
# import time
# start = time.time()
A = mvp.magnetic_dot(mesh.node_coords, 2.0, [10.0, 11.0])
# A = numpy.empty((len(points), 3), dtype=float)
# for k, node in enumerate(points):
# A[k] = mvp.magnetic_dot(node[0], node[1], 2.0, 10.0, 11.0)
# end = time.time()
# print end-start
integral = numpy.sum(mesh.control_volumes * numpy.sum(A ** 2, axis=1))
assert numpy.all(numpy.abs(control_values["dot"] - integral) < tol)
return
def test_rectanglesmall():
this_path = os.path.dirname(os.path.realpath(__file__))
filename = os.path.join(this_path, "rectanglesmall.e")
control_values = {
"z": 63.125,
"dipole": 0.00012850741240854054,
"dot": 0.015062118041804408,
}
_run(filename, control_values)
return
def test_pacman():
this_path = os.path.dirname(os.path.realpath(__file__))
filename = os.path.join(this_path, "pacman.e")
control_values = {
"z": 3730.2268660993054,
"dipole": 0.0037630906971841487,
"dot": 0.46680832033437036,
}
_run(filename, control_values)
return
def test_cubesmall():
this_path = os.path.dirname(os.path.realpath(__file__))
filename = os.path.join(this_path, "cubesmall.e")
control_values = {
"z": 1.25,
"dipole": 0.00015098959555300608,
"dot": 0.00052723843169109191,
}
_run(filename, control_values)
return
def test_brick():
this_path = os.path.dirname(os.path.realpath(__file__))
filename = os.path.join(this_path, "brick-w-hole.e")
control_values = {
"z": 1687.6928071551067,
"dipole": 0.014339810567783946,
"dot": 0.4275090788990229,
}
_run(filename, control_values)
return
if __name__ == "__main__":
test_pacman()
|
nschloe/pynosh
|
test/test_mvp.py
|
Python
|
mit
| 2,871
|
#!/usr/bin/python3
"""
Copyright (c) 2018 Bill Peterson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import time
import re
import yaml
from subprocess import call, check_output
import stompboxpi as SB
def midi_connect():
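    # scan 'aconnect -i' output and connect every external MIDI input port
    # (excluding System and Midi Through) to fluidsynth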
mididevs={}
for client in check_output(['aconnect','-i']).split(b'client'):
x=re.match(b" (\d+:) '([ \w]*)'",client)
if not x:
continue
ports=re.findall(b"\n +(\d+) '",client)
mididevs[x.group(2)]=[x.group(1)+p for p in ports]
del mididevs[b'System']
del mididevs[b'Midi Through']
for d in mididevs.keys():
for p in mididevs[d]:
call(['aconnect', p, fluidport])
def midi_route(type, par1=[], par2=[], chan=[]):
fluid.router_begin(type)
if chan:
fluid.router_chan(*chan)
if par1:
fluid.router_par1(*par1)
if par2:
fluid.router_par2(*par2)
fluid.router_end()
def set_chorus_reverb():
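    # apply any chorus/reverb values stored in the bank; falls back to the
    # keyword-argument set_chorus()/set_reverb() API when the per-parameter setters are unavailable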
try:
if 'chorus_nr' in bank:
fluid.set_chorus_nr(bank['chorus_nr'])
if 'chorus_level' in bank:
fluid.set_chorus_level(bank['chorus_level'])
if 'chorus_depth' in bank:
fluid.set_chorus_depth(bank['chorus_depth'])
if 'chorus_speed' in bank:
fluid.set_chorus_speed(bank['chorus_speed'])
if 'chorus_type' in bank:
fluid.set_chorus_type(bank['chorus_type'])
if 'reverb_roomsize' in bank:
fluid.set_reverb_roomsize(bank['reverb_roomsize'])
if 'reverb_damping' in bank:
fluid.set_reverb_damping(bank['reverb_damping'])
if 'reverb_level' in bank:
fluid.set_reverb_level(bank['reverb_level'])
if 'reverb_width' in bank:
fluid.set_reverb_width(bank['reverb_width'])
except NameError:
if 'chorus_nr' in bank:
fluid.set_chorus(nr=bank['chorus_nr'])
if 'chorus_level' in bank:
fluid.set_chorus(level=bank['chorus_level'])
if 'chorus_depth' in bank:
fluid.set_chorus(depth=bank['chorus_depth'])
if 'chorus_speed' in bank:
fluid.set_chorus(speed=bank['chorus_speed'])
if 'chorus_type' in bank:
fluid.set_chorus(type=bank['chorus_type'])
if 'reverb_roomsize' in bank:
fluid.set_reverb(roomsize=bank['reverb_roomsize'])
if 'reverb_damping' in bank:
fluid.set_reverb(damping=bank['reverb_damping'])
if 'reverb_level' in bank:
fluid.set_reverb(level=bank['reverb_level'])
if 'reverb_width' in bank:
fluid.set_reverb(width=bank['reverb_width'])
def select_sfpreset(p):
(n,bank,prog)=p
fluid.program_select(0,sfid,bank,prog)
for ch in range(1,16):
fluid.program_unset(ch)
fluid.router_clear()
fluid.router_default()
def load_soundfont(f):
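    # load a single soundfont for preset browsing and list every preset it provides in banks 0-128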
global sfid, sfpresets
if sfid:
fluid.sfunload(sfid,1)
sfid=fluid.sfload(f)
sfpresets=[]
for b in range(129): # banks can go higher, but we'd be here all day
for p in range(128):
name=fluid.sfpreset_name(sfid,b,p)
if not name:
continue
sfpresets.append((name,b,p))
def select_patch(p):
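    # apply bank-level and patch-level router rules, then set bank/program and CC values for each channel in the patch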
fluid.router_clear()
if 'router_rules' in bank:
for rule in bank['router_rules']:
midi_route(**rule)
if 'router_rules' in p:
for rule in p['router_rules']:
if rule=='default':
fluid.router_default()
elif rule=='clear':
fluid.router_clear()
else:
midi_route(**rule)
if 'rule' not in locals():
fluid.router_default()
for ch in range(16):
if ch not in p:
continue
x=fluid.program_select(ch,
sfids[p[ch]['soundfont']],
p[ch]['bank'],
p[ch]['program'])
if 'cc' in p[ch]:
for cc,val in p[ch]['cc'].items():
fluid.cc(ch,cc,val)
def update_patch(p):
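    # read the current program and any nonzero CC values on each channel back into the patch, then save the bank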
for ch in range(16):
(sfid,bank,prog)=fluid.program_info(ch)
if sfid not in sfids.values():
continue
if ch not in p:
p[ch]={}
for sf in sfids.keys():
if sfids[sf]==sfid:
p[ch]['soundfont']=sf
break
p[ch]['bank']=bank
p[ch]['program']=prog
# check for any nonzero cc values, avoid those reserved for special functions
for (f,l) in [(1,3),(5,5),(7,31),(33,63),(65,65),(70,83),(85,95),(102,119)]:
for cc in range(f,l+1):
val=fluid.get_cc(ch,cc)
if val>0: # 0 --> cc not changed
if 'cc' not in p[ch]:
p[ch]['cc']={}
p[ch]['cc'][cc]=val
write_bank()
def add_patch(name='noname'):
global patches,pno
pnew=len(patches)
patches.append({'name':name})
if 'router_rules' in patches[pno]:
patches[pnew]['router_rules']=[]
for rule in patches[pno]['router_rules']:
patches[pnew]['router_rules'].append(dict(rule))
update_patch(patches[pnew])
def load_bank(file):
global bank, sfids
try:
f=open(file)
x = yaml.safe_load(f)
    except (yaml.YAMLError, IOError):
return False
f.close()
sfused=[]
bank=x
for p in bank['patches']:
for ch in range(16):
if ch not in p:
continue
sf=p[ch]['soundfont']
if sf not in sfused:
sfused.append(sf)
for sf in list(sfids.keys()):
if sf not in sfused:
fluid.sfunload(sfids[sf],1)
del sfids[sf]
for sf in sfused:
if sf not in sfids:
if 'soundfonts' in bank and sf in bank['soundfonts']:
id=fluid.sfload(bank['soundfonts'][sf])
else:
id=fluid.sfload(sf)
sfids[sf]=id
set_chorus_reverb()
fluid.setting('synth.gain', bank.get('gain',0.5))
return True
def write_bank(newbank=''):
if not newbank:
newbank=config['currentbank']
try:
f = open(newbank,'w')
yaml.dump(bank,f)
    except (yaml.YAMLError, IOError):
return False
f.close()
return True
def write_config():
try:
f = open('/home/pi/squishbox_settings.yaml','w')
yaml.dump(config,f)
    except (yaml.YAMLError, IOError):
return False
f.close()
return True
def soundfont_menu():
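    # while browsing a raw soundfont: save the current preset as a new patch, or unload the soundfont and return to patch mode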
global config, patches, sfids, sfid
k=SB.choose_opt(['Save as Patch','Exit Soundfont'],1)
if k==0:
newsfid=fluid.sfload(config['soundfont'])
sfids[config['soundfont']]=newsfid
pnew=len(patches)
patches.append({'name':sfpresets[sfp][0]})
patches[pnew][0]={'soundfont': config['soundfont'],
'bank': sfpresets[sfp][1],
'program': sfpresets[sfp][2]}
write_bank()
if k==1:
config['soundfont']=''
fluid.sfunload(sfid)
sfid=0
select_patch(patches[pno])
write_config()
def patch_menu():
global patches, pno
k=SB.choose_opt(['Update Patch','Save New Patch','Rename Patch','Delete Patch'],1)
if k==0:
update_patch(patches[pno])
SB.lcd_message("updated! ",1)
SB.waitforrelease(1)
elif k==1:
newname=patches[pno]['name']
x=re.search('[0-9]*$',newname)
if x.group():
newname=re.sub('[0-9]*$',"%d" % (int(x.group())+1),newname)
else:
newname+='2'
pnew=len(patches)
patches.append({'name':newname})
if 'router_rules' in patches[pno]:
patches[pnew]['router_rules']=[]
for rule in patches[pno]['router_rules']:
patches[pnew]['router_rules'].append(dict(rule))
update_patch(patches[pnew])
elif k==2:
SB.lcd_message("Rename Patch: ")
a=SB.char_input(patches[pno]['name'])
if a:
patches[pno]['name']=a
write_bank()
elif k==3:
if len(patches)<2:
SB.lcd_message("only 1 patch! ",1)
SB.waitforrelease(1)
else:
del patches[pno]
pno=(pno-1)%len(patches)
select_patch(patches[pno])
write_bank()
def switch_bank():
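    # list every *.yaml bank file under /home/pi (except the settings file) and load the one chosen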
global config
SB.lcd_message("Load Bank: ")
bpaths=check_output("find /home/pi -name '*.yaml'",shell=True).strip()
banks=[x[9:] for x in bpaths.decode('ascii').split('\n')]
del banks[banks.index('squishbox_settings.yaml')]
i=SB.choose_opt(banks, row=1, scroll=True)
if i>=0:
SB.lcd_message("loading patches ",1)
if not load_bank(banks[i]):
SB.lcd_message("bank load error!",1)
SB.waitforrelease(2)
return False
config['currentbank']=banks[i]
if config['uselastbank']:
config['initialbank']=banks[i]
write_config()
SB.waitforrelease(1)
return True
return False
def saveasnew_bank():
global config
x=re.search('([0-9]*).yaml$',config['currentbank'])
if x.group(1):
f=re.sub('[0-9]*.yaml$',"%d.yaml" % (int(x.group(1))+1),config['currentbank'])
else:
f=re.sub('\.yaml$','2.yaml',config['currentbank'])
SB.lcd_message("New Bank: ")
newbank=SB.char_input(f)
if newbank:
if not re.search('\.yaml$',newbank):
newbank+='.yaml'
if not write_bank(newbank):
SB.lcd_message("bank save error!",1)
SB.waitforrelease(1)
return
call(['sudo','chmod','666',newbank])
config['currentbank']=newbank
if config['uselastbank']:
config['initialbank']=newbank
write_config()
SB.lcd_message("new bank saved! ",1)
SB.waitforrelease(1)
def chorverb_menu():
global bank
opts=['Chorus Voices','Chorus Level','Chorus Speed',
'Chorus Depth','Chorus Type','Reverb Size',
'Reverb Damping','Reverb Width','Reverb Level']
while True:
SB.lcd_message("Chorus/Reverb ")
i=SB.choose_opt(opts,1)
if i<0:
return
SB.lcd_message("%-16s" % opts[i])
if i==0:
bank['chorus_nr']=SB.choose_val(fluid.get_chorus_nr(),1,0,99,'%16d')
elif i==1:
bank['chorus_level']=SB.choose_val(fluid.get_chorus_level(),0.1,0.0,10.0,'%16.1f')
elif i==2:
bank['chorus_depth']=SB.choose_val(fluid.get_chorus_depth(),0.1,0.0,21.0,'%16.1f')
elif i==3:
bank['chorus_speed']=SB.choose_val(fluid.get_chorus_speed(),0.1,0.3,5.0,'%16.1f')
elif i==4:
bank['chorus_type']=SB.choose_val(fluid.get_chorus_type(),1,0,1,'%16d')
elif i==5:
bank['reverb_roomsize']=SB.choose_val(fluid.get_reverb_roomsize(),0.1,0.0,1.0,'%16.1f')
elif i==6:
bank['reverb_damping']=SB.choose_val(fluid.get_reverb_damp(),0.1,0.0,1.0,'%16.1f')
elif i==7:
bank['reverb_width']=SB.choose_val(fluid.get_reverb_width(),1.0,0.0,100.0,'%16.1f')
elif i==8:
bank['reverb_level']=SB.choose_val(fluid.get_reverb_level(),0.01,0.00,1.00,'%16.2f')
set_chorus_reverb()
write_bank()
def wifi_settings():
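    # show the current SSID and IP address, and optionally append a new network block to wpa_supplicant.conf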
ssid=check_output(['iwgetid','wlan0','--raw']).strip().decode('ascii')
ip=re.sub(b'\s.*',b'',check_output(['hostname','-I'])).decode('ascii')
if ssid=="":
statusmsg="Not connected \n"+' '*16
else:
statusmsg="%16s\n%-16s" % (ssid,ip)
j=SB.choose_opt([statusmsg,"Add Network.. \n"+' '*16])
if j==1:
SB.lcd_message("Network (SSID):")
newssid=SB.char_input()
if not newssid:
return
SB.lcd_message("Password:")
newpsk=SB.char_input()
if not newpsk:
return
SB.lcd_message("adding network.."+' '*16)
f=open('/etc/wpa_supplicant/wpa_supplicant.conf','a')
f.write('network={\n ssid="%s"\n psk="%s"\n}\n' % (newssid,newpsk))
f.close()
call('sudo service networking restart'.split())
SB.waitforrelease(1)
def open_soundfont():
global config
sfpath=check_output("find /home/pi -name '*.sf2'",shell=True).strip()
sf=[x[9:] for x in sfpath.decode('ascii').split('\n')]
s=SB.choose_opt(sf,row=1,scroll=True)
if s<0:
return False
SB.lcd_message("loading... ",1)
load_soundfont(sf[s])
config['soundfont']=sf[s]
write_config()
SB.waitforrelease(1)
return True
def add_fromusb():
SB.lcd_clear()
SB.lcd_message("looking for USB \n")
b=check_output('sudo blkid'.split())
x=re.findall(b'/dev/sd[a-z]\d*',b)
if x:
SB.lcd_message("copying files.. ",1)
for u in x:
call(['sudo','mount',u,'/mnt/usbdrv/'])
call(['sudo','/home/pi/copyfromUSB.sh'])
call(['sudo','umount',u])
SB.lcd_clear()
SB.lcd_message("copying files.. \ndone!")
SB.waitforrelease(1)
else:
SB.lcd_message("USB not found! ",1)
SB.waitforrelease(1)
### STARTUP ###
SB.lcd_clear()
SB.lcd_message("Squishbox v2.0",0)
SB.lcd_message("setting up",1)
# load main settings file
try:
f = open('/home/pi/squishbox_settings.yaml')
config = yaml.safe_load(f)
except (yaml.YAMLError, IOError):
SB.lcd_message("bad config file!",1)
time.sleep(10)
f.close()
# to do --
# if fluidversion is in settings,
# set some environment variable so
# fluidsynth.py knows where it is, then
# import fluidsynth.py after this line
import importlib
fluidsynth = importlib.import_module(config.get('fluidversion','fluidsynth'))
# start fluidsynth
SB.lcd_message("starting fluid ",1)
fluid=fluidsynth.Synth(channels=16)
for opt,val in config['fluidsettings'].items():
fluid.setting(opt,val)
fluid.start(driver='alsa', device='hw:0', midi_driver='alsa_seq')
x=re.search(b"client (\d+:) 'FLUID Synth",check_output(['aconnect','-o']))
if not x:
while True:
SB.lcd_message("no fluid midi! ")
time.sleep(10)
fluidport=x.group(1).decode()+'0'
midi_connect()
# load bank
SB.lcd_message("loading patches ",1)
bank={}
sfids={}
if not load_bank(config['initialbank']):
while True:
SB.lcd_message("bank load error!",1)
SB.waitfortap(10)
if switch_bank():
break
config['currentbank']=config['initialbank']
patches=bank['patches']
sfp=0
sfid=0
sfpresets=[]
pno=0
select_patch(patches[pno])
sfp=0
sfid=0
sfpresets=[]
if config['soundfont']:
load_soundfont(config['soundfont'])
select_sfpreset(sfpresets[sfp])
SB.reset_scroll()
### MAIN ###
while True:
time.sleep(SB.POLL_TIME)
SB.poll_stompswitches()
if config['soundfont']:
SB.lcd_scroll(sfpresets[sfp][0])
SB.lcd_message("%16s" % ("preset %03d:%03d" % sfpresets[sfp][1:3]),1)
if SB.r_state==SB.STATE_TAP:
sfp=(sfp+1)%len(sfpresets)
elif SB.l_state==SB.STATE_TAP:
sfp=(sfp-1)%len(sfpresets)
if SB.r_state==SB.STATE_TAP or SB.l_state==SB.STATE_TAP:
select_sfpreset(sfpresets[sfp])
SB.reset_scroll()
continue
elif SB.r_state==SB.STATE_HOLD:
soundfont_menu()
continue
else:
SB.lcd_scroll(patches[pno]['name'])
SB.lcd_message("%16s" % ("patch: %d/%d" % (pno+1,len(patches))),1)
if SB.r_state==SB.STATE_TAP:
pno=(pno+1)%len(patches)
elif SB.l_state==SB.STATE_TAP:
pno=(pno-1)%len(patches)
if SB.r_state==SB.STATE_TAP or SB.l_state==SB.STATE_TAP:
select_patch(patches[pno])
SB.reset_scroll()
continue
elif SB.r_state==SB.STATE_HOLD:
patch_menu()
continue
if SB.r_state+SB.l_state==SB.STATE_NONE:
continue
elif SB.l_state==SB.STATE_HOLD:
SB.lcd_message("Settings: ")
k=SB.choose_opt(['Switch Bank','Save New Bank','Set Gain','Chorus/Reverb','Advanced..','Power Down'],1)
if k==0:
if switch_bank():
patches=bank['patches']
pno=0
select_patch(patches[pno])
elif k==1:
saveasnew_bank()
elif k==2:
SB.lcd_message("Output Gain: ")
g=SB.choose_val(bank['gain'],0.1,0.0,5.0,"%16.2f")
bank['gain']=g
fluid.setting('synth.gain', g)
write_bank()
elif k==3:
chorverb_menu()
elif k==4:
SB.lcd_message("Advanced: ")
j=SB.choose_opt(['Open Soundfont','MIDI Reconnect','Wifi Settings','Add From USB'],1)
if j==0:
if open_soundfont():
sfp=0
select_sfpreset(sfpresets[sfp])
if j==1:
SB.lcd_message("reconnecting.. ",1)
midi_connect()
SB.waitforrelease(1)
if j==2:
wifi_settings()
if j==3:
add_fromusb()
elif k==5:
SB.lcd_message("Shutting down...\nWait 30s, unplug",0)
call('sudo shutdown -h now'.split())
|
albedozero/squishbox
|
squishbox.py
|
Python
|
mit
| 18,383
|
from __future__ import (division, print_function)
from pomegranate import *
from nose.tools import with_setup
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_raises
from nose.tools import assert_almost_equal
import random
import numpy as np
import json
def setup():
'''
Build a model that we want to use to test sequences. This model will
be somewhat complicated, in order to extensively test YAHMM. This will be
a three state global sequence alignment HMM. The HMM models a reference of
'ACT', with pseudocounts to allow for slight deviations from this
reference.
'''
random.seed(0)
global model
model = HiddenMarkovModel( "Global Alignment")
# Define the distribution for insertions
i_d = DiscreteDistribution( { 'A': 0.25, 'C': 0.25, 'G': 0.25, 'T': 0.25 } )
# Create the insert states
i0 = State( i_d, name="I0" )
i1 = State( i_d, name="I1" )
i2 = State( i_d, name="I2" )
i3 = State( i_d, name="I3" )
# Create the match states
m1 = State( DiscreteDistribution({ "A": 0.95, 'C': 0.01, 'G': 0.01, 'T': 0.02 }) , name="M1" )
m2 = State( DiscreteDistribution({ "A": 0.003, 'C': 0.99, 'G': 0.003, 'T': 0.004 }) , name="M2" )
m3 = State( DiscreteDistribution({ "A": 0.01, 'C': 0.01, 'G': 0.01, 'T': 0.97 }) , name="M3" )
# Create the delete states
d1 = State( None, name="D1" )
d2 = State( None, name="D2" )
d3 = State( None, name="D3" )
# Add all the states to the model
model.add_states( [i0, i1, i2, i3, m1, m2, m3, d1, d2, d3 ] )
# Create transitions from match states
model.add_transition( model.start, m1, 0.9 )
model.add_transition( model.start, i0, 0.1 )
model.add_transition( m1, m2, 0.9 )
model.add_transition( m1, i1, 0.05 )
model.add_transition( m1, d2, 0.05 )
model.add_transition( m2, m3, 0.9 )
model.add_transition( m2, i2, 0.05 )
model.add_transition( m2, d3, 0.05 )
model.add_transition( m3, model.end, 0.9 )
model.add_transition( m3, i3, 0.1 )
# Create transitions from insert states
model.add_transition( i0, i0, 0.70 )
model.add_transition( i0, d1, 0.15 )
model.add_transition( i0, m1, 0.15 )
model.add_transition( i1, i1, 0.70 )
model.add_transition( i1, d2, 0.15 )
model.add_transition( i1, m2, 0.15 )
model.add_transition( i2, i2, 0.70 )
model.add_transition( i2, d3, 0.15 )
model.add_transition( i2, m3, 0.15 )
model.add_transition( i3, i3, 0.85 )
model.add_transition( i3, model.end, 0.15 )
# Create transitions from delete states
model.add_transition( d1, d2, 0.15 )
model.add_transition( d1, i1, 0.15 )
model.add_transition( d1, m2, 0.70 )
model.add_transition( d2, d3, 0.15 )
model.add_transition( d2, i2, 0.15 )
model.add_transition( d2, m3, 0.70 )
model.add_transition( d3, i3, 0.30 )
model.add_transition( d3, model.end, 0.70 )
# Call bake to finalize the structure of the model.
model.bake()
def multitransition_setup():
'''
Build a model that we want to use to test sequences. This is the same as the
above model, except that it uses the multiple transition methods for building.
'''
random.seed(0)
global model
model = HiddenMarkovModel( "Global Alignment")
# Define the distribution for insertions
i_d = DiscreteDistribution( { 'A': 0.25, 'C': 0.25, 'G': 0.25, 'T': 0.25 } )
# Create the insert states
i0 = State( i_d, name="I0" )
i1 = State( i_d, name="I1" )
i2 = State( i_d, name="I2" )
i3 = State( i_d, name="I3" )
# Create the match states
m1 = State( DiscreteDistribution({ "A": 0.95, 'C': 0.01, 'G': 0.01, 'T': 0.02 }) , name="M1" )
m2 = State( DiscreteDistribution({ "A": 0.003, 'C': 0.99, 'G': 0.003, 'T': 0.004 }) , name="M2" )
m3 = State( DiscreteDistribution({ "A": 0.01, 'C': 0.01, 'G': 0.01, 'T': 0.97 }) , name="M3" )
# Create the delete states
d1 = State( None, name="D1" )
d2 = State( None, name="D2" )
d3 = State( None, name="D3" )
# Add all the states to the model
model.add_states( [i0, i1, i2, i3, m1, m2, m3, d1, d2, d3 ] )
# Create transitions from match states
model.add_transitions( model.start, [m1, i0], [0.9, 0.1] )
model.add_transitions( m1, [m2, i1, d2], [0.9, 0.05, 0.05] )
model.add_transitions( m2, [m3, i2, d3], [0.9, 0.05, 0.05] )
model.add_transitions( m3, [model.end, i3], [0.9, 0.1] )
# Create transitions from insert states
model.add_transitions( i0, [i0, d1, m1], [0.7, 0.15, 0.15] )
model.add_transitions( i1, [i1, d2, m2], [0.7, 0.15, 0.15] )
model.add_transitions( i2, [i2, d3, m3], [0.7, 0.15, 0.15] )
model.add_transitions( [i3, i3], [i3, model.end], [0.85, 0.15] )
# Create transitions from delete states
model.add_transitions( d1, [d2, i1, m2], [0.15, 0.15, 0.70] )
model.add_transitions( [d2, d2, d2, d3, d3], [d3, i2, m3, i3, model.end],
[0.15, 0.15, 0.70, 0.30, 0.70 ] )
# Call bake to finalize the structure of the model.
model.bake()
def tied_edge_setup():
'''
Build a model that we want to use to test sequences. This model has
tied edges.
'''
random.seed(0)
global model
model = HiddenMarkovModel( "Global Alignment")
# Define the distribution for insertions
i_d = DiscreteDistribution( { 'A': 0.25, 'C': 0.25, 'G': 0.25, 'T': 0.25 } )
# Create the insert states
i0 = State( i_d, name="I0" )
i1 = State( i_d, name="I1" )
i2 = State( i_d, name="I2" )
i3 = State( i_d, name="I3" )
# Create the match states
m1 = State( DiscreteDistribution({ "A": 0.95, 'C': 0.01, 'G': 0.01, 'T': 0.02 }) , name="M1" )
m2 = State( DiscreteDistribution({ "A": 0.003, 'C': 0.99, 'G': 0.003, 'T': 0.004 }) , name="M2" )
m3 = State( DiscreteDistribution({ "A": 0.01, 'C': 0.01, 'G': 0.01, 'T': 0.97 }) , name="M3" )
# Create the delete states
d1 = State( None, name="D1" )
d2 = State( None, name="D2" )
d3 = State( None, name="D3" )
# Add all the states to the model
model.add_states( [i0, i1, i2, i3, m1, m2, m3, d1, d2, d3 ] )
# Create transitions from match states
model.add_transition( model.start, m1, 0.9 )
model.add_transition( model.start, i0, 0.1 )
model.add_transition( m1, m2, 0.9 )
model.add_transition( m1, i1, 0.05 )
model.add_transition( m1, d2, 0.05 )
model.add_transition( m2, m3, 0.9 )
model.add_transition( m2, i2, 0.05 )
model.add_transition( m2, d3, 0.05 )
model.add_transition( m3, model.end, 0.9 )
model.add_transition( m3, i3, 0.1 )
# Create transitions from insert states
model.add_transition( i0, i0, 0.70, group="i_a" )
model.add_transition( i0, d1, 0.15, group="i_b" )
model.add_transition( i0, m1, 0.15, group="i_c" )
model.add_transition( i1, i1, 0.70, group="i_a" )
model.add_transition( i1, d2, 0.15, group="i_b" )
model.add_transition( i1, m2, 0.15, group="i_c" )
model.add_transition( i2, i2, 0.70, group="i_a" )
model.add_transition( i2, d3, 0.15, group="i_b" )
model.add_transition( i2, m3, 0.15, group="i_c" )
model.add_transition( i3, i3, 0.85, group="i_a" )
model.add_transition( i3, model.end, 0.15 )
# Create transitions from delete states
model.add_transition( d1, d2, 0.15, group="d_a" )
model.add_transition( d1, i1, 0.15, group="d_b" )
model.add_transition( d1, m2, 0.70, group="d_c" )
model.add_transition( d2, d3, 0.15, group="d_a" )
model.add_transition( d2, i2, 0.15, group="d_b" )
model.add_transition( d2, m3, 0.70, group="d_c" )
model.add_transition( d3, i3, 0.30 )
model.add_transition( d3, model.end, 0.70 )
# Call bake to finalize the structure of the model.
model.bake()
def teardown():
'''
Remove the model at the end of the unit testing. Since it is stored in a
global variable, simply delete it.
'''
pass
@with_setup( setup, teardown )
def test_same_length_viterbi():
scores = [ -0.5132449003570658, -11.048101241343396, -9.125519674022627,
-5.0879558788604475 ]
sequences = [ list(x) for x in [ 'ACT', 'GGC', 'GAT', 'ACC' ] ]
for seq, score in zip( sequences, scores ):
assert_almost_equal( model.viterbi( seq )[0], score )
assert_raises( ValueError, model.viterbi, list('XXX') )
@with_setup( setup, teardown )
def test_variable_length_viterbi():
scores = [ -5.406181012423981, -10.88681993576597, -3.6244718790494277,
-3.644880750680635, -10.674332964640293, -10.393824835172445,
-8.67126440174503, -16.903451796110275, -16.451699654050792 ]
sequences = [ list(x) for x in ('A', 'GA', 'AC', 'AT', 'ATCC',
'ACGTG', 'ATTT', 'TACCCTC', 'TGTCAACACT') ]
for seq, score in zip( sequences, scores ):
assert_almost_equal( model.viterbi( seq )[0], score )
@with_setup( setup, teardown )
def test_log_probability():
scores = [ -5.3931, -0.5052, -11.8478, -14.3482 ]
sequences = [ list(x) for x in ( 'A', 'ACT', 'GGCA', 'TACCTGT' ) ]
for seq, score in zip( sequences, scores ):
assert_equal( round( model.log_probability( seq ), 4 ), score )
@with_setup( setup, teardown )
def test_posterior_transitions():
a_scores = [ 0.0, 0.0021, 0.2017, 1.5105 ]
b_scores = [ 0.013, 0.0036, 1.9836, 2.145 ]
c_scores = [ 0.013, 0.0035, 0.817, 0.477 ]
d_scores = [ 1.0, 0.0023, 0.2636, 0.3682 ]
t_scores = [ 4.013, 4.0083, 6.457, 8.9812 ]
sequences = [ list(x) for x in ( 'A', 'ACT', 'GGCA', 'TACCTGT' ) ]
indices = { state.name: i for i, state in enumerate( model.states ) }
i, j, k, l = indices['I2'], indices['I0'], indices['D1'], indices['D2']
scores = zip( sequences, a_scores, b_scores, c_scores, d_scores, t_scores )
for seq, a, b, c, d, t in scores:
trans, ems = model.forward_backward( seq )
assert_equal( round( trans[i].sum(), 4 ), a )
assert_equal( round( trans[j].sum(), 4 ), b )
assert_equal( round( trans[k].sum(), 4 ), c )
assert_equal( round( trans[l].sum(), 4 ), d )
assert_equal( round( trans.sum(), 4 ), t )
@with_setup( setup, teardown )
def test_posterior_transitions_w_training():
sequences = [ list(x) for x in ( 'A', 'ACT', 'GGCA', 'TACCTGT' ) ]
indices = { state.name: i for i, state in enumerate( model.states ) }
transitions = model.dense_transition_matrix()
i0, i1, i2 = indices['I0'], indices['I1'], indices['I2']
d1, d2, d3 = indices['D1'], indices['D2'], indices['D3']
m1, m2, m3 = indices['M1'], indices['M2'], indices['M3']
assert_equal( transitions[d1, i1], transitions[d2, i2] )
assert_equal( transitions[i0, i0], transitions[i1, i1] )
assert_equal( transitions[i0, i0], transitions[i2, i2] )
assert_equal( transitions[i0, m1], transitions[i1, m2] )
assert_equal( transitions[d1, d2], transitions[d2, d3] )
assert_equal( transitions[i0, d1], transitions[i1, d2] )
assert_equal( transitions[i0, d1], transitions[i2, d3] )
model.fit( sequences, verbose=False )
transitions = model.dense_transition_matrix()
assert_not_equal( transitions[d1, i1], transitions[d2, i2] )
assert_not_equal( transitions[i0, m1], transitions[i1, m2] )
assert_not_equal( transitions[d1, d2], transitions[d2, d3] )
assert_not_equal( transitions[i0, d1], transitions[i1, d2] )
assert_not_equal( transitions[i0, d1], transitions[i2, d3] )
@with_setup( setup, teardown )
def test_posterior_transitions_w_vtraining():
sequences = [ list(x) for x in ( 'A', 'ACT', 'GGCA', 'TACCTGT' ) ]
indices = { state.name: i for i, state in enumerate( model.states ) }
transitions = model.dense_transition_matrix()
i0, i1, i2, i3 = indices['I0'], indices['I1'], indices['I2'], indices['I3']
d1, d2, d3 = indices['D1'], indices['D2'], indices['D3']
m1, m2, m3 = indices['M1'], indices['M2'], indices['M3']
assert_equal( transitions[d1, i1], transitions[d2, i2] )
assert_equal( transitions[i0, i0], transitions[i1, i1] )
assert_equal( transitions[i0, i0], transitions[i2, i2] )
assert_equal( transitions[i0, m1], transitions[i1, m2] )
assert_equal( transitions[d1, d2], transitions[d2, d3] )
assert_equal( transitions[i0, d1], transitions[i1, d2] )
assert_equal( transitions[i0, d1], transitions[i2, d3] )
model.fit( sequences, verbose=False, algorithm='viterbi' )
transitions = model.dense_transition_matrix()
assert_not_equal( transitions[i0, i0], transitions[i1, i1] )
assert_not_equal( transitions[d1, d2], transitions[d2, d3] )
assert_not_equal( transitions[i0, d1], transitions[i1, d2] )
assert_not_equal( transitions[i0, d1], transitions[i2, d3] )
@with_setup( tied_edge_setup, teardown )
def test_posterior_transitions_w_tied_training():
sequences = [ list(x) for x in ( 'A', 'ACT', 'GGCA', 'TACCTGT' ) ]
indices = { state.name: i for i, state in enumerate( model.states ) }
transitions = model.dense_transition_matrix()
i0, i1, i2, i3 = indices['I0'], indices['I1'], indices['I2'], indices['I3']
d1, d2, d3 = indices['D1'], indices['D2'], indices['D3']
m1, m2, m3 = indices['M1'], indices['M2'], indices['M3']
assert_equal( transitions[d1, i1], transitions[d2, i2] )
assert_equal( transitions[i0, i0], transitions[i1, i1] )
assert_equal( transitions[i0, i0], transitions[i2, i2] )
assert_equal( transitions[i0, m1], transitions[i1, m2] )
assert_equal( transitions[d1, d2], transitions[d2, d3] )
assert_equal( transitions[i0, d1], transitions[i1, d2] )
assert_equal( transitions[i0, d1], transitions[i2, d3] )
model.fit( sequences, verbose=False )
transitions = model.dense_transition_matrix()
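    # edges added with the same group= argument are tied, so their probabilities should stay equal after training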
assert_equal( transitions[i0, i0], transitions[i1, i1] )
assert_equal( transitions[d1, d2], transitions[d2, d3] )
assert_equal( transitions[i0, d1], transitions[i1, d2] )
assert_equal( transitions[i0, d1], transitions[i2, d3] )
@with_setup( tied_edge_setup, teardown )
def test_posterior_transitions_w_tied_vtraining():
sequences = [ list(x) for x in ( 'A', 'ACT', 'GGCA', 'TACCTGT' ) ]
indices = { state.name: i for i, state in enumerate( model.states ) }
transitions = model.dense_transition_matrix()
i0, i1, i2 = indices['I0'], indices['I1'], indices['I2']
d1, d2, d3 = indices['D1'], indices['D2'], indices['D3']
m1, m2, m3 = indices['M1'], indices['M2'], indices['M3']
assert_equal( transitions[d1, i1], transitions[d2, i2] )
assert_equal( transitions[i0, i0], transitions[i1, i1] )
assert_equal( transitions[i0, i0], transitions[i2, i2] )
assert_equal( transitions[i0, m1], transitions[i1, m2] )
assert_equal( transitions[d1, d2], transitions[d2, d3] )
assert_equal( transitions[i0, d1], transitions[i1, d2] )
assert_equal( transitions[i0, d1], transitions[i2, d3] )
model.fit( sequences, verbose=False, algorithm='viterbi' )
transitions = model.dense_transition_matrix()
assert_equal( transitions[d1, i1], transitions[d2, i2] )
assert_equal( transitions[i0, i0], transitions[i1, i1] )
assert_equal( transitions[i0, i0], transitions[i2, i2] )
assert_equal( transitions[i0, m1], transitions[i1, m2] )
assert_equal( transitions[d1, d2], transitions[d2, d3] )
assert_equal( transitions[i0, d1], transitions[i1, d2] )
assert_equal( transitions[i0, d1], transitions[i2, d3] )
@with_setup( setup, teardown )
def test_posterior_emissions():
a_scores = [ 0.987, 0.9965, 0.183, 0.523 ]
b_scores = [ 0.0, 0.9977, 0.7364, 0.6318 ]
c_scores = [ 0.0, 0.9975, 0.6237, 0.8641 ]
d_scores = [ 0.0, 0.0021, 0.2017, 1.5105 ]
sequences = [ list(x) for x in ( 'A', 'ACT', 'GGCA', 'TACCTGT' ) ]
indices = { state.name: i for i, state in enumerate( model.states ) }
i, j, k, l = indices['M1'], indices['M2'], indices['M3'], indices['I2']
for seq, a, b, c, d in zip( sequences, a_scores, b_scores, c_scores, d_scores ):
trans, ems = model.forward_backward( seq )
ems = np.exp( ems )
assert_equal( round( ems[:,i].sum(), 4 ), a )
assert_equal( round( ems[:,j].sum(), 4 ), b )
assert_equal( round( ems[:,k].sum(), 4 ), c )
assert_equal( round( ems[:,l].sum(), 4 ), d )
assert_equal( round( ems.sum() ), len( seq ) )
@with_setup( multitransition_setup, teardown )
def test_posterior_emissions_w_multitransition_setup():
a_scores = [ 0.987, 0.9965, 0.183, 0.523 ]
b_scores = [ 0.0, 0.9977, 0.7364, 0.6318 ]
c_scores = [ 0.0, 0.9975, 0.6237, 0.8641 ]
d_scores = [ 0.0, 0.0021, 0.2017, 1.5105 ]
sequences = [ list(x) for x in ( 'A', 'ACT', 'GGCA', 'TACCTGT' ) ]
indices = { state.name: i for i, state in enumerate( model.states ) }
i, j, k, l = indices['M1'], indices['M2'], indices['M3'], indices['I2']
for seq, a, b, c, d in zip( sequences, a_scores, b_scores, c_scores, d_scores ):
trans, ems = model.forward_backward( seq )
ems = np.exp( ems )
assert_equal( round( ems[:,i].sum(), 4 ), a )
assert_equal( round( ems[:,j].sum(), 4 ), b )
assert_equal( round( ems[:,k].sum(), 4 ), c )
assert_equal( round( ems[:,l].sum(), 4 ), d )
assert_equal( round( ems.sum() ), len( seq ) )
@with_setup( tied_edge_setup, teardown )
def test_posterior_emissions_w_tied_edge_setup():
a_scores = [ 0.987, 0.9965, 0.183, 0.523 ]
b_scores = [ 0.0, 0.9977, 0.7364, 0.6318 ]
c_scores = [ 0.0, 0.9975, 0.6237, 0.8641 ]
d_scores = [ 0.0, 0.0021, 0.2017, 1.5105 ]
sequences = [ list(x) for x in ( 'A', 'ACT', 'GGCA', 'TACCTGT' ) ]
indices = { state.name: i for i, state in enumerate( model.states ) }
i, j, k, l = indices['M1'], indices['M2'], indices['M3'], indices['I2']
for seq, a, b, c, d in zip( sequences, a_scores, b_scores, c_scores, d_scores ):
trans, ems = model.forward_backward( seq )
ems = np.exp( ems )
assert_equal( round( ems[:,i].sum(), 4 ), a )
assert_equal( round( ems[:,j].sum(), 4 ), b )
assert_equal( round( ems[:,k].sum(), 4 ), c )
assert_equal( round( ems[:,l].sum(), 4 ), d )
assert_equal( round( ems.sum() ), len( seq ) )
@with_setup( setup, teardown )
def test_properties():
assert_equal( model.edge_count(), 29 )
assert_equal( model.state_count(), 12 )
assert_equal( model.name, "Global Alignment" )
@with_setup( setup, teardown )
def test_to_json():
b = json.loads(model.to_json())
assert_equal(b['name'], 'Global Alignment')
assert_equal(len(b['edges']), 29)
assert_equal(len(b['states']), 12)
assert_equal(b['silent_index'], 7)
@with_setup( setup, teardown )
def test_from_json():
hmm = HiddenMarkovModel.from_json( model.to_json() )
assert_equal(hmm.edge_count(), 29)
assert_equal(hmm.state_count(), 12)
assert_equal(hmm.name, "Global Alignment")
|
jmschrei/pomegranate
|
tests/test_profile_hmm.py
|
Python
|
mit
| 17,863
|