| code (stringlengths 2–1.05M) | repo_name (stringlengths 5–104) | path (stringlengths 4–251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int32 2–1.05M) |
|---|---|---|---|---|---|
import subprocess

import click


@click.command()
def cli():
    """ Start all services. """
    cmd = 'honcho start'
    return subprocess.call(cmd, shell=True)
| z123/build-a-saas-app-with-flask | cli/commands/cmd_all.py | Python | mit | 163 |
"""Database Populate module.
This module contains functions for populating a database with PAF data.
The data is split across a number of files (as explained elsewhere), and so
each file must be parsed and the data inserted into the database.
"""
from paf_tools import database
from paf_tools.database.tables import Address
from paf_tools.populate.data_store import PAFData
def populate_address_data(paf_path, erase_existing=True):
"""Populate address table in the database.
Uses the PAFData class to extract and clean the data from the postcode
address file. This is then saved to the addresses table of the database.
Returns the total number of entries added to the table.
Keyword arguments:
paf_path - the full path to the folder containing PAF data
erase_existing - boolean confirming whether existing database is to be
erased before populating (defaults to True)
"""
#Check if existing database is to be erased, then do so if true.
if erase_existing:
database.operations.erase_database()
data_generator = PAFData(paf_path)
session = database.Session()
count = 0
print("=== Populating {} table... ===".format(Address.__name__))
for row in data_generator:
session.add(Address(**row))
count += 1
#Only commit after 100000 additions
if not count % 100000:
session.commit()
print("{:,d} records added...".format(count))
else:
session.commit()
print("{:,d} total records added.".format(count))
return count
| DanMeakin/paf-tools | paf_tools/populate/populate.py | Python | mit | 1,589 |
# encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
#-*- coding: utf-8 -*-
import json
from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User as DjangoUser
from treeio.core.models import User, Group, Perspective, ModuleSetting, Object
from treeio.finance.models import Transaction, Liability, Category, Account, Equity, Asset, Currency, Tax
from treeio.identities.models import Contact, ContactType
class FinanceAPITest(TestCase):
"Finance api tests"
username = "api_test"
password = "api_password"
prepared = False
authentication_headers ={"CONTENT_TYPE": "application/json",
"HTTP_AUTHORIZATION" : "Basic YXBpX3Rlc3Q6YXBpX3Bhc3N3b3Jk" }
content_type ='application/json'
def setUp(self):
"Initial Setup"
if not self.prepared:
# Clean up first
Object.objects.all().delete()
User.objects.all().delete()
# Create objects
try:
self.group = Group.objects.get(name='test')
except Group.DoesNotExist:
Group.objects.all().delete()
self.group = Group(name='test')
self.group.save()
try:
self.user = DjangoUser.objects.get(username=self.username)
self.user.set_password(self.password)
try:
self.profile = self.user.get_profile()
except Exception:
User.objects.all().delete()
self.user = DjangoUser(username=self.username, password='')
self.user.set_password(self.password)
self.user.save()
except DjangoUser.DoesNotExist:
User.objects.all().delete()
self.user = DjangoUser(username=self.username, password='')
self.user.set_password(self.password)
self.user.save()
try:
perspective = Perspective.objects.get(name='default')
except Perspective.DoesNotExist:
Perspective.objects.all().delete()
perspective = Perspective(name='default')
perspective.set_default_user()
perspective.save()
ModuleSetting.set('default_perspective', perspective.id)
self.contact_type = ContactType(name='test')
self.contact_type.set_default_user()
self.contact_type.save()
self.contact = Contact(name='test', contact_type=self.contact_type)
self.contact.set_default_user()
self.contact.save()
self.category = Category(name='test')
self.category.set_default_user()
self.category.save()
self.equity = Equity(issue_price=10, sell_price=10, issuer=self.contact, owner=self.contact)
self.equity.set_default_user()
self.equity.save()
self.asset = Asset(name='test', owner=self.contact)
self.asset.set_default_user()
self.asset.save()
self.tax = Tax(name='test', rate=10)
self.tax.set_default_user()
self.tax.save()
self.currency = Currency(code="GBP",
name="Pounds",
symbol="L",
is_default=True)
self.currency.set_default_user()
self.currency.save()
self.account = Account(name='test', owner=self.contact, balance_currency=self.currency)
self.account.set_default_user()
self.account.save()
self.liability = Liability(name='test',
source=self.contact,
target=self.contact,
account=self.account,
value=10,
value_currency=self.currency)
self.liability.set_default_user()
self.liability.save()
self.transaction = Transaction(name='test', account=self.account, source=self.contact,
target=self.contact, value=10, value_currency=self.currency)
self.transaction.set_default_user()
self.transaction.save()
self.client = Client()
self.prepared = True
def test_unauthenticated_access(self):
"Test index page at /api/finance/currencies"
response = self.client.get('/api/finance/currencies')
# Redirects as unauthenticated
self.assertEquals(response.status_code, 401)
def test_get_currencies_list(self):
""" Test index page api/finance/currencies """
response = self.client.get(path=reverse('api_finance_currencies'), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_get_currency(self):
response = self.client.get(path=reverse('api_finance_currencies', kwargs={'object_ptr': self.currency.id}), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_update_currency(self):
updates = {"code": "RUB", "name": "api RUB", "factor": "10.00", "is_active": True}
response = self.client.put(path=reverse('api_finance_currencies', kwargs={'object_ptr': self.currency.id}),
content_type=self.content_type, data=json.dumps(updates), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(data['code'], updates['code'])
self.assertEquals(data['name'], updates['name'])
self.assertEquals(data['factor'], updates['factor'])
self.assertEquals(data['is_active'], updates['is_active'])
def test_get_taxes_list(self):
""" Test index page api/finance/taxes """
response = self.client.get(path=reverse('api_finance_taxes'), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_get_tax(self):
response = self.client.get(path=reverse('api_finance_taxes', kwargs={'object_ptr': self.tax.id}), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_update_tax(self):
updates = { "name" : "API TEST TAX", "rate": "20.00", "compound": False}
response = self.client.put(path=reverse('api_finance_taxes', kwargs={'object_ptr': self.tax.id}),
content_type=self.content_type, data=json.dumps(updates), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(data['name'], updates['name'])
self.assertEquals(data['rate'], updates['rate'])
self.assertEquals(data['compound'], updates['compound'])
def test_get_categories_list(self):
""" Test index page api/finance/categories """
response = self.client.get(path=reverse('api_finance_categories'), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_get_category(self):
response = self.client.get(path=reverse('api_finance_categories', kwargs={'object_ptr': self.category.id}), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_update_category(self):
updates = { "name":"Api category", "details": "api details" }
response = self.client.put(path=reverse('api_finance_categories', kwargs={'object_ptr': self.category.id}),
content_type=self.content_type, data=json.dumps(updates), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(data['name'], updates['name'])
self.assertEquals(data['details'], updates['details'])
def test_get_assets_list(self):
""" Test index page api/finance/assets """
response = self.client.get(path=reverse('api_finance_assets'), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_get_asset(self):
response = self.client.get(path=reverse('api_finance_assets', kwargs={'object_ptr': self.asset.id}), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_update_asset(self):
updates = { "current_value": "20.0", "owner": self.contact.id, "asset_type": "fixed", "name": "Api name",
"initial_value": '40.0'}
response = self.client.put(path=reverse('api_finance_assets', kwargs={'object_ptr': self.asset.id}),
content_type=self.content_type, data=json.dumps(updates), **self.authentication_headers)
print response.content
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(data['name'], updates['name'])
self.assertEquals(data['owner']['id'], updates['owner'])
self.assertEquals(data['asset_type'], updates['asset_type'])
self.assertEquals(data['initial_value'], updates['initial_value'])
self.assertEquals(data['current_value'], updates['current_value'])
def test_get_accounts_list(self):
""" Test index page api/finance/accounts """
response = self.client.get(path=reverse('api_finance_accounts'), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_get_account(self):
response = self.client.get(path=reverse('api_finance_accounts', kwargs={'object_ptr': self.account.id}), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_update_account(self):
updates = { "owner": self.user.id, "balance_display": 40.0, "name": "api test name", "balance_currency": self.currency.id }
response = self.client.put(path=reverse('api_finance_accounts', kwargs={'object_ptr': self.account.id}),
content_type=self.content_type, data=json.dumps(updates), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(data['name'], updates['name'])
self.assertEquals(data['owner']['id'], updates['owner'])
self.assertEquals(data['balance_display'], updates['balance_display'])
self.assertEquals(data['balance_currency']['id'], updates['balance_currency'])
def test_get_equities_list(self):
""" Test index page api/finance/equities"""
response = self.client.get(path=reverse('api_finance_equities'), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_get_equity(self):
response = self.client.get(path=reverse('api_finance_equities', kwargs={'object_ptr': self.equity.id}), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_update_account(self):
updates = { "issue_price": "100.0", "equity_type": "warrant", "sell_price": "50.0", "amount": 100,
"purchase_date": "2011-06-06", "owner": self.contact.id, "issuer": self.contact.id }
response = self.client.put(path=reverse('api_finance_equities', kwargs={'object_ptr': self.equity.id}),
content_type=self.content_type, data=json.dumps(updates), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(data['issue_price'], updates['issue_price'])
self.assertEquals(data['equity_type'], updates['equity_type'])
self.assertEquals(data['sell_price'], updates['sell_price'])
self.assertEquals(data['amount'], updates['amount'])
self.assertEquals(data['purchase_date'], updates['purchase_date'])
self.assertEquals(data['owner']['id'], updates['owner'])
self.assertEquals(data['issuer']['id'], updates['issuer'])
self.assertEquals(data['issuer']['id'], updates['issuer'])
def test_get_liabilities_list(self):
""" Test index page api/finance/liabilities"""
response = self.client.get(path=reverse('api_finance_liabilities'), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_get_liability(self):
response = self.client.get(path=reverse('api_finance_liabilities', kwargs={'object_ptr': self.liability.id}), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_update_liability(self):
updates = { "account": self.account.id, "target": self.contact.id, "value_display": "20.0",
"name": "api test name", "value_currency": self.currency.id}
response = self.client.put(path=reverse('api_finance_liabilities', kwargs={'object_ptr': self.liability.id}),
content_type=self.content_type, data=json.dumps(updates), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(data['name'], updates['name'])
self.assertEquals(data['target']['id'], updates['target'])
self.assertEquals(data['account']['id'], updates['account'])
self.assertEquals(data['value_display'], updates['value_display'])
self.assertEquals(data['value_currency']['id'], updates['value_currency'])
def test_get_transactions_list(self):
""" Test index page api/finance/transactions"""
response = self.client.get(path=reverse('api_finance_transactions'), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_get_transaction(self):
response = self.client.get(path=reverse('api_finance_transactions', kwargs={'object_ptr': self.transaction.id}), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_update_transaction(self):
updates = { "value_display": "1000.0", "account": self.account.id, "name": "api test name", "value_currency": self.currency.id,
"datetime": "2011-03-21 11:04:42", "target": self.contact.id, "account": self.account.id, "source": self.contact.id }
response = self.client.put(path=reverse('api_finance_transactions', kwargs={'object_ptr': self.transaction.id}),
content_type=self.content_type, data=json.dumps(updates), **self.authentication_headers)
print response.content
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(data['name'], updates['name'])
self.assertEquals(data['value_display'], updates['value_display'])
self.assertEquals(data['account']['id'], updates['account'])
self.assertEquals(data['value_currency']['id'], updates['value_currency'])
self.assertEquals(data['datetime'], updates['datetime'])
self.assertEquals(data['target']['id'], updates['target'])
self.assertEquals(data['account']['id'], updates['account'])
self.assertEquals(data['source']['id'], updates['source'])
| rogeriofalcone/treeio | finance/api/tests.py | Python | mit | 15,550 |
import numpy


class Clusters(object):
    def __init__(self, data_in, clusters_in):
        self.data = data_in
        if type(clusters_in) is int:
            #self.clusters = [Cluster(get_random_value_from_list(data_in)) for i in range(0, clusters_in)]
            #self.clusters = []
            cluster_origins = []
            for i in range(0, clusters_in):
                cluster_origins.append(get_random_value_from_list(data_in))
            self.clusters = [Cluster(cluster_origins[i]) for i in range(0, len(cluster_origins))]
        else:
            self.clusters = clusters_in

    def cycle(self, step, step_count):
        self.fit_data_to_clusters(step, step_count)
        self.reset_empty_clusters(step, step_count)
        self.set_clusters_origin_to_mean()
        self.clear_clusters()

    def fit_data_to_clusters(self, step, step_count):
        for i in range(step_count, len(self.data), step):
            self.get_closest_cluster_to_vector(self.data[i]).append_vector(self.data[i])

    def get_closest_cluster_to_vector(self, vector):
        sort_by_dist = sorted(self.clusters, key=lambda cluster : cluster.dist_to_origin(vector))
        return sort_by_dist[0]

    def reset_empty_clusters(self, step, step_count):
        for i in range(0, len(self.clusters)):
            cluster_origins = [self.clusters[i].get_origin() for i in range(0, len(self.clusters))]
            if len(self.clusters[i].get_vectors()) == 0:
                self.clusters[i] = Cluster(get_random_value_from_list(self.data))
                self.clear_clusters()
                self.fit_data_to_clusters(step, step_count)

    def set_clusters_origin_to_mean(self):
        for cluster in self.clusters:
            cluster.set_origin_to_mean()

    def clear_clusters(self):
        for cluster in self.clusters:
            cluster.clear()

    def __len__(self):
        return len(self.clusters)

    def __getitem__(self, index):
        return self.clusters[index]

    def __delitem__(self, index):
        del self.clusters[index]

    def __repr__(self):
        out_str = ""
        for i in range(0, len(self.clusters)):
            out_str += str(self.clusters[i]) + ", "
        return out_str


class Cluster(object):
    def __init__(self, origin_in):
        self.origin = origin_in
        self.numpy_origin = numpy.asarray(self.origin)
        self.vectors = []

    def append_vector(self, vector):
        self.vectors.append(vector)

    def set_origin_to_mean(self):
        self.origin = self.get_mean()
        self.numpy_origin = numpy.asarray(self.origin)

    def get_mean(self):
        sums = [0 for i in range(0, len(self.origin))]
        for i in range(0, len(self.origin)):
            sum = 0
            for j in range(0, len(self.vectors)):
                sum += self.vectors[j][i]
            if(len(self.vectors) != 0):
                sums[i] = sum/float(len(self.vectors))
        return tuple(sums)

    def dist_to_origin(self, vector):
        numpy_delta = numpy.subtract(vector, self.numpy_origin)
        return numpy.linalg.norm(numpy_delta)

    def get_vectors(self):
        return self.vectors

    def get_origin(self):
        return self.origin

    def __len__(self):
        return len(self.vectors)

    def __getitem__(self, index):
        return self.vectors[index]

    def __repr__(self):
        return str(self.origin)

    def clear(self):
        self.vectors = []
| FlintHill/SUAS-Competition | UpdatedImageProcessing/UpdatedImageProcessing/ShapeDetection/utils/cluster.py | Python | mit | 3,430 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# This file is part of Pynspect package (https://pypi.python.org/pypi/pynspect).
# Originally part of Mentat system (https://mentat.cesnet.cz/).
#
# Copyright (C) since 2016 CESNET, z.s.p.o (http://www.ces.net/).
# Copyright (C) since 2016 Jan Mach <honza.mach.ml@gmail.com>
# Use of this source is governed by the MIT license, see LICENSE file.
#-------------------------------------------------------------------------------
"""
Unit test module for testing the :py:mod:`pynspect.filters` module with
`IDEA <https://idea.cesnet.cz/en/index>`__ messages.
"""
__author__ = "Jan Mach <jan.mach@cesnet.cz>"
__credits__ = "Pavel Kácha <pavel.kacha@cesnet.cz>"
import unittest
import datetime
from idea import lite
from pynspect.rules import IntegerRule, VariableRule, ConstantRule,\
LogicalBinOpRule, UnaryOperationRule, ComparisonBinOpRule, MathBinOpRule, ListRule
from pynspect.gparser import PynspectFilterParser
from pynspect.filters import DataObjectFilter
from pynspect.compilers import IDEAFilterCompiler
#-------------------------------------------------------------------------------
# NOTE: Sorry for the long lines in this file. They are deliberate, because the
# assertion permutations are (IMHO) more readable this way.
#-------------------------------------------------------------------------------
class TestDataObjectFilterIDEA(unittest.TestCase):
"""
Unit test class for testing the :py:mod:`pynspect.filters` module.
"""
test_msg1 = {
"ID" : "e214d2d9-359b-443d-993d-3cc5637107a0",
"WinEndTime" : "2016-06-21 11:25:01Z",
"ConnCount" : 2,
"Source" : [
{
"IP4" : [
"188.14.166.39"
]
}
],
"Format" : "IDEA0",
"WinStartTime" : "2016-06-21 11:20:01Z",
"_CESNET" : {
"StorageTime" : 1466508305
},
"Target" : [
{
"IP4" : [
"195.113.165.128/25"
],
"Port" : [
"22"
],
"Proto" : [
"tcp",
"ssh"
],
"Anonymised" : True
}
],
"Note" : "SSH login attempt",
"DetectTime" : "2016-06-21 13:08:27Z",
"Node" : [
{
"Name" : "cz.cesnet.mentat.warden_filer",
"Type" : [
"Relay"
]
},
{
"AggrWin" : "00:05:00",
"Type" : [
"Connection",
"Honeypot",
"Recon"
],
"SW" : [
"Kippo"
],
"Name" : "cz.uhk.apate.cowrie"
}
],
"Category" : [
"Attempt.Login"
]
}
def setUp(self):
self.flt = DataObjectFilter()
self.psr = PynspectFilterParser()
self.psr.build()
self.cpl = IDEAFilterCompiler()
self.msg_idea = lite.Idea(self.test_msg1)
def build_rule(self, rule_str):
"""
Build and compile rule tree from given rule string.
"""
rule = self.psr.parse(rule_str)
rule = self.cpl.compile(rule)
return rule
def check_rule(self, rule):
"""
Check given rule against internal test message and filter.
"""
return self.flt.filter(rule, self.msg_idea)
def test_01_basic_logical(self):
"""
Perform filtering tests with basic logical expressions.
"""
self.maxDiff = None
rule = LogicalBinOpRule('OP_AND', ConstantRule(True), ConstantRule(True))
self.assertEqual(self.check_rule(rule), True)
rule = LogicalBinOpRule('OP_AND', ConstantRule(True), ConstantRule(False))
self.assertEqual(self.check_rule(rule), False)
rule = LogicalBinOpRule('OP_AND', ConstantRule(False), ConstantRule(True))
self.assertEqual(self.check_rule(rule), False)
rule = LogicalBinOpRule('OP_AND', ConstantRule(False), ConstantRule(False))
self.assertEqual(self.check_rule(rule), False)
rule = LogicalBinOpRule('OP_OR', ConstantRule(True), ConstantRule(True))
self.assertEqual(self.check_rule(rule), True)
rule = LogicalBinOpRule('OP_OR', ConstantRule(True), ConstantRule(False))
self.assertEqual(self.check_rule(rule), True)
rule = LogicalBinOpRule('OP_OR', ConstantRule(False), ConstantRule(True))
self.assertEqual(self.check_rule(rule), True)
rule = LogicalBinOpRule('OP_OR', ConstantRule(False), ConstantRule(False))
self.assertEqual(self.check_rule(rule), False)
rule = LogicalBinOpRule('OP_XOR', ConstantRule(True), ConstantRule(True))
self.assertEqual(self.check_rule(rule), False)
rule = LogicalBinOpRule('OP_XOR', ConstantRule(True), ConstantRule(False))
self.assertEqual(self.check_rule(rule), True)
rule = LogicalBinOpRule('OP_XOR', ConstantRule(False), ConstantRule(True))
self.assertEqual(self.check_rule(rule), True)
rule = LogicalBinOpRule('OP_XOR', ConstantRule(False), ConstantRule(False))
self.assertEqual(self.check_rule(rule), False)
rule = UnaryOperationRule('OP_NOT', ConstantRule(True))
self.assertEqual(self.check_rule(rule), False)
rule = UnaryOperationRule('OP_NOT', ConstantRule(False))
self.assertEqual(self.check_rule(rule), True)
rule = UnaryOperationRule('OP_NOT', VariableRule("Target.Anonymised"))
self.assertEqual(self.check_rule(rule), False)
def test_02_basic_comparison(self):
"""
Perform filtering tests with basic comparison operations.
"""
self.maxDiff = None
rule = ComparisonBinOpRule('OP_EQ', VariableRule("ID"), ConstantRule("e214d2d9-359b-443d-993d-3cc5637107a0"))
self.assertEqual(self.check_rule(rule), True)
rule = ComparisonBinOpRule('OP_EQ', VariableRule("ID"), ConstantRule("e214d2d9-359b-443d-993d-3cc5637107"))
self.assertEqual(self.check_rule(rule), False)
rule = ComparisonBinOpRule('OP_NE', VariableRule("ID"), ConstantRule("e214d2d9-359b-443d-993d-3cc5637107a0"))
self.assertEqual(self.check_rule(rule), False)
rule = ComparisonBinOpRule('OP_NE', VariableRule("ID"), ConstantRule("e214d2d9-359b-443d-993d-3cc5637107"))
self.assertEqual(self.check_rule(rule), True)
rule = ComparisonBinOpRule('OP_LIKE', VariableRule("ID"), ConstantRule("e214d2d9"))
self.assertEqual(self.check_rule(rule), True)
rule = ComparisonBinOpRule('OP_LIKE', VariableRule("ID"), ConstantRule("xxxxxxxx"))
self.assertEqual(self.check_rule(rule), False)
rule = ComparisonBinOpRule('OP_IN', VariableRule("Category"), ListRule(ConstantRule("Phishing"), ListRule(ConstantRule("Attempt.Login"))))
self.assertEqual(self.check_rule(rule), True)
rule = ComparisonBinOpRule('OP_IN', VariableRule("Category"), ListRule(ConstantRule("Phishing"), ListRule(ConstantRule("Spam"))))
self.assertEqual(self.check_rule(rule), False)
rule = ComparisonBinOpRule('OP_IS', VariableRule("Category"), ListRule(ConstantRule("Attempt.Login")))
self.assertEqual(self.check_rule(rule), True)
rule = ComparisonBinOpRule('OP_IS', VariableRule("Category"), ListRule(ConstantRule("Phishing"), ListRule(ConstantRule("Attempt.Login"))))
self.assertEqual(self.check_rule(rule), False)
rule = ComparisonBinOpRule('OP_EQ', VariableRule("ConnCount"), IntegerRule(2))
self.assertEqual(self.check_rule(rule), True)
rule = ComparisonBinOpRule('OP_EQ', VariableRule("ConnCount"), IntegerRule(4))
self.assertEqual(self.check_rule(rule), False)
rule = ComparisonBinOpRule('OP_NE', VariableRule("ConnCount"), IntegerRule(2))
self.assertEqual(self.check_rule(rule), False)
rule = ComparisonBinOpRule('OP_NE', VariableRule("ConnCount"), IntegerRule(4))
self.assertEqual(self.check_rule(rule), True)
rule = ComparisonBinOpRule('OP_GT', VariableRule("ConnCount"), IntegerRule(2))
self.assertEqual(self.check_rule(rule), False)
rule = ComparisonBinOpRule('OP_GT', VariableRule("ConnCount"), IntegerRule(1))
self.assertEqual(self.check_rule(rule), True)
rule = ComparisonBinOpRule('OP_GE', VariableRule("ConnCount"), IntegerRule(2))
self.assertEqual(self.check_rule(rule), True)
rule = ComparisonBinOpRule('OP_GE', VariableRule("ConnCount"), IntegerRule(1))
self.assertEqual(self.check_rule(rule), True)
rule = ComparisonBinOpRule('OP_GE', VariableRule("ConnCount"), IntegerRule(3))
self.assertEqual(self.check_rule(rule), False)
rule = ComparisonBinOpRule('OP_LT', VariableRule("ConnCount"), IntegerRule(2))
self.assertEqual(self.check_rule(rule), False)
rule = ComparisonBinOpRule('OP_LT', VariableRule("ConnCount"), IntegerRule(3))
self.assertEqual(self.check_rule(rule), True)
rule = ComparisonBinOpRule('OP_LE', VariableRule("ConnCount"), IntegerRule(2))
self.assertEqual(self.check_rule(rule), True)
rule = ComparisonBinOpRule('OP_LE', VariableRule("ConnCount"), IntegerRule(3))
self.assertEqual(self.check_rule(rule), True)
rule = ComparisonBinOpRule('OP_LE', VariableRule("ConnCount"), IntegerRule(1))
self.assertEqual(self.check_rule(rule), False)
def test_03_parsed_comparison(self):
"""
Perform filtering tests with basic parsed comparison operations.
"""
self.maxDiff = None
rule = self.build_rule('ID == "e214d2d9-359b-443d-993d-3cc5637107a0"')
self.assertEqual(self.check_rule(rule), True)
rule = self.build_rule('ID eq "e214d2d9-359b-443d-993d-3cc5637107"')
self.assertEqual(self.check_rule(rule), False)
rule = self.build_rule('ID != "e214d2d9-359b-443d-993d-3cc5637107a0"')
self.assertEqual(self.check_rule(rule), False)
rule = self.build_rule('ID ne "e214d2d9-359b-443d-993d-3cc5637107"')
self.assertEqual(self.check_rule(rule), True)
rule = self.build_rule('ID like "e214d2d9"')
self.assertEqual(self.check_rule(rule), True)
rule = self.build_rule('ID LIKE "xxxxxxxx"')
self.assertEqual(self.check_rule(rule), False)
rule = self.build_rule('Category in ["Phishing" , "Attempt.Login"]')
self.assertEqual(self.check_rule(rule), True)
rule = self.build_rule('Category IN ["Phishing" , "Spam"]')
self.assertEqual(self.check_rule(rule), False)
rule = self.build_rule('Category is ["Attempt.Login"]')
self.assertEqual(self.check_rule(rule), True)
rule = self.build_rule('Category IS ["Phishing" , "Attempt.Login"]')
self.assertEqual(self.check_rule(rule), False)
rule = self.build_rule('ConnCount == 2')
self.assertEqual(self.check_rule(rule), True)
rule = self.build_rule('ConnCount eq 4')
self.assertEqual(self.check_rule(rule), False)
rule = self.build_rule('ConnCount != 2')
self.assertEqual(self.check_rule(rule), False)
rule = self.build_rule('ConnCount ne 4')
self.assertEqual(self.check_rule(rule), True)
rule = self.build_rule('ConnCount > 2')
self.assertEqual(self.check_rule(rule), False)
rule = self.build_rule('ConnCount gt 1')
self.assertEqual(self.check_rule(rule), True)
rule = self.build_rule('ConnCount >= 2')
self.assertEqual(self.check_rule(rule), True)
rule = self.build_rule('ConnCount ge 1')
self.assertEqual(self.check_rule(rule), True)
rule = self.build_rule('ConnCount GE 3')
self.assertEqual(self.check_rule(rule), False)
rule = self.build_rule('ConnCount < 2')
self.assertEqual(self.check_rule(rule), False)
rule = self.build_rule('ConnCount lt 3')
self.assertEqual(self.check_rule(rule), True)
rule = self.build_rule('ConnCount <= 2')
self.assertEqual(self.check_rule(rule), True)
rule = self.build_rule('ConnCount le 3')
self.assertEqual(self.check_rule(rule), True)
rule = self.build_rule('ConnCount LE 1')
self.assertEqual(self.check_rule(rule), False)
def test_04_basic_math(self):
"""
Perform filtering tests with basic math operations.
"""
self.maxDiff = None
rule = MathBinOpRule('OP_PLUS', VariableRule("ConnCount"), IntegerRule(1))
self.assertEqual(self.check_rule(rule), 3)
rule = MathBinOpRule('OP_MINUS', VariableRule("ConnCount"), IntegerRule(1))
self.assertEqual(self.check_rule(rule), 1)
rule = MathBinOpRule('OP_TIMES', VariableRule("ConnCount"), IntegerRule(5))
self.assertEqual(self.check_rule(rule), 10)
rule = MathBinOpRule('OP_DIVIDE', VariableRule("ConnCount"), IntegerRule(2))
self.assertEqual(self.check_rule(rule), 1)
rule = MathBinOpRule('OP_MODULO', VariableRule("ConnCount"), IntegerRule(2))
self.assertEqual(self.check_rule(rule), 0)
def test_05_parsed_math(self):
"""
Perform filtering tests with parsed math operations.
"""
self.maxDiff = None
rule = self.build_rule('ConnCount + 1')
self.assertEqual(self.check_rule(rule), 3)
rule = self.build_rule('ConnCount - 1')
self.assertEqual(self.check_rule(rule), 1)
rule = self.build_rule('ConnCount * 5')
self.assertEqual(self.check_rule(rule), 10)
rule = self.build_rule('ConnCount / 2')
self.assertEqual(self.check_rule(rule), 1)
rule = self.build_rule('ConnCount % 2')
self.assertEqual(self.check_rule(rule), 0)
def test_06_advanced_filters(self):
"""
Perform advanced filtering tests.
"""
self.maxDiff = None
rule = self.build_rule('DetectTime + 3600')
self.assertEqual(repr(rule), "MATHBINOP(VARIABLE('DetectTime') OP_PLUS TIMEDELTA(datetime.timedelta(0, 3600)))")
expected_res = (datetime.datetime(2016, 6, 21, 13, 8, 27) + datetime.timedelta(seconds = 3600))
self.assertEqual(self.check_rule(rule), expected_res)
rule = self.build_rule('(ConnCount + 10) > 11')
self.assertEqual(repr(rule), "COMPBINOP(MATHBINOP(VARIABLE('ConnCount') OP_PLUS INTEGER(10)) OP_GT INTEGER(11))")
self.assertEqual(self.check_rule(rule), True)
rule = self.build_rule('(ConnCount + 3) < 5')
self.assertEqual(repr(rule), "COMPBINOP(MATHBINOP(VARIABLE('ConnCount') OP_PLUS INTEGER(3)) OP_LT INTEGER(5))")
self.assertEqual(self.check_rule(rule), False)
rule = self.build_rule('((ConnCount + 3) < 5) or ((ConnCount + 10) > 11)')
self.assertEqual(repr(rule), "LOGBINOP(COMPBINOP(MATHBINOP(VARIABLE('ConnCount') OP_PLUS INTEGER(3)) OP_LT INTEGER(5)) OP_OR COMPBINOP(MATHBINOP(VARIABLE('ConnCount') OP_PLUS INTEGER(10)) OP_GT INTEGER(11)))")
self.assertEqual(self.check_rule(rule), True)
rule = self.build_rule('(DetectTime == 2016-06-21T13:08:27Z)')
self.assertEqual(repr(rule), "COMPBINOP(VARIABLE('DetectTime') OP_EQ DATETIME(datetime.datetime(2016, 6, 21, 13, 8, 27)))")
self.assertEqual(self.check_rule(rule), True)
rule = self.build_rule('(DetectTime != 2016-06-21T13:08:27Z)')
self.assertEqual(repr(rule), "COMPBINOP(VARIABLE('DetectTime') OP_NE DATETIME(datetime.datetime(2016, 6, 21, 13, 8, 27)))")
self.assertEqual(self.check_rule(rule), False)
rule = self.build_rule('(DetectTime >= 2016-06-21T14:08:27Z)')
self.assertEqual(repr(rule), "COMPBINOP(VARIABLE('DetectTime') OP_GE DATETIME(datetime.datetime(2016, 6, 21, 14, 8, 27)))")
self.assertEqual(self.check_rule(rule), False)
rule = self.build_rule('(DetectTime <= 2016-06-21T14:08:27Z)')
self.assertEqual(repr(rule), "COMPBINOP(VARIABLE('DetectTime') OP_LE DATETIME(datetime.datetime(2016, 6, 21, 14, 8, 27)))")
self.assertEqual(self.check_rule(rule), True)
rule = self.build_rule('DetectTime < (utcnow() + 05:00:00)')
self.assertEqual(repr(rule), "COMPBINOP(VARIABLE('DetectTime') OP_LT MATHBINOP(FUNCTION(utcnow()) OP_PLUS TIMEDELTA(datetime.timedelta(0, 18000))))")
self.assertEqual(self.check_rule(rule), True)
rule = self.build_rule('DetectTime > (utcnow() - 05:00:00)')
self.assertEqual(repr(rule), "COMPBINOP(VARIABLE('DetectTime') OP_GT MATHBINOP(FUNCTION(utcnow()) OP_MINUS TIMEDELTA(datetime.timedelta(0, 18000))))")
self.assertEqual(self.check_rule(rule), False)
rule = self.build_rule('(Source.IP4 == 188.14.166.39)')
self.assertEqual(repr(rule), "COMPBINOP(VARIABLE('Source.IP4') OP_EQ IPV4(IP4('188.14.166.39')))")
self.assertEqual(self.check_rule(rule), True)
rule = self.build_rule('(Source.IP4 in ["188.14.166.39","188.14.166.40","188.14.166.41"])')
self.assertEqual(repr(rule), "COMPBINOP(VARIABLE('Source.IP4') OP_IN IPLIST(IPV4(IP4('188.14.166.39')), IPV4(IP4('188.14.166.40')), IPV4(IP4('188.14.166.41'))))")
self.assertEqual(self.check_rule(rule), True)
# list with CIDR addresses
rule = self.build_rule('(Source.IP4 in ["188.14.166.0/24","10.0.0.0/8","189.14.166.41"])')
self.assertEqual(repr(rule), "COMPBINOP(VARIABLE('Source.IP4') OP_IN IPLIST(IPV4(IP4Net('188.14.166.0/24')), IPV4(IP4Net('10.0.0.0/8')), IPV4(IP4('189.14.166.41'))))")
self.assertEqual(self.check_rule(rule), True)
def test_06_shortcuts(self):
"""
Perform tests of shortcut methods.
"""
self.maxDiff = None
# Let the shortcut method initialize everything.
flt = DataObjectFilter(
parser = PynspectFilterParser,
compiler = IDEAFilterCompiler
)
rule = flt.prepare('(Source.IP4 == 188.14.166.39)')
self.assertEqual(repr(rule), "COMPBINOP(VARIABLE('Source.IP4') OP_EQ IPV4(IP4('188.14.166.39')))")
self.assertEqual(self.check_rule(rule), True)
# Create parser and compiler instances by hand, but register them into filter.
cpl = IDEAFilterCompiler()
psr = PynspectFilterParser()
psr.build()
flt = DataObjectFilter(
parser = psr,
compiler = cpl
)
rule = flt.prepare('(Source.IP4 == 188.14.166.39)')
self.assertEqual(repr(rule), "COMPBINOP(VARIABLE('Source.IP4') OP_EQ IPV4(IP4('188.14.166.39')))")
self.assertEqual(self.check_rule(rule), True)
#-------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
| honzamach/pynspect | pynspect/tests/test_filters_idea.py | Python | mit | 19,028 |
import math

import bitstring as bt

from research.coding.common import BitEncoder, BitDecoder


class Decoder(BitDecoder):
    def decode(self):
        return self.stream.read('ue')


class Encoder(BitEncoder):
    def encode(self, n):
        self.bit_stream.append(bt.pack('ue', n))
| west-tandon/ReSearch | research/coding/golomb.py | Python | mit | 287 |
from req import Service
from service.base import BaseService
from utils.form import form_validation


class SchoolService(BaseService):
    def __init__(self, db, rs):
        super().__init__(db, rs)
        SchoolService.inst = self

    def get_school_list(self):
        res = yield self.db.execute('SELECT * FROM schools;')
        return (None, res.fetchall())

    def get_school(self, data={}):
        required_args = [{
            'name': '+id',
            'type': int,
        }]
        err = form_validation(data, required_args)
        if err: return (err, None)
        res = yield self.db.execute('SELECT * FROM schools WHERE id=%s;', (data['id'],))
        if res.rowcount == 0:
            return ((404, 'No school ID'), None)
        return (None, res.fetchone())
| Tocknicsu/nctuoj | backend/service/school.py | Python | mit | 784 |
"""
WSGI config for DevelopersShelf project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "DevelopersShelf.settings")
application = get_wsgi_application()
| hkdahal/DevelopersShelf | DevelopersShelf/wsgi.py | Python | mit | 407 |
from mesa.visualization.ModularVisualization import ModularServer

from LabModel import LabModel
from CoffeMakerAgent import CoffeMakerAgent
from TVAgent import TVAgent
from AccessAgent import AccessAgent
from PC import PC
from DrawLabMapBackEnd import DrawLabMapBackEnd
from DrawLabMapBackEnd import RepresentationModule
from UserAgent import UserAgent


def agent_portrayal(agent):
    portrayal = {"Shape": "circle",
                 "Filled": "true",
                 "Layer": 0,
                 "Color": "blue",
                 "r": 0.5}
    if isinstance(agent, UserAgent) and agent.pos == (0, 2):
        portrayal['Color'] = 'green'
    if isinstance(agent, CoffeMakerAgent):
        portrayal = {"Shape": "rect",
                     "Filled": "true",
                     "Layer": 0,
                     "Color": "black",
                     "text": agent.amount
                     }
        if agent.amount == 0:
            portrayal["text_color"] = "red"
        else:
            portrayal["text_color"] = "green"
    if isinstance(agent, TVAgent):
        portrayal = {"Shape": "rect",
                     "Filled": "false",
                     "Layer": 0,
                     "Color": "grey"
                     }
        if agent.state == True:
            portrayal["Color"] = "yellow"
    if isinstance(agent, AccessAgent):
        portrayal = {"Shape": "rect",
                     "Filled": "false",
                     "Layer": 0,
                     "Color": "black"
                     }
    if isinstance(agent, PC):
        portrayal = {"Shape": "rect",
                     "Filled": "true",
                     "Layer": 0,
                     "Color": "black",
                     "text": 'PC',
                     "text_color": 'blue'
                     }
        if agent.state == 'on':
            portrayal["Color"] = "yellow"
        if agent.state == 'standby':
            portrayal["Color"] = "grey"
    return portrayal


grid = DrawLabMapBackEnd(agent_portrayal, 15, 18, 600, 600)
representation = RepresentationModule()
server = ModularServer(LabModel,
                       [grid, representation],
                       "Lab Model",
                       14, 17)
server.port = 8882
server.launch()
| gsi-upm/soba | projects/oldProyects/EWESim/Visual.py | Python | mit | 2,183 |
#!/bin/python
# -*- coding: utf-8 -*-
import urllib2
import re
import logging


def webpage_urlopen(url, crawl_timeout):
    """
    webpage_urlopen - Function to get content from specific url.

    Args:
        url: The source url to be request.
        crawl_timeout: The request timeout value.

    Returns:
        content: Response content of the urls.
    """
    try:
        page = urllib2.urlopen(url, timeout = crawl_timeout)
        content = page.read()
        regex = ur'meta.*charset=("?)(.*?)("|>)'
        match = re.search(regex, content)
        html_charset = 'utf-8' # default charset
        if match:
            html_charset = match.group(2)
        else:
            logging.warning("Fail to match charset Regex for url:%s, "
                            "using the default charset.", url)
            return content
        if html_charset == "gb2312" or html_charset == "GBK":
            html_charset = "GB18030"
        elif html_charset == "iso-8859-1":
            html_charset = "latin-1"
        return content.decode(html_charset).encode("utf-8")
    except urllib2.HTTPError as e:
        if e.code == 403:
            logging.error("Fail to webpage_urlopen for url(%s) as "
                          "HTTPError(403-Forbidden): %s", url, e)
        if e.code == 404:
            logging.error("Fail to webpage_urlopen for url(%s) as "
                          "HTTPError(404-Not Found): %s", url, e)
        if e.code == 500:
            logging.error("Fail to webpage_urlopen for url(%s) as "
                          "HTTPError(500-Internal Server Error): %s", url, e)
        else:
            logging.error("Fail to webpage_urlopen for url(%s) as "
                          "HTTPError: %s", url, e)
    except urllib2.URLError as e:
        logging.error("Fail to webpage_urlopen for url(%s) as URLError: %s", url, e)
    except IOError as e:
        logging.error("Fail to webpage_urlopen for url(%s) as IOError: %s", url, e)
    except Exception as e:
        logging.error("Fail to webpage_urlopen for url(%s) as unknowException: %s", url, e)
    return ""
| fivezh/Keepgoing | py_spider/webpage_urlopen.py | Python | mit | 2,074 |
# -*- coding: utf-8 -*-
from lode_runner import dataprovider
from unittest import TestCase
try:
from mock import patch, Mock
except ImportError:
from unittest.mock import patch, Mock
from tests.helpers import get_response_from_file, wait_for
from stf_utils.stf_connect.client import SmartphoneTestingFarmClient, STFConnectedDevicesWatcher
class TestSmartphoneTestingFarmClient(TestCase):
def setUp(self):
super(TestSmartphoneTestingFarmClient, self).setUp()
self.watcher = None
get_all_devices = get_response_from_file('get_all_devices.json')
get_device = get_response_from_file('get_device_x86.json')
remote_connect = get_response_from_file('remote_connect.json')
self.all_devices_mock = Mock(return_value=Mock(json=Mock(return_value=get_all_devices)))
self.get_device_mock = Mock(return_value=Mock(json=Mock(return_value=get_device)))
self.remote_connect_mock = Mock(return_value=Mock(json=Mock(return_value=remote_connect)))
def tearDown(self):
if self.watcher:
self.watcher.stop()
@dataprovider([
[
{
"group_name": "alfa",
"amount": "1",
"min_sdk": "16",
"max_sdk": "23",
"specs": {"abi": "x86", "platform": "Android"}
}
]
])
def test_connect_devices(self, device_spec):
"""
- set config with 1 device
- try to connect devices
Expected: 1 device connected and 1 device in connected_devices list
- stop stf-connect
Expected: 0 devices connected and lists of devices was empty
"""
with patch(
'stf_utils.common.stfapi.SmartphoneTestingFarmAPI.get_all_devices', self.all_devices_mock,
), patch(
'stf_utils.stf_connect.client.SmartphoneTestingFarmClient.get_device', self.get_device_mock,
), patch(
'stf_utils.stf_connect.client.SmartphoneTestingFarmClient.add_device', Mock(),
), patch(
'stf_utils.stf_connect.client.SmartphoneTestingFarmClient.remote_connect', self.remote_connect_mock,
), patch(
'stf_utils.stf_connect.client.SmartphoneTestingFarmClient.delete_device', Mock(),
), patch(
'stf_utils.stf_connect.client.SmartphoneTestingFarmClient.remote_disconnect', Mock(),
), patch(
'stf_utils.common.adb.device_is_ready', Mock(return_value=True)
), patch(
'stf_utils.common.adb.connect', Mock(return_value=True)
), patch(
'stf_utils.common.adb.disconnect', Mock(return_value=True)
):
stf = SmartphoneTestingFarmClient(
host="http://host.domain",
common_api_path="/api/v1",
oauth_token="test token",
device_spec=device_spec,
devices_file_path="./devices",
shutdown_emulator_on_disconnect=True
)
stf.connect_devices()
wait_for(lambda: self.assertTrue(stf.shutdown_emulator_on_disconnect))
wait_for(lambda: self.assertEqual(len(stf.device_groups[0].get("added_devices")), int(device_spec[0].get("amount"))))
wait_for(lambda: self.assertEqual(len(stf.device_groups[0].get("connected_devices")), int(device_spec[0].get("amount"))))
stf.close_all()
wait_for(lambda: self.assertEqual(len(stf.device_groups[0].get("added_devices")), 0))
wait_for(lambda: self.assertEqual(len(stf.device_groups[0].get("connected_devices")), 0))
@dataprovider([
[
{
"group_name": "alfa",
"amount": "1",
"specs": {"abi": "x86"}
}
]
])
def test_connect_new_device_after_device_lost(self, device_spec):
"""
- set config with 1 device
- try to connect devices
Expected: 1 device connected and 1 device in connected_devices list
- start devices watcher
- got 'False' in device_is_ready method (connected device is not available)
Expected: 0 devices connected and lists of devices was empty
(device was removed from stf-connect and device by adb was disconnected)
- try to connect available devices
Expected: 1 device connected and 1 device in connected_devices list
"""
def raise_exception():
raise Exception('something ugly happened in adb connect')
with patch(
'stf_utils.common.stfapi.SmartphoneTestingFarmAPI.get_all_devices', self.all_devices_mock,
), patch(
'stf_utils.stf_connect.client.SmartphoneTestingFarmClient.get_device', self.get_device_mock,
), patch(
'stf_utils.stf_connect.client.SmartphoneTestingFarmClient.add_device', Mock(),
), patch(
'stf_utils.stf_connect.client.SmartphoneTestingFarmClient.remote_connect', self.remote_connect_mock,
), patch(
'stf_utils.common.adb.device_is_ready', Mock(side_effect=[False, True, True])
), patch(
'stf_utils.common.adb.connect', Mock(side_effect=[True, raise_exception, True])
), patch(
'stf_utils.common.adb.disconnect', Mock(return_value=True)
):
stf = SmartphoneTestingFarmClient(
host="http://host.domain",
common_api_path="/api/v1",
oauth_token="test token",
device_spec=device_spec,
devices_file_path="./devices",
shutdown_emulator_on_disconnect=True
)
stf.connect_devices()
self.assertTrue(wait_for(lambda: len(stf.device_groups[0].get("added_devices")) == int(device_spec[0].get("amount"))))
self.assertTrue(wait_for(lambda: len(stf.device_groups[0].get("connected_devices")) == int(device_spec[0].get("amount"))))
self.watcher = STFConnectedDevicesWatcher(stf)
self.watcher.start()
self.assertTrue(wait_for(lambda: len(stf.device_groups[0].get("added_devices")) == 0))
self.assertTrue(wait_for(lambda: len(stf.device_groups[0].get("connected_devices")) == 0))
stf.connect_devices()
self.assertTrue(wait_for(lambda: stf.shutdown_emulator_on_disconnect))
self.assertTrue(wait_for(lambda: len(stf.device_groups[0].get("added_devices")) == int(device_spec[0].get("amount"))))
self.assertTrue(wait_for(lambda: len(stf.device_groups[0].get("connected_devices")) == int(device_spec[0].get("amount"))))
| 2gis/stf-utils | tests/test_stf_connect_client.py | Python | mit | 6,649 |
#!/usr/bin python
# -*- coding: utf-8 -*-
"""
This file is part of the pyquaternion python module
Author: Kieran Wynn
Website: https://github.com/KieranWynn/pyquaternion
Documentation: http://kieranwynn.github.io/pyquaternion/
Version: 1.0.0
License: The MIT License (MIT)
Copyright (c) 2015 Kieran Wynn
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
test_quaternion.py - Unit test for quaternion module
"""
import unittest
from math import pi, sin, cos
from random import random
import numpy as np
import pyquaternion
Quaternion = pyquaternion.Quaternion
ALMOST_EQUAL_TOLERANCE = 13
def randomElements():
return tuple(np.random.uniform(-1, 1, 4))
class TestQuaternionInitialisation(unittest.TestCase):
def test_init_default(self):
q = Quaternion()
self.assertIsInstance(q, Quaternion)
self.assertEqual(q, Quaternion(1., 0., 0., 0.))
def test_init_copy(self):
q1 = Quaternion.random()
q2 = Quaternion(q1)
self.assertIsInstance(q2, Quaternion)
self.assertEqual(q2, q1)
with self.assertRaises(TypeError):
q3 = Quaternion(None)
with self.assertRaises(ValueError):
q4 = Quaternion("String")
def test_init_random(self):
r1 = Quaternion.random()
r2 = Quaternion.random()
self.assertAlmostEqual(r1.norm, 1.0, ALMOST_EQUAL_TOLERANCE)
self.assertIsInstance(r1, Quaternion)
#self.assertNotEqual(r1, r2) #TODO, this *may* fail at random
def test_init_from_scalar(self):
s = random()
q1 = Quaternion(s)
q2 = Quaternion(repr(s))
self.assertIsInstance(q1, Quaternion)
self.assertIsInstance(q2, Quaternion)
self.assertEqual(q1, Quaternion(s, 0.0, 0.0, 0.0))
self.assertEqual(q2, Quaternion(s, 0.0, 0.0, 0.0))
with self.assertRaises(TypeError):
q = Quaternion(None)
with self.assertRaises(ValueError):
q = Quaternion("String")
def test_init_from_elements(self):
a, b, c, d = randomElements()
q1 = Quaternion(a, b, c, d)
q2 = Quaternion(repr(a), repr(b), repr(c), repr(d))
q3 = Quaternion(a, repr(b), c, d)
self.assertIsInstance(q1, Quaternion)
self.assertIsInstance(q2, Quaternion)
self.assertIsInstance(q3, Quaternion)
self.assertTrue(np.array_equal(q1.q, [a, b, c, d]))
self.assertEqual(q1, q2)
self.assertEqual(q2, q3)
with self.assertRaises(TypeError):
q = Quaternion(None, b, c, d)
with self.assertRaises(ValueError):
q = Quaternion(a, b, "String", d)
with self.assertRaises(ValueError):
q = Quaternion(a, b, c)
with self.assertRaises(ValueError):
q = Quaternion(a, b, c, d, random())
def test_init_from_array(self):
r = randomElements()
a = np.array(r)
q = Quaternion(a)
self.assertIsInstance(q, Quaternion)
self.assertEqual(q, Quaternion(*r))
with self.assertRaises(ValueError):
q = Quaternion(a[1:4]) # 3-vector
with self.assertRaises(ValueError):
q = Quaternion(np.hstack((a, a))) # 8-vector
with self.assertRaises(ValueError):
q = Quaternion(np.array([a, a])) # 2x4-
with self.assertRaises(TypeError):
q = Quaternion(np.array([None, None, None, None]))
def test_init_from_tuple(self):
t = randomElements()
q = Quaternion(t)
self.assertIsInstance(q, Quaternion)
self.assertEqual(q, Quaternion(*t))
with self.assertRaises(ValueError):
q = Quaternion(t[1:4]) # 3-tuple
with self.assertRaises(ValueError):
q = Quaternion(t + t) # 8-tuple
with self.assertRaises(ValueError):
q = Quaternion((t, t)) # 2x4-tuple
with self.assertRaises(TypeError):
q = Quaternion((None, None, None, None))
def test_init_from_list(self):
r = randomElements()
l = list(r)
q = Quaternion(l)
self.assertIsInstance(q, Quaternion)
self.assertEqual(q, Quaternion(*l))
with self.assertRaises(ValueError):
q = Quaternion(l[1:4]) # 3-list
with self.assertRaises(ValueError):
q = Quaternion(l + l) # 8-list
with self.assertRaises(ValueError):
q = Quaternion((l, l)) # 2x4-list
with self.assertRaises(TypeError):
q = Quaternion([None, None, None, None])
def test_init_from_explicit_elements(self):
e1, e2, e3, e4 = randomElements()
q1 = Quaternion(w=e1, x=e2, y=e3, z=e4)
q2 = Quaternion(a=e1, b=repr(e2), c=e3, d=e4)
q3 = Quaternion(a=e1, i=e2, j=e3, k=e4)
q4 = Quaternion(a=e1)
self.assertIsInstance(q1, Quaternion)
self.assertIsInstance(q2, Quaternion)
self.assertIsInstance(q3, Quaternion)
self.assertIsInstance(q4, Quaternion)
self.assertEqual(q1, Quaternion(e1, e2, e3, e4))
self.assertEqual(q1, q2)
self.assertEqual(q2, q3)
self.assertEqual(q4, Quaternion(e1))
with self.assertRaises(TypeError):
q = Quaternion(a=None, b=e2, c=e3, d=e4)
with self.assertRaises(ValueError):
q = Quaternion(a=e1, b=e2, c="String", d=e4)
with self.assertRaises(ValueError):
q = Quaternion(w=e1, x=e2)
with self.assertRaises(ValueError):
q = Quaternion(a=e1, b=e2, c=e3, d=e4, e=e1)
def test_init_from_explicit_component(self):
a, b, c, d = randomElements()
# Using 'real' & 'imaginary' notation
q1 = Quaternion(real=a, imaginary=(b, c, d))
q2 = Quaternion(real=a, imaginary=[b, c, d])
q3 = Quaternion(real=a, imaginary=np.array([b, c, d]))
q4 = Quaternion(real=a)
q5 = Quaternion(imaginary=np.array([b, c, d]))
q6 = Quaternion(real=None, imaginary=np.array([b, c, d]))
self.assertIsInstance(q1, Quaternion)
self.assertIsInstance(q2, Quaternion)
self.assertIsInstance(q3, Quaternion)
self.assertIsInstance(q4, Quaternion)
self.assertIsInstance(q5, Quaternion)
self.assertIsInstance(q6, Quaternion)
self.assertEqual(q1, Quaternion(a, b, c, d))
self.assertEqual(q1, q2)
self.assertEqual(q2, q3)
self.assertEqual(q4, Quaternion(a, 0, 0, 0))
self.assertEqual(q5, Quaternion(0, b, c, d))
self.assertEqual(q5, q6)
with self.assertRaises(ValueError):
q = Quaternion(real=a, imaginary=[b, c])
with self.assertRaises(ValueError):
q = Quaternion(real=a, imaginary=(b, c, d, d))
# Using 'scalar' & 'vector' notation
q1 = Quaternion(scalar=a, vector=(b, c, d))
q2 = Quaternion(scalar=a, vector=[b, c, d])
q3 = Quaternion(scalar=a, vector=np.array([b, c, d]))
q4 = Quaternion(scalar=a)
q5 = Quaternion(vector=np.array([b, c, d]))
q6 = Quaternion(scalar=None, vector=np.array([b, c, d]))
self.assertIsInstance(q1, Quaternion)
self.assertIsInstance(q2, Quaternion)
self.assertIsInstance(q3, Quaternion)
self.assertIsInstance(q4, Quaternion)
self.assertIsInstance(q5, Quaternion)
self.assertIsInstance(q6, Quaternion)
self.assertEqual(q1, Quaternion(a, b, c, d))
self.assertEqual(q1, q2)
self.assertEqual(q2, q3)
self.assertEqual(q4, Quaternion(a, 0, 0, 0))
self.assertEqual(q5, Quaternion(0, b, c, d))
self.assertEqual(q5, q6)
with self.assertRaises(ValueError):
q = Quaternion(scalar=a, vector=[b, c])
with self.assertRaises(ValueError):
q = Quaternion(scalar=a, vector=(b, c, d, d))
def test_init_from_explicit_rotation_params(self):
vx = random()
vy = random()
vz = random()
theta = random() * 2.0 * pi
v1 = (vx, vy, vz) # tuple format
v2 = [vx, vy, vz] # list format
v3 = np.array(v2) # array format
q1 = Quaternion(axis=v1, angle=theta)
q2 = Quaternion(axis=v2, radians=theta)
q3 = Quaternion(axis=v3, degrees=theta / pi * 180)
# normalise v to a unit vector
v3 = v3 / np.linalg.norm(v3)
q4 = Quaternion(angle=theta, axis=v3)
# Construct the true quaternion
t = theta / 2.0
a = cos(t)
b = v3[0] * sin(t)
c = v3[1] * sin(t)
d = v3[2] * sin(t)
truth = Quaternion(a, b, c, d)
self.assertEqual(q1, truth)
self.assertEqual(q2, truth)
self.assertEqual(q3, truth)
self.assertEqual(q4, truth)
self.assertEqual(Quaternion(axis=v3, angle=0), Quaternion())
self.assertEqual(Quaternion(axis=v3, radians=0), Quaternion())
self.assertEqual(Quaternion(axis=v3, degrees=0), Quaternion())
self.assertEqual(Quaternion(axis=v3), Quaternion())
# Result should be a versor (Unit Quaternion)
self.assertAlmostEqual(q1.norm, 1.0, ALMOST_EQUAL_TOLERANCE)
self.assertAlmostEqual(q2.norm, 1.0, ALMOST_EQUAL_TOLERANCE)
self.assertAlmostEqual(q3.norm, 1.0, ALMOST_EQUAL_TOLERANCE)
self.assertAlmostEqual(q4.norm, 1.0, ALMOST_EQUAL_TOLERANCE)
with self.assertRaises(ValueError):
q = Quaternion(angle=theta)
with self.assertRaises(ValueError):
q = Quaternion(axis=[b, c], angle=theta)
with self.assertRaises(ValueError):
q = Quaternion(axis=(b, c, d, d), angle=theta)
with self.assertRaises(ZeroDivisionError):
q = Quaternion(axis=[0., 0., 0.], angle=theta)
def test_init_from_explicit_matrix(self):
def R_z(theta):
"""
Generate a rotation matrix describing a rotation of theta degrees about the z-axis
"""
c = cos(theta)
s = sin(theta)
return np.array([
[c,-s, 0],
[s, c, 0],
[0, 0, 1]])
v = np.array([1, 0, 0])
for angle in [0, pi/6, pi/4, pi/2, pi, 4*pi/3, 3*pi/2, 2*pi]:
R = R_z(angle) # rotation matrix describing rotation of 90 about +z
v_prime_r = np.dot(R, v)
q1 = Quaternion(axis=[0,0,1], angle=angle)
v_prime_q1 = q1.rotate(v)
np.testing.assert_almost_equal(v_prime_r, v_prime_q1, decimal=ALMOST_EQUAL_TOLERANCE)
q2 = Quaternion(matrix=R)
v_prime_q2 = q2.rotate(v)
np.testing.assert_almost_equal(v_prime_q2, v_prime_r, decimal=ALMOST_EQUAL_TOLERANCE)
R = np.matrix(np.eye(3))
q3 = Quaternion(matrix=R)
v_prime_q3 = q3.rotate(v)
np.testing.assert_almost_equal(v, v_prime_q3, decimal=ALMOST_EQUAL_TOLERANCE)
self.assertEqual(q3, Quaternion())
R[0,1] += 3 # introduce error to make matrix non-orthogonal
with self.assertRaises(ValueError):
q4 = Quaternion(matrix=R)
def test_init_from_explicit_matrix_with_optional_tolerance_arguments(self):
"""
The matrix defined in this test is orthogonal was carefully crafted
such that it's orthogonal to a precision of 1e-07, but not to a precision
of 1e-08. The default value for numpy's atol function is 1e-08, but
developers should have the option to use a lower precision if they choose
to.
Reference: https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html
"""
m = [[ 0.73297226, -0.16524626, -0.65988294, -0.07654548],
[ 0.13108627, 0.98617666, -0.10135052, -0.04878795],
[ 0.66750896, -0.01221443, 0.74450167, -0.05474513],
[ 0, 0, 0, 1, ]]
npm = np.matrix(m)
with self.assertRaises(ValueError):
Quaternion(matrix=npm)
try:
Quaternion(matrix=npm, atol=1e-07)
except ValueError:
self.fail("Quaternion() raised ValueError unexpectedly!")
def test_init_from_explicit_arrray(self):
r = randomElements()
a = np.array(r)
q = Quaternion(array=a)
self.assertIsInstance(q, Quaternion)
self.assertEqual(q, Quaternion(*r))
with self.assertRaises(ValueError):
q = Quaternion(array=a[1:4]) # 3-vector
with self.assertRaises(ValueError):
q = Quaternion(array=np.hstack((a, a))) # 8-vector
with self.assertRaises(ValueError):
q = Quaternion(array=np.array([a, a])) # 2x4-matrix
with self.assertRaises(TypeError):
q = Quaternion(array=np.array([None, None, None, None]))
def test_equivalent_initialisations(self):
a, b, c, d = randomElements()
q = Quaternion(a, b, c, d)
self.assertEqual(q, Quaternion(q))
self.assertEqual(q, Quaternion(np.array([a, b, c, d])))
self.assertEqual(q, Quaternion((a, b, c, d)))
self.assertEqual(q, Quaternion([a, b, c, d]))
self.assertEqual(q, Quaternion(w=a, x=b, y=c, z=d))
self.assertEqual(q, Quaternion(array=np.array([a, b, c, d])))
class TestQuaternionRepresentation(unittest.TestCase):
def test_str(self):
a, b, c, d = randomElements()
q = Quaternion(a, b, c, d)
string = "{:.3f} {:+.3f}i {:+.3f}j {:+.3f}k".format(a, b, c, d)
self.assertEqual(string, str(q))
def test_format(self):
a, b, c, d = randomElements()
q = Quaternion(a, b, c, d)
for s in ['.3f', '+.14f', '.6e', 'g']:
individual_fmt = '{:' + s + '} {:' + s + '}i {:' + s + '}j {:' + s + '}k'
quaternion_fmt = '{:' + s + '}'
self.assertEqual(individual_fmt.format(a, b, c, d), quaternion_fmt.format(q))
def test_repr(self):
a, b, c, d = np.array(randomElements()) # Numpy seems to increase precision of floats (C magic?)
q = Quaternion(a, b, c, d)
string = "Quaternion(" + repr(a) + ", " + repr(b) + ", " + repr(c) + ", " + repr(d) + ")"
self.assertEqual(string, repr(q))
class TestQuaternionTypeConversions(unittest.TestCase):
def test_bool(self):
self.assertTrue(Quaternion())
self.assertFalse(Quaternion(scalar=0.0))
self.assertTrue(~Quaternion(scalar=0.0))
self.assertFalse(~Quaternion())
def test_float(self):
a, b, c, d = randomElements()
q = Quaternion(a, b, c, d)
self.assertEqual(float(q), a)
def test_int(self):
a, b, c, d = randomElements()
q = Quaternion(a, b, c, d)
self.assertEqual(int(q), int(a))
self.assertEqual(int(Quaternion(6.28)), 6)
self.assertEqual(int(Quaternion(6.78)), 6)
self.assertEqual(int(Quaternion(-4.87)), -4)
self.assertEqual(int(round(float(Quaternion(-4.87)))), -5)
def test_complex(self):
a, b, c, d = randomElements()
q = Quaternion(a, b, c, d)
self.assertEqual(complex(q), complex(a, b))
class TestQuaternionArithmetic(unittest.TestCase):
def test_equality(self):
r = randomElements()
self.assertEqual(Quaternion(*r), Quaternion(*r))
q = Quaternion(*r)
self.assertEqual(q, q)
# Equality should work with other types, if they can be interpreted as quaternions
self.assertEqual(q, r)
self.assertEqual(Quaternion(1., 0., 0., 0.), 1.0)
self.assertEqual(Quaternion(1., 0., 0., 0.), "1.0")
self.assertNotEqual(q, q + Quaternion(0.0, 0.002, 0.0, 0.0))
# Equality should also cover small rounding and floating point errors
self.assertEqual(Quaternion(1., 0., 0., 0.), Quaternion(1.0 - 1e-14, 0., 0., 0.))
self.assertNotEqual(Quaternion(1., 0., 0., 0.), Quaternion(1.0 - 1e-12, 0., 0., 0.))
self.assertNotEqual(Quaternion(160., 0., 0., 0.), Quaternion(160.0 - 1e-10, 0., 0., 0.))
self.assertNotEqual(Quaternion(1600., 0., 0., 0.), Quaternion(1600.0 - 1e-9, 0., 0., 0.))
with self.assertRaises(TypeError):
q == None
with self.assertRaises(ValueError):
q == 's'
def test_assignment(self):
a, b, c, d = randomElements()
q1 = Quaternion(a, b, c, d)
q2 = Quaternion(a, b*0.1, c+0.3, d)
self.assertNotEqual(q1, q2)
q2 = q1
self.assertEqual(q1, q2)
def test_unary_minus(self):
a, b, c, d = randomElements()
q = Quaternion(a, b, c, d)
self.assertEqual(-q, Quaternion(-a, -b, -c, -d))
def test_add(self):
r1 = randomElements()
r2 = randomElements()
r = random()
n = None
q1 = Quaternion(*r1)
q2 = Quaternion(*r2)
q3 = Quaternion(array= np.array(r1) + np.array(r2))
q4 = Quaternion(array= np.array(r2) + np.array([r, 0.0, 0.0, 0.0]))
self.assertEqual(q1 + q2, q3)
q1 += q2
self.assertEqual(q1, q3)
self.assertEqual(q2 + r, q4)
self.assertEqual(r + q2, q4)
with self.assertRaises(TypeError):
q1 += n
with self.assertRaises(TypeError):
n += q1
def test_subtract(self):
r1 = randomElements()
r2 = randomElements()
r = random()
n = None
q1 = Quaternion(*r1)
q2 = Quaternion(*r2)
q3 = Quaternion(array= np.array(r1) - np.array(r2))
q4 = Quaternion(array= np.array(r2) - np.array([r, 0.0, 0.0, 0.0]))
self.assertEqual(q1 - q2, q3)
q1 -= q2
self.assertEqual(q1, q3)
self.assertEqual(q2 - r, q4)
self.assertEqual(r - q2, -q4)
with self.assertRaises(TypeError):
q1 -= n
with self.assertRaises(TypeError):
n -= q1
def test_multiplication_of_bases(self):
one = Quaternion(1.0, 0.0, 0.0, 0.0)
i = Quaternion(0.0, 1.0, 0.0, 0.0)
j = Quaternion(0.0, 0.0, 1.0, 0.0)
k = Quaternion(0.0, 0.0, 0.0, 1.0)
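        # The asserts below verify Hamilton's relations: i*i = j*j = k*k = i*j*k = -1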
self.assertEqual(i * i, j * j)
self.assertEqual(j * j, k * k)
self.assertEqual(k * k, i * j * k)
self.assertEqual(i * j * k, -one)
self.assertEqual(i * j, k)
self.assertEqual(i * i, -one)
self.assertEqual(i * k, -j)
self.assertEqual(j * i, -k)
self.assertEqual(j * j, -one)
self.assertEqual(j * k, i)
self.assertEqual(k * i, j)
self.assertEqual(k * j, -i)
self.assertEqual(k * k, -one)
self.assertEqual(i * j * k, -one)
def test_multiply_by_scalar(self):
a, b, c, d = randomElements()
q1 = Quaternion(a, b, c, d)
for s in [30.0, 0.3, -2, -4.7, 0]:
q2 = Quaternion(s*a, s*b, s*c, s*d)
q3 = q1
self.assertEqual(q1 * s, q2) # post-multiply by scalar
self.assertEqual(s * q1, q2) # pre-multiply by scalar
q3 *= repr(s)
self.assertEqual(q3, q2)
def test_multiply_incorrect_type(self):
q = Quaternion()
with self.assertRaises(TypeError):
a = q * None
with self.assertRaises(ValueError):
b = q * [1, 1, 1, 1, 1]
with self.assertRaises(ValueError):
c = q * np.array([[1, 2, 3], [4, 5, 6]])
with self.assertRaises(ValueError):
d = q * 's'
def test_divide(self):
r = randomElements()
q = Quaternion(*r)
if q:
self.assertEqual(q / q, Quaternion())
self.assertEqual(q / r, Quaternion())
else:
with self.assertRaises(ZeroDivisionError):
q / q
with self.assertRaises(ZeroDivisionError):
q / Quaternion(0.0)
with self.assertRaises(TypeError):
q / None
with self.assertRaises(ValueError):
q / [1, 1, 1, 1, 1]
with self.assertRaises(ValueError):
q / np.array([[1, 2, 3], [4, 5, 6]])
with self.assertRaises(ValueError):
q / 's'
def test_division_of_bases(self):
one = Quaternion(1.0, 0.0, 0.0, 0.0)
i = Quaternion(0.0, 1.0, 0.0, 0.0)
j = Quaternion(0.0, 0.0, 1.0, 0.0)
k = Quaternion(0.0, 0.0, 0.0, 1.0)
self.assertEqual(i / i, j / j)
self.assertEqual(j / j, k / k)
self.assertEqual(k / k, one)
self.assertEqual(k / -k, -one)
self.assertEqual(i / j, -k)
self.assertEqual(i / i, one)
self.assertEqual(i / k, j)
self.assertEqual(j / i, k)
self.assertEqual(j / j, one)
self.assertEqual(j / k, -i)
self.assertEqual(k / i, -j)
self.assertEqual(k / j, i)
self.assertEqual(k / k, one)
self.assertEqual(i / -j, k)
def test_divide_by_scalar(self):
a, b, c, d = randomElements()
q1 = Quaternion(a, b, c, d)
for s in [30.0, 0.3, -2, -4.7]:
q2 = Quaternion(a/s, b/s, c/s, d/s)
q3 = q1
self.assertEqual(q1 / s, q2)
if q1:
self.assertEqual(s / q1, q2.inverse)
else:
with self.assertRaises(ZeroDivisionError):
s / q1
q3 /= repr(s)
self.assertEqual(q3, q2)
with self.assertRaises(ZeroDivisionError):
q4 = q1 / 0.0
with self.assertRaises(TypeError):
q4 = q1 / None
with self.assertRaises(ValueError):
q4 = q1 / 's'
def test_squared(self):
one = Quaternion(1.0, 0.0, 0.0, 0.0)
i = Quaternion(0.0, 1.0, 0.0, 0.0)
j = Quaternion(0.0, 0.0, 1.0, 0.0)
k = Quaternion(0.0, 0.0, 0.0, 1.0)
self.assertEqual(i**2, j**2)
self.assertEqual(j**2, k**2)
self.assertEqual(k**2, -one)
def test_power(self):
q1 = Quaternion.random()
q2 = Quaternion(q1)
self.assertEqual(q1 ** 0, Quaternion())
self.assertEqual(q1 ** 1, q1)
q2 **= 4
self.assertEqual(q2, q1 * q1 * q1 * q1)
self.assertEqual((q1 ** 0.5) * (q1 ** 0.5), q1)
self.assertEqual(q1 ** -1, q1.inverse)
self.assertEqual(4 ** Quaternion(2), Quaternion(16))
with self.assertRaises(TypeError):
q1 ** None
with self.assertRaises(ValueError):
q1 ** 's'
q3 = Quaternion()
self.assertEqual(q3 ** 0.5, q3) # Identity behaves as an identity
self.assertEqual(q3 ** 5, q3)
self.assertEqual(q3 ** 3.4, q3)
q4 = Quaternion(scalar=5) # real number behaves as any other real number would
self.assertEqual(q4 ** 4, Quaternion(scalar=5 ** 4))
def test_distributive(self):
q1 = Quaternion.random()
q2 = Quaternion.random()
q3 = Quaternion.random()
self.assertEqual(q1 * ( q2 + q3 ), q1 * q2 + q1 * q3)
def test_noncommutative(self):
q1 = Quaternion.random()
q2 = Quaternion.random()
if not q1 == q2: # Small chance of this happening with random initialisation
self.assertNotEqual(q1 * q2, q2 * q1)
class TestQuaternionFeatures(unittest.TestCase):
def test_conjugate(self):
a, b, c, d = randomElements()
q1 = Quaternion(a, b, c, d)
q2 = Quaternion.random()
self.assertEqual(q1.conjugate, Quaternion(a, -b, -c, -d))
self.assertEqual((q1 * q2).conjugate, q2.conjugate * q1.conjugate)
self.assertEqual((q1 + q1.conjugate) / 2, Quaternion(scalar=q1.scalar))
self.assertEqual((q1 - q1.conjugate) / 2, Quaternion(vector=q1.vector))
def test_double_conjugate(self):
q = Quaternion.random()
self.assertEqual(q, q.conjugate.conjugate)
def test_norm(self):
r = randomElements()
q1 = Quaternion(*r)
q2 = Quaternion.random()
self.assertEqual(q1.norm, np.linalg.norm(np.array(r)))
self.assertEqual(q1.magnitude, np.linalg.norm(np.array(r)))
# Multiplicative norm
self.assertAlmostEqual((q1 * q2).norm, q1.norm * q2.norm, ALMOST_EQUAL_TOLERANCE)
# Scaled norm
for s in [30.0, 0.3, -2, -4.7]:
self.assertAlmostEqual((q1 * s).norm, q1.norm * abs(s), ALMOST_EQUAL_TOLERANCE)
def test_inverse(self):
q1 = Quaternion(randomElements())
q2 = Quaternion.random()
if q1:
self.assertEqual(q1 * q1.inverse, Quaternion(1.0, 0.0, 0.0, 0.0))
else:
with self.assertRaises(ZeroDivisionError):
q1 * q1.inverse
self.assertEqual(q2 * q2.inverse, Quaternion(1.0, 0.0, 0.0, 0.0))
def test_normalisation(self): # normalise to unit quaternion
r = randomElements()
q1 = Quaternion(*r)
v = q1.unit
n = q1.normalised
if q1 == Quaternion(0): # small chance with random generation
return # a 0 quaternion does not normalise
# Test normalised objects are unit quaternions
np.testing.assert_almost_equal(v.q, q1.elements / q1.norm, decimal=ALMOST_EQUAL_TOLERANCE)
np.testing.assert_almost_equal(n.q, q1.elements / q1.norm, decimal=ALMOST_EQUAL_TOLERANCE)
self.assertAlmostEqual(v.norm, 1.0, ALMOST_EQUAL_TOLERANCE)
self.assertAlmostEqual(n.norm, 1.0, ALMOST_EQUAL_TOLERANCE)
# Test axis and angle remain the same
np.testing.assert_almost_equal(q1.axis, v.axis, decimal=ALMOST_EQUAL_TOLERANCE)
np.testing.assert_almost_equal(q1.axis, n.axis, decimal=ALMOST_EQUAL_TOLERANCE)
self.assertAlmostEqual(q1.angle, v.angle, ALMOST_EQUAL_TOLERANCE)
self.assertAlmostEqual(q1.angle, n.angle, ALMOST_EQUAL_TOLERANCE)
# Test special case where q is zero
q2 = Quaternion(0)
self.assertEqual(q2, q2.normalised)
def test_is_unit(self):
q1 = Quaternion()
q2 = Quaternion(1.0, 0, 0, 0.0001)
self.assertTrue(q1.is_unit())
self.assertFalse(q2.is_unit())
self.assertTrue(q2.is_unit(0.001))
def test_q_matrix(self):
a, b, c, d = randomElements()
q = Quaternion(a, b, c, d)
M = np.array([
[a, -b, -c, -d],
[b, a, -d, c],
[c, d, a, -b],
[d, -c, b, a]])
self.assertTrue(np.array_equal(q._q_matrix(), M))
def test_q_bar_matrix(self):
a, b, c, d = randomElements()
q = Quaternion(a, b, c, d)
M = np.array([
[a, -b, -c, -d],
[b, a, d, -c],
[c, -d, a, b],
[d, c, -b, a]])
self.assertTrue(np.array_equal(q._q_bar_matrix(), M))
def test_output_of_components(self):
a, b, c, d = randomElements()
q = Quaternion(a, b, c, d)
# Test scalar
self.assertEqual(q.scalar, a)
self.assertEqual(q.real, a)
# Test vector
self.assertTrue(np.array_equal(q.vector, [b, c, d]))
self.assertTrue(np.array_equal(q.imaginary, [b, c, d]))
self.assertEqual(tuple(q.vector), (b, c, d))
self.assertEqual(list(q.imaginary), [b, c, d])
self.assertEqual(q.w, a)
self.assertEqual(q.x, b)
self.assertEqual(q.y, c)
self.assertEqual(q.z, d)
def test_output_of_elements(self):
r = randomElements()
q = Quaternion(*r)
self.assertEqual(tuple(q.elements), r)
def test_element_access(self):
r = randomElements()
q = Quaternion(*r)
self.assertEqual(q[0], r[0])
self.assertEqual(q[1], r[1])
self.assertEqual(q[2], r[2])
self.assertEqual(q[3], r[3])
self.assertEqual(q[-1], r[3])
self.assertEqual(q[-4], r[0])
with self.assertRaises(TypeError):
q[None]
with self.assertRaises(IndexError):
q[4]
with self.assertRaises(IndexError):
q[-5]
def test_element_assignment(self):
q = Quaternion()
self.assertEqual(q[1], 0.0)
q[1] = 10.0
self.assertEqual(q[1], 10.0)
self.assertEqual(q, Quaternion(1.0, 10.0, 0.0, 0.0))
with self.assertRaises(TypeError):
q[2] = None
with self.assertRaises(ValueError):
q[2] = 's'
def test_rotate(self):
q = Quaternion(axis=[1,1,1], angle=2*pi/3)
q2 = Quaternion(axis=[1, 0, 0], angle=-pi)
q3 = Quaternion(axis=[1, 0, 0], angle=pi)
precision = ALMOST_EQUAL_TOLERANCE
for r in [1, 3.8976, -69.7, -0.000001]:
# use np.testing.assert_almost_equal() to compare float sequences
np.testing.assert_almost_equal(q.rotate((r, 0, 0)), (0, r, 0), decimal=ALMOST_EQUAL_TOLERANCE)
np.testing.assert_almost_equal(q.rotate([0, r, 0]), [0, 0, r], decimal=ALMOST_EQUAL_TOLERANCE)
np.testing.assert_almost_equal(q.rotate(np.array([0, 0, r])), np.array([r, 0, 0]), decimal=ALMOST_EQUAL_TOLERANCE)
self.assertEqual(q.rotate(Quaternion(vector=[-r, 0, 0])), Quaternion(vector=[0, -r, 0]))
np.testing.assert_almost_equal(q.rotate([0, -r, 0]), [0, 0, -r], decimal=ALMOST_EQUAL_TOLERANCE)
self.assertEqual(q.rotate(Quaternion(vector=[0, 0, -r])), Quaternion(vector=[-r, 0, 0]))
np.testing.assert_almost_equal(q2.rotate((r, 0, 0)), q3.rotate((r, 0, 0)), decimal=ALMOST_EQUAL_TOLERANCE)
np.testing.assert_almost_equal(q2.rotate((0, r, 0)), q3.rotate((0, r, 0)), decimal=ALMOST_EQUAL_TOLERANCE)
np.testing.assert_almost_equal(q2.rotate((0, 0, r)), q3.rotate((0, 0, r)), decimal=ALMOST_EQUAL_TOLERANCE)
def test_conversion_to_matrix(self):
q = Quaternion.random()
a, b, c, d = tuple(q.elements)
R = np.array([
[a**2 + b**2 - c**2 - d**2, 2 * (b * c - a * d), 2 * (a * c + b * d)],
[2 * (b * c + a * d), a**2 - b**2 + c**2 - d**2, 2 * (c * d - a * b)],
[2 * (b * d - a * c), 2 * (a * b + c * d), a**2 - b**2 - c**2 + d**2]])
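        # R above is the standard rotation matrix for the (unit) quaternion a + b*i + c*j + d*k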
t = np.array([[0],[0],[0]])
T = np.vstack([np.hstack([R,t]), np.array([0,0,0,1])])
np.testing.assert_almost_equal(R, q.rotation_matrix, decimal=ALMOST_EQUAL_TOLERANCE)
np.testing.assert_almost_equal(T, q.transformation_matrix, decimal=ALMOST_EQUAL_TOLERANCE)
# Test no scaling of rotated vectors
v1 = np.array([1, 0, 0])
v2 = np.hstack((np.random.uniform(-10, 10, 3), 1.0))
v1_ = np.dot(q.rotation_matrix, v1)
v2_ = np.dot(q.transformation_matrix, v2)
self.assertAlmostEqual(np.linalg.norm(v1_), 1.0, ALMOST_EQUAL_TOLERANCE)
self.assertAlmostEqual(np.linalg.norm(v2_), np.linalg.norm(v2), ALMOST_EQUAL_TOLERANCE)
# Test transformation of vectors is equivalent for quaternion & matrix
np.testing.assert_almost_equal(v1_, q.rotate(v1), decimal=ALMOST_EQUAL_TOLERANCE)
np.testing.assert_almost_equal(v2_[0:3], q.rotate(v2[0:3]), decimal=ALMOST_EQUAL_TOLERANCE)
def test_conversion_to_ypr(self):
def R_x(theta):
c = cos(theta)
s = sin(theta)
return np.array([
[1, 0, 0],
[0, c,-s],
[0, s, c]])
def R_y(theta):
c = cos(theta)
s = sin(theta)
return np.array([
[ c, 0, s],
[ 0, 1, 0],
[-s, 0, c]])
def R_z(theta):
c = cos(theta)
s = sin(theta)
return np.array([
[ c,-s, 0],
[ s, c, 0],
[ 0, 0, 1]])
p = np.random.randn(3)
q = Quaternion.random()
yaw, pitch, roll = q.yaw_pitch_roll
p_q = q.rotate(p)
R_q = q.rotation_matrix
# build rotation matrix, R = R_z(yaw)*R_y(pitch)*R_x(roll)
R_ypr = np.dot(R_x(roll), np.dot(R_y(pitch), R_z(yaw)))
p_ypr = np.dot(R_ypr, p)
np.testing.assert_almost_equal(p_q , p_ypr, decimal=ALMOST_EQUAL_TOLERANCE)
np.testing.assert_almost_equal(R_q , R_ypr, decimal=ALMOST_EQUAL_TOLERANCE)
def test_matrix_io(self):
v = np.random.uniform(-100, 100, 3)
for i in range(10):
q0 = Quaternion.random()
R = q0.rotation_matrix
q1 = Quaternion(matrix=R)
np.testing.assert_almost_equal(q0.rotate(v), np.dot(R, v), decimal=ALMOST_EQUAL_TOLERANCE)
np.testing.assert_almost_equal(q0.rotate(v), q1.rotate(v), decimal=ALMOST_EQUAL_TOLERANCE)
np.testing.assert_almost_equal(q1.rotate(v), np.dot(R, v), decimal=ALMOST_EQUAL_TOLERANCE)
self.assertTrue((q0 == q1) or (q0 == -q1)) # q1 and -q1 are equivalent rotations
def validate_axis_angle(self, axis, angle):
def wrap_angle(theta):
""" Wrap any angle to lie between -pi and pi
Odd multiples of pi are wrapped to +pi (as opposed to -pi)
"""
result = ((theta + pi) % (2*pi)) - pi
if result == -pi: result = pi
return result
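        # e.g. wrap_angle(3*pi/2) == -pi/2 and wrap_angle(-pi) == pi (odd multiples of pi map to +pi)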
theta = wrap_angle(angle)
v = axis
q = Quaternion(angle=theta, axis=v)
v_ = q.axis
theta_ = q.angle
if theta == 0.0: # axis is irrelevant (check defaults to x=y=z)
np.testing.assert_almost_equal(theta_, 0.0, decimal=ALMOST_EQUAL_TOLERANCE)
np.testing.assert_almost_equal(v_, np.zeros(3), decimal=ALMOST_EQUAL_TOLERANCE)
return
elif abs(theta) == pi: # rotation in either direction is equivalent
self.assertTrue(
np.isclose(theta, pi) or np.isclose(theta, -pi)
and
np.isclose(v, v_).all() or np.isclose(v, -v_).all()
)
else:
self.assertTrue(
np.isclose(theta, theta_) and np.isclose(v, v_).all()
or
np.isclose(theta, -theta_) and np.isclose(v, -v_).all()
)
# Ensure the returned axis is a unit vector
np.testing.assert_almost_equal(np.linalg.norm(v_), 1.0, decimal=ALMOST_EQUAL_TOLERANCE)
def test_conversion_to_axis_angle(self):
random_axis = np.random.uniform(-1, 1, 3)
random_axis /= np.linalg.norm(random_axis)
angles = np.array([-3, -2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2, 3]) * pi
axes = [np.array([1, 0, 0]), np.array([0, 1, 0]), np.array([0, 0, 1]), random_axis]
for v in axes:
for theta in angles:
self.validate_axis_angle(v, theta)
def test_axis_angle_io(self):
for i in range(20):
v = np.random.uniform(-1, 1, 3)
v /= np.linalg.norm(v)
theta = float(np.random.uniform(-2,2, 1)) * pi
self.validate_axis_angle(v, theta)
def test_exp(self):
from math import exp
q = Quaternion(axis=[1,0,0], angle=pi)
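        # q = cos(pi/2) + sin(pi/2)*i has zero scalar part and unit vector [1, 0, 0],
        # so exp(q) = exp(0) * (cos(1) + sin(1)*i), which is asserted below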
exp_q = Quaternion.exp(q)
self.assertEqual(exp_q, exp(0) * Quaternion(scalar=cos(1.0), vector=[sin(1.0), 0,0]))
def test_log(self):
from math import log
q = Quaternion(axis=[1,0,0], angle=pi)
log_q = Quaternion.log(q)
self.assertEqual(log_q, Quaternion(scalar=0, vector=[pi/2,0,0]))
def test_distance(self):
q = Quaternion(scalar=0, vector=[1,0,0])
p = Quaternion(scalar=0, vector=[0,1,0])
self.assertEqual(pi/2, Quaternion.distance(q,p))
q = Quaternion(angle=pi/2, axis=[1,0,0])
p = Quaternion(angle=pi/2, axis=[0,1,0])
self.assertEqual(pi/3, Quaternion.distance(q,p))
q = Quaternion(scalar=1, vector=[1,1,1])
p = Quaternion(scalar=-1, vector=[-1,-1,-1])
p._normalise()
q._normalise()
self.assertAlmostEqual(0, Quaternion.distance(q,p), places=8)
def test_absolute_distance(self):
q = Quaternion(scalar=0, vector=[1,0,0])
p = Quaternion(scalar=0, vector=[0,1,0])
self.assertEqual((q-p).norm, Quaternion.absolute_distance(q,p))
q = Quaternion(angle=pi/2, axis=[1,0,0])
p = Quaternion(angle=pi/2, axis=[0,1,0])
self.assertEqual((q-p).norm, Quaternion.absolute_distance(q,p))
q = Quaternion(scalar=0, vector=[1,0,0])
p = Quaternion(scalar=-1, vector=[0,-1,0])
self.assertEqual((q+p).norm, Quaternion.absolute_distance(q,p))
q = Quaternion(scalar=1, vector=[1,1,1])
p = Quaternion(scalar=-1, vector=[-1,-1,-1])
p._normalise()
q._normalise()
self.assertAlmostEqual(0, Quaternion.absolute_distance(q,p), places=8)
def test_sym_distance(self):
q = Quaternion(scalar=0, vector=[1,0,0])
p = Quaternion(scalar=0, vector=[0,1,0])
self.assertEqual(pi/2, Quaternion.sym_distance(q,p))
q = Quaternion(angle=pi/2, axis=[1,0,0])
p = Quaternion(angle=pi/2, axis=[0,1,0])
self.assertAlmostEqual(pi/3, Quaternion.sym_distance(q,p), places=6)
q = Quaternion(scalar=0, vector=[1,0,0])
p = Quaternion(scalar=0, vector=[0,-1,0])
self.assertEqual(pi/2, Quaternion.sym_distance(q,p))
q = Quaternion(scalar=1, vector=[1,1,1])
p = Quaternion(scalar=-1, vector=[-1,-1,-1])
p._normalise()
q._normalise()
self.assertAlmostEqual(pi, Quaternion.sym_distance(q,p), places=8)
def test_slerp(self):
q1 = Quaternion(axis=[1, 0, 0], angle=0.0)
q2 = Quaternion(axis=[1, 0, 0], angle=pi/2)
q3 = Quaternion.slerp(q1, q2, 0.5)
self.assertEqual(q3, Quaternion(axis=[1,0,0], angle=pi/4))
def test_slerp_extensive(self):
for axis in [[1, 0, 0], [0, 1, 0], [0, 0, 1]]:
q1 = Quaternion(axis=axis, angle=0.0)
q2 = Quaternion(axis=axis, angle=pi/2.0)
q3 = Quaternion(axis=axis, angle=pi*3.0/2.0)
for t in np.arange(0.1, 1, 0.1):
q4 = Quaternion.slerp(q1, q2, t)
q5 = Quaternion.slerp(q1, q3, t)
q6 = Quaternion(axis=axis, angle=t*pi/2)
q7 = Quaternion(axis=axis, angle=-t*pi/2)
assert q4 == q6 or q4 == -q6
assert q5 == q7 or q5 == -q7
def test_interpolate(self):
q1 = Quaternion(axis=[1, 0, 0], angle=0.0)
q2 = Quaternion(axis=[1, 0, 0], angle=2*pi/3)
num_intermediates = 3
base = pi/6
list1 = list(Quaternion.intermediates(q1, q2, num_intermediates, include_endpoints=False))
list2 = list(Quaternion.intermediates(q1, q2, num_intermediates, include_endpoints=True))
self.assertEqual(len(list1), num_intermediates)
self.assertEqual(len(list2), num_intermediates+2)
self.assertEqual(list1[0], list2[1])
self.assertEqual(list1[1], list2[2])
self.assertEqual(list1[2], list2[3])
self.assertEqual(list2[0], q1)
self.assertEqual(list2[1], Quaternion(axis=[1, 0, 0], angle=base))
self.assertEqual(list2[2], Quaternion(axis=[1, 0, 0], angle=2*base))
self.assertEqual(list2[3], Quaternion(axis=[1, 0, 0], angle=3*base))
self.assertEqual(list2[4], q2)
def test_differentiation(self):
q = Quaternion.random()
omega = np.random.uniform(-1, 1, 3) # Random angular velocity
q_dash = 0.5 * q * Quaternion(vector=omega)
self.assertEqual(q_dash, q.derivative(omega))
def test_integration(self):
rotation_rate = [0, 0, 2*pi] # one rev per sec around z
v = [1, 0, 0] # test vector
for dt in [0, 0.25, 0.5, 0.75, 1, 2, 10, 1e-10, random()*10]: # time step in seconds
qt = Quaternion() # no rotation
qt.integrate(rotation_rate, dt)
q_truth = Quaternion(axis=[0,0,1], angle=dt*2*pi)
a = qt.rotate(v)
b = q_truth.rotate(v)
np.testing.assert_almost_equal(a, b, decimal=ALMOST_EQUAL_TOLERANCE)
self.assertTrue(qt.is_unit())
# Check integrate() is norm-preserving over many calls
q = Quaternion()
for i in range(1000):
q.integrate([pi, 0, 0], 0.001)
self.assertTrue(q.is_unit())
class TestQuaternionUtilities(unittest.TestCase):
def test_copy(self):
from copy import copy
q = Quaternion.random()
q2 = copy(q)
self.assertEqual(q, q2)
self.assertFalse(q is q2)
self.assertTrue(all(q.q == q2.q))
def test_deep_copy(self):
from copy import deepcopy
q = Quaternion.random()
q2 = deepcopy(q)
self.assertEqual(q, q2)
self.assertFalse(q is q2)
self.assertFalse(q.q is q2.q)
class TestQuaternionHashing(unittest.TestCase):
def test_equal_quaternions(self):
q1 = Quaternion(1, 0, 0, 0)
q2 = Quaternion(1, 0, 0, 0)
self.assertEqual(hash(q1), hash(q2))
def test_unequal_quaternions(self):
q1 = Quaternion(1, 0, 0, 0)
q2 = Quaternion(0, 1, 0, 0)
self.assertNotEqual(hash(q1), hash(q2))
if __name__ == '__main__':
unittest.main()
|
KieranWynn/pyquaternion
|
pyquaternion/test/test_quaternion.py
|
Python
|
mit
| 42,034
|
# from config import DevelopmentConfig
# configs = DevelopmentConfig()
import os
import re
def config(key):
val = os.environ.get(key)
if val and re.match('true', val, re.I):
val = True
elif val and re.match('false', val, re.I):
val = False
return val
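# --- Usage sketch (illustrative only; the environment variable names below are hypothetical) ---
#
#   import os
#   os.environ['FEATURE_ENABLED'] = 'True'     # any capitalisation of "true" -> True
#   os.environ['DEBUG'] = 'false'              # any capitalisation of "false" -> False
#   os.environ['API_HOST'] = 'localhost'       # other strings are returned unchanged
#
#   assert config('FEATURE_ENABLED') is True
#   assert config('DEBUG') is False
#   assert config('API_HOST') == 'localhost'
#   assert config('NOT_SET') is None           # unset variables fall through as None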
|
whittlbc/jarvis
|
jarvis/helpers/configs.py
|
Python
|
mit
| 265
|
from ..domain import Image, Font
from ._base import Service
class ImageService(Service):
def __init__(self, template_store, font_store, image_store, **kwargs):
super().__init__(**kwargs)
self.template_store = template_store
self.font_store = font_store
self.image_store = image_store
def create(self, template, text, style=None, font=None):
font = font or self.font_store.find(Font.DEFAULT)
image = Image(template, text, style=style, font=font)
try:
self.image_store.create(image)
except OSError as exception:
if "name too long" in str(exception):
exception = self.exceptions.FilenameTooLong
elif "image file" in str(exception):
exception = self.exceptions.InvalidImageLink
raise exception from None
return image
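# --- Usage sketch (illustrative only; the store objects, `template`, and `my_font` below are
# --- hypothetical stand-ins for real domain objects) ---
#
#   service = ImageService(template_store=templates, font_store=fonts, image_store=images)
#   image = service.create(template, "top text / bottom text")      # falls back to the default font
#   image = service.create(template, "custom text", style="alt", font=my_font)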
|
DanLindeman/memegen
|
memegen/services/image.py
|
Python
|
mit
| 881
|
# coding:utf-8
def hello(fn):
fn('asd')
def test(name):
    print(name)
hello(test)
# anonymous function (lambda)
def hello(fn):
    print(fn('asd'))
hello(lambda x: 'hello '+x )
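# Expected output when run as a script:
#   asd
#   hello asd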
|
seerjk/reboot06
|
08/bootstrap/08.py
|
Python
|
mit
| 165
|
# -*- coding: utf-8 -*-
"""
Start, stop and pause the network simulation.
"""
from openre.agent.decorators import action
from openre.agent.domain.decorators import state
@action(namespace='domain')
@state('run')
def run(event):
"""
    Start the simulation
"""
agent = event.pool.context['agent']
net = agent.context['net']
net.tick()
if net.is_stop:
net.is_stop = False
else:
event.prevent_done()
@action(namespace='domain')
def pause(event):
"""
    Pause the simulation
"""
agent = event.pool.context['agent']
net = agent.context['net']
agent.send_server('domain_state', {
'state': 'run',
'status': 'pause',
})
return net.pause()
@action(namespace='domain')
def start(event):
"""
    Resume the simulation if it was paused
"""
agent = event.pool.context['agent']
net = agent.context['net']
agent.send_server('domain_state', {
'state': 'run',
'status': 'running',
})
return net.start()
@action(namespace='domain')
def stop(event):
"""
    Stop the simulation. This also completes the 'run' event.
    To run again, the 'run' event has to be started anew.
"""
agent = event.pool.context['agent']
net = agent.context['net']
agent.send_server('domain_state', {
'state': 'run',
'status': 'done',
})
return net.stop()
|
openre/openre
|
openre/agent/domain/action/run.py
|
Python
|
mit
| 1,604
|
# Copyright (C) 2017 Zhixian MA <zxma_sjtu@qq.com>
"""
Rename samples of Best into the JHHMMSS.ss+/-DDMMSS.s style
Reference
=========
[1] math.modf
http://www.runoob.com/python/func-number-modf.html
"""
import os
import math
import numpy as np
import time
import argparse
def batch_rename_csv(listpath, batch, fromfolder,savefolder):
"""Batchly rename the samples
Inputs
======
listpath: str
The path of the data list
batch: tuple
The region of indices w.r.t. samples to be fetched.
fromfolder: str
Folder saved the samples to be renamed
savefolder: str
Folder to save the fetched sample files
"""
from pandas import read_csv
from astropy import units as u
from astropy.coordinates import SkyCoord
import time
# load csv
f = read_csv(listpath, sep=' ')
ra = f['RAJ2000'] # RA
dec = f['DEJ2000'] # DEC
# regularize the batch
if batch[1] > len(f):
        batch[1] = len(f) - 1
# log file optional
fl = open('log.txt', 'a')
# Iteration body
for i in range(batch[0], batch[1]+1):
# timestamp
t = time.strftime('%Y-%m-%d',time.localtime(time.time()))
# get params
temp_c = SkyCoord(ra=ra[i]*u.degree, dec=dec[i]*u.degree, frame='icrs')
# Coordinate transform
ra_rms = tuple(temp_c.ra.hms)
dec_dms = tuple(temp_c.dec.dms)
ra_h = str(int(ra_rms[0]))
ra_m = str(int(ra_rms[1]))
ra_s = str(np.round(ra_rms[2]*1000)/1000)
de_d = str((dec_dms[0]))
de_m = str(int(np.abs(dec_dms[1])))
de_s = str(np.abs(np.round(dec_dms[2]*1000)/1000))
# download file
fname_from = 'J' + ''.join([ra_h,ra_m,ra_s,de_d,de_m,de_s]) + '.fits'
frompath = os.path.join(fromfolder,fname_from)
# save name
ra_h = "%02d" % (int(ra_rms[0]))
ra_m = "%02d" % (int(ra_rms[1]))
ra_s_i = np.fix(np.round(ra_rms[2]*100)/100)
ra_s_f = np.round(ra_rms[2]*100)/100 - ra_s_i
ra_s = "%02d.%02d" % (int(ra_s_i),int(ra_s_f*100))
if dec_dms[0] > 0:
de_d = "+%02d" % (int(dec_dms[0]))
else:
de_d = "-%02d" % (abs(int(dec_dms[0])))
de_m = "%02d" % (abs(int(dec_dms[1])))
de_s_i = np.fix(np.abs(np.round(dec_dms[2]*10)/10))
de_s_f = np.abs(np.round(dec_dms[2]*10)/10) - de_s_i
de_s = "%02d.%01d" % (int(de_s_i),np.round(de_s_f*10))
fname_save = 'J' + ''.join([ra_h,ra_m,ra_s,de_d,de_m,de_s]) + '.fits'
savepath = os.path.join(savefolder,fname_save)
try:
print("[%s] f: %s\t s: %s" % (t, fname_from, fname_save))
os.system("cp %s %s" % (frompath, savepath))
except:
            fl.write("%d: %s\n" % (i, fname_from))
continue
# print log
# print('[%s]: Fetching %s' % (t, fname))
fl.close()
def main():
# Init
parser = argparse.ArgumentParser(description="Rename FIRST observations.")
# Parameters
# parser.add_argument("url", help="URL of the archive'")
parser.add_argument("listpath", help="Path of the sample list.")
parser.add_argument("batchlow", help="Begin index of the batch.")
parser.add_argument("batchhigh",help="End index of the batch.")
parser.add_argument("fromfolder", help="Path saved samples to be renamed.")
parser.add_argument("savefolder", help="The folder to save files.")
args = parser.parse_args()
listpath = args.listpath
batch = [int(args.batchlow),int(args.batchhigh)]
savefolder = args.savefolder
fromfolder = args.fromfolder
if not os.path.exists(savefolder):
os.mkdir(savefolder)
batch_rename_csv(listpath=listpath,
batch=batch,
fromfolder=fromfolder,
savefolder=savefolder)
if __name__ == "__main__":
main()
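# --- Example invocation (illustrative only; the list file and folder paths are hypothetical) ---
#   python sample-rename.py best_sample_list.csv 0 100 ./fits_original ./fits_renamed
# The list file is expected to be space-separated with RAJ2000/DEJ2000 columns in degrees.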
|
myinxd/agn-ae
|
utils/sample-rename.py
|
Python
|
mit
| 3,882
|
#!/usr/bin/python
from __future__ import print_function
import argparse
import sys
from .cmds import define
def main():
parser = argparse.ArgumentParser(
description="Find the board specification from Arduino IDE board.txt files" )
subparsers = parser.add_subparsers( metavar = 'command' )
define( subparsers )
args = parser.parse_args()
try:
args.func( args )
except RuntimeError as er:
print( er, file=sys.stderr )
exit( 1 )
if __name__ == "__main__":
main()
|
devberry/cliduino
|
cliduino/__main__.py
|
Python
|
mit
| 531
|
import argparse
import gzip
import sys
import re
import FeaturesCombiner
parser = argparse.ArgumentParser(description='combine POS tags (from tree-tagged file) with morphological features (from lefff lexicon)')
parser.add_argument('--language', default='french', type=str,
help='available languages: french, italian')
#parser.add_argument('--tagged', required=True, type=str,
# help='file containing the tree-tagged corpus in factored format: word|pos|lemma ...')
#parser.add_argument('--out', required=True, type=str,
# help='file to print output corpus')
parser.add_argument('--lexicon', required=True, type=str,
help='path to lefff lexicon, format: wordTABposTABlemmaTABfeats')
config = parser.parse_args()
def main():
fc = FeaturesCombiner.Combiner(config)
n_empty_feats = 0
for line in sys.stdin:
tokens = line.rstrip().split(" ")
line_out = ""
for tok in tokens:
# there can be more than 1 lemma in case of ambiguous words,
# if so, only the first is kept
# if len(tok.split("|"))>3:
# print("strange: " + tok)
(word,pos,lem) = tok.split("|",2)
feats = fc.get_morphfeats(word,pos,lem)
if feats == "__":
n_empty_feats += 1
line_out += (word+"|"+pos+"|"+lem+"|"+feats+" ")
sys.stdout.write(line_out.rstrip() + "\n")
sys.stderr.write("num tokens with no morph features: " + str(n_empty_feats) + "\n")
main()
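# --- Example invocation (illustrative only; the lexicon path and input sentence are hypothetical) ---
#   echo "le|DET|le chat|NC|chat" | python combine_pos_morph_features.py --lexicon lefff.tsv
# Each input token word|pos|lemma is written to stdout as word|pos|lemma|feats;
# tokens with no morphological features get "__" and are counted on stderr.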
|
arianna-bis/glass-box-nmt
|
combine_pos_morph_features.py
|
Python
|
mit
| 1,559
|
import requests
import json
import datetime
TOGGL_URL = "https://www.toggl.com/api/v8"
class TogglUser(object):
def __init__(self, email, password):
self._auth = (email, password)
self.workspaces = {}
    def currentTimeEntry(self):
        resp = self.apiRequest("time_entries/current")
        if resp["data"] is None:
return None
else:
return TogglTimeEntry(resp["data"], None)
def fetchData(self):
"""Fetch user data from Toggl and put it into a hierarchicial
structure"""
resp = self.apiRequest("me?with_related_data=true")
for workspace in resp["data"]["workspaces"]:
self.workspaces[workspace["id"]] = TogglWorkspace(workspace, self)
for data in resp["data"]["clients"]:
workspace = self.workspaces[data["wid"]]
workspace.addClient(data)
for data in resp["data"]["projects"]:
workspace = self.workspaces[data["wid"]]
workspace.addProject(data)
for data in resp["data"]["time_entries"]:
workspace = self.workspaces[data["wid"]]
workspace.addTimeEntry(data)
def apiRequest(self, path, data=None, requestType="get"):
url = "%s/%s" % (TOGGL_URL, path)
resp = getattr(requests, requestType)(url, auth=self._auth, data=data)
resp.raise_for_status()
return json.loads(resp.text)
DATE_FORMATS = [
"%Y-%m-%dT%H:%M:%SZ",
# FIXME: This will break if times are not UTC!
"%Y-%m-%dT%H:%M:%S+00:00"
]
def _parseDate(date):
"""Parse date string from Toggl
    Toggl hasn't used the same date format in all of its messages, so this
    function attempts to cope with that.
"""
for dateFormat in DATE_FORMATS:
try:
date = datetime.datetime.strptime(date, dateFormat)
return date
except ValueError:
pass
return None
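# For example (illustrative): _parseDate("2017-01-02T03:04:05Z") and
# _parseDate("2017-01-02T03:04:05+00:00") both yield datetime(2017, 1, 2, 3, 4, 5),
# while an unrecognised format yields None.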
class TogglProject(object):
def __init__(self, data, workspace, user):
self._user = user
self.workspace = workspace
self.togglId = data["id"]
self.name = data["name"]
self._timeEntries = {}
def timeEntries(self):
return self._timeEntries
def addTimeEntry(self, data):
self._timeEntries[data["id"]] = TogglTimeEntry(data, self, self._user)
def startTimeEntry(self, description):
resp = self._user.apiRequest("time_entries/start",
json.dumps({
"time_entry": {
"description": description,
"pid": self.togglId,
"created_with": "lite-toggl"
}
}),
requestType="post")
return TogglTimeEntry(resp["data"], self, self._user)
class TogglClient(object):
def __init__(self, data, workspace, user):
self._user = user
self.workspace = workspace
self.togglId = data["id"]
self.name = data["name"]
self._projects = {}
def addProject(self, project):
self._projects[project.togglId] = project
class TogglWorkspace(object):
def __init__(self, data, user):
self._user = user
self._projects = {
"noproject": TogglProject({"id": None, "name": None}, self, self._user)
}
self._clients = {
"noclient": TogglClient({"id": None, "name": None}, self, self._user)
}
self.togglId = data["id"]
self.name = data["name"]
def addTimeEntry(self, data):
if "pid" in data:
project = self._projects[data["pid"]]
else:
project = self._projects["noproject"]
project.addTimeEntry(data)
def addClient(self, data):
self._clients[data["id"]] = TogglClient(data, self, self._user)
def addProject(self, data):
if "cid" in data:
client = self._clients[data["cid"]]
else:
client = self._clients["noclient"]
project = TogglProject(data, self, self._user)
self._projects[data["id"]] = project
client.addProject(project)
def clients(self):
return self._clients
def projects(self):
return self._projects
def timeEntries(self):
entries = {}
for project in self.projects().values():
entries.update(project.timeEntries())
return entries
class TogglTimeEntry(object):
def __init__(self, data, project, user):
self._user = user
self.project = project
self.data = data
self.data["start"] = _parseDate(self.data["start"])
def stop(self):
return self._user.apiRequest("time_entries/%s/stop" % (self.data["id"]),
requestType="put")
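# --- Usage sketch (illustrative only; the credentials below are placeholders) ---
#
#   user = TogglUser("me@example.com", "secret")
#   user.fetchData()
#   for workspace in user.workspaces.values():
#       for entry in workspace.timeEntries().values():
#           print(entry.data["start"], entry.data.get("description"))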
|
ramblex/lite-toggl
|
lite_toggl/toggl_api.py
|
Python
|
mit
| 4,943
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-18 03:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('forms', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='turnexform',
name='publish',
field=models.BooleanField(db_index=True, default=False, editable=False),
),
]
|
mleger45/turnex
|
forms/migrations/0002_turnexform_publish.py
|
Python
|
mit
| 475
|
"""Test the validation for the core configuration schema."""
import json
import pytest
from flask_jsondash import charts_builder as app
def _schema(**vals):
"""Default schema."""
data = dict(
id='a-b-c-d-e',
date="2016-08-23 15:03:49.178000",
layout="grid",
name="testlayout",
modules=[],
)
data.update(**vals)
return json.dumps(data)
@pytest.mark.schema
def test_validate_raw_json_valid_empty_modules():
assert app.validate_raw_json(_schema())
@pytest.mark.schema
def test_validate_raw_json_valid_freeform():
d = _schema(
layout='freeform',
modules=[
dict(guid='a-b-c-d-e', name='foo', dataSource='foo',
width=1, height=1, type='line',
family='C3')]
)
assert app.validate_raw_json(d)
@pytest.mark.schema
def test_validate_raw_json_valid_fixed():
d = _schema(
layout='freeform',
modules=[
dict(guid='a-b-c-d-e', name='foo', dataSource='foo',
width='1', height=1, type='line',
family='C3')]
)
assert app.validate_raw_json(d)
@pytest.mark.schema
@pytest.mark.parametrize('field', [
'type',
'family',
'width',
'height',
'dataSource',
])
def test_validate_raw_json_missing_required_module_keys(field):
module = dict(
guid='a-b-c-d-e',
name='foo', dataSource='foo',
width='col-1', height=1, type='line',
family='C3')
del module[field]
d = _schema(
layout='grid',
modules=[module]
)
with pytest.raises(app.InvalidSchemaError):
app.validate_raw_json(d)
@pytest.mark.schema
@pytest.mark.parametrize('field', [
'row',
])
def test_validate_raw_json_missing_required_fixedgrid_module_keys(field):
module = dict(
guid='a-b-c-d-e',
name='foo', dataSource='foo',
width='col-1', height=1, type='line',
row=1, family='C3')
del module[field]
d = _schema(
layout='grid',
modules=[module]
)
with pytest.raises(app.InvalidSchemaError):
app.validate_raw_json(d)
@pytest.mark.schema
@pytest.mark.parametrize('field', [
'row',
])
def test_validate_raw_json_missing_optional_freeform_module_keys(field):
# Ensure that required fields for fixed grid
# are not required for freeform layouts.
module = dict(
guid='a-b-c-d-e',
name='foo', dataSource='foo',
width=1, height=1, type='line',
row=1, family='C3')
del module[field]
d = _schema(
layout='freeform',
modules=[module]
)
assert app.validate_raw_json(d)
@pytest.mark.schema
@pytest.mark.parametrize('field', [
'id',
'layout',
'name',
'modules',
])
def test_validate_raw_json_invalid_missing_toplevel_keys(field):
module = dict(
guid='a-b-c-d-e',
layout='freeform',
name='foo', dataSource='foo',
width=1, height=1, type='line', family='C3',
)
config = _schema(
layout='freeform',
modules=[module]
)
config = json.loads(config)
del config[field]
with pytest.raises(app.InvalidSchemaError) as exc:
app.validate_raw_json(json.dumps(config))
assert "{'" + field + "': ['required field']}" in str(exc.value)
@pytest.mark.schema
def test_validate_raw_json_invalid_mixed_use_freeform_with_rows():
# Ensure `row` in modules and layout `freeform` cannot be mixed.
module = dict(
guid='a-b-c-d-e',
name='foo', dataSource='foo',
width=1, height=1, type='line',
row=1, family='C3',
)
config = _schema(
layout='freeform',
modules=[module]
)
with pytest.raises(app.InvalidSchemaError) as exc:
app.validate_raw_json(config)
assert 'Cannot mix' in str(exc.value)
@pytest.mark.schema
def test_validate_raw_json_missing_row_for_layout_grid():
module = dict(
guid='a-b-c-d-e',
name='foo', dataSource='foo',
width='col-1', height=1, type='line', layout='grid', family='C3',
)
config = _schema(
layout='grid',
modules=[module]
)
with pytest.raises(app.InvalidSchemaError) as exc:
app.validate_raw_json(config)
assert 'Invalid row value for module "foo"' in str(exc.value)
@pytest.mark.schema
def test_validate_raw_json_invalid_grid_nonconsecutive_rows():
# Ensure row numbers can't "skip", e.g. [1, 2, 10]
config = _schema(
layout='grid',
modules=[
dict(guid='a-b-c-d-e', name='f', dataSource='f', width='col-1',
row=1, height=1, family='C3', type='line'),
dict(guid='a-b-c-d-e', name='f', dataSource='f', width='col-1',
row=2, height=1, family='C3', type='line'),
dict(guid='a-b-c-d-e', name='f', dataSource='f', width='col-1',
row=10, height=1, family='C3', type='line'),
]
)
with pytest.raises(app.InvalidSchemaError) as exc:
app.validate_raw_json(config)
assert 'Row order is not consecutive' in str(exc.value)
@pytest.mark.schema
def test_validate_raw_json_invalid_grid_consecutive_but_duplicate_rows():
# Ensure duplicate row numbers are consecutive, IF they were unique.
# e.g. [1, 1, 2, 2, 3] is valid.
config = _schema(
layout='grid',
id='a-b-c-d-e',
modules=[
dict(guid='a-b-c-d-e', name='f', dataSource='f', width='col-1',
row=1, height=1, family='C3', type='line'),
dict(guid='a-b-c-d-e', name='f', dataSource='f', width='col-1',
row=1, height=1, family='C3', type='line'),
dict(guid='a-b-c-d-e', name='f', dataSource='f', width='col-1',
row=2, height=1, family='C3', type='line'),
dict(guid='a-b-c-d-e', name='f', dataSource='f', width='col-1',
row=2, height=1, family='C3', type='line'),
dict(guid='a-b-c-d-e', name='f', dataSource='f', width='col-1',
row=3, height=1, family='C3', type='line'),
]
)
assert app.validate_raw_json(config)
@pytest.mark.schema
def test_validate_raw_json_invalid_family():
config = _schema(
layout='grid',
modules=[
dict(guid='a-b-c-d-e', name='f', dataSource='f', width='col-1',
row=1, height=1, family='LOLWUT', type='line'),
]
)
with pytest.raises(app.InvalidSchemaError) as exc:
app.validate_raw_json(config)
assert 'unallowed value LOLWUT' in str(exc.value)
@pytest.mark.schema
def test_validate_raw_json_invalid_width_string_cols_for_freeform_type():
config = _schema(
layout='freeform',
modules=[
dict(guid='a-b-c-d-e',
name='f',
dataSource='f',
width='col-12',
height=1,
family='C3',
type='line'),
]
)
with pytest.raises(app.InvalidSchemaError) as exc:
app.validate_raw_json(config)
err = str(exc.value)
assert 'Invalid value for width in `freeform` layout.' in err
|
christabor/flask_jsondash
|
tests/test_jsonschema.py
|
Python
|
mit
| 7,161
|
import unittest
from Db import Db
from Server import Prudence
from Server import User
from Server import Super_user
class MyTest(unittest.TestCase):
def test(self):
d = Db('localhost')
p = Prudence('ad','bb','localhost')
users_type = {True:Super_user,False:User}
raw_user = p.get_raw_user()
user = p.get_user()
self.assertEqual(raw_user['session_id'], user.get_session_id(), 'no session_id')
self.assertEqual(raw_user['group_id'], user.get_group_id(), 'no group_id')
self.assertEqual("?session_id={0}%group_id={1}".format(user.get_session_id(),user.get_group_id()),
"?session_id={0}%group_id={1}".format(user.get_session_id(),user.get_group_id()), 'no parameters')
|
arpho/bb
|
exporter/unitest.py
|
Python
|
mit
| 692
|
# coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
import random
from pyqrllib.pyqrllib import bin2hstr
from pyqryptonight.pyqryptonight import UInt256ToString
from twisted.internet import reactor
from twisted.internet.protocol import ServerFactory
from qrl.core import config
from qrl.core.Block import Block
from qrl.core.ChainManager import ChainManager
from qrl.core.ESyncState import ESyncState
from qrl.core.messagereceipt import MessageReceipt
from qrl.core.misc import ntp, logger
from qrl.core.node import SyncState
from qrl.core.p2p.p2pprotocol import P2PProtocol
from qrl.core.p2p.IPMetadata import IPMetadata
from qrl.core.processors.TxnProcessor import TxnProcessor
from qrl.core.txs.MessageTransaction import MessageTransaction
from qrl.core.txs.SlaveTransaction import SlaveTransaction
from qrl.core.txs.LatticeTransaction import LatticeTransaction
from qrl.core.txs.TokenTransaction import TokenTransaction
from qrl.core.txs.TransferTokenTransaction import TransferTokenTransaction
from qrl.core.txs.TransferTransaction import TransferTransaction
from qrl.core.txs.multisig.MultiSigCreate import MultiSigCreate
from qrl.core.txs.multisig.MultiSigSpend import MultiSigSpend
from qrl.core.txs.multisig.MultiSigVote import MultiSigVote
from qrl.generated import qrllegacy_pb2, qrl_pb2
p2p_msg_priority = {
qrllegacy_pb2.LegacyMessage.VE: 0,
qrllegacy_pb2.LegacyMessage.PL: 0,
qrllegacy_pb2.LegacyMessage.PONG: 0,
######################
qrllegacy_pb2.LegacyMessage.MR: 2,
qrllegacy_pb2.LegacyMessage.SFM: 1,
qrllegacy_pb2.LegacyMessage.BK: 1,
qrllegacy_pb2.LegacyMessage.FB: 0,
qrllegacy_pb2.LegacyMessage.PB: 0,
qrllegacy_pb2.LegacyMessage.BH: 1,
############################
qrllegacy_pb2.LegacyMessage.TX: 1,
qrllegacy_pb2.LegacyMessage.MT: 1,
qrllegacy_pb2.LegacyMessage.TK: 1,
qrllegacy_pb2.LegacyMessage.TT: 1,
qrllegacy_pb2.LegacyMessage.LT: 1,
qrllegacy_pb2.LegacyMessage.SL: 1,
qrllegacy_pb2.LegacyMessage.EPH: 3,
qrllegacy_pb2.LegacyMessage.SYNC: 0,
qrllegacy_pb2.LegacyMessage.CHAINSTATE: 0,
qrllegacy_pb2.LegacyMessage.HEADERHASHES: 1,
qrllegacy_pb2.LegacyMessage.P2P_ACK: 0,
qrllegacy_pb2.LegacyMessage.MC: 1,
qrllegacy_pb2.LegacyMessage.MS: 1,
qrllegacy_pb2.LegacyMessage.MV: 1,
}
class P2PFactory(ServerFactory):
protocol = P2PProtocol
def __init__(self,
chain_manager: ChainManager,
sync_state: SyncState,
qrl_node):
self.master_mr = MessageReceipt()
self.pow = None
self.sync_state = sync_state
self._ntp = ntp
self._qrl_node = qrl_node
self._chain_manager = chain_manager
self._chain_manager.set_broadcast_tx(self.broadcast_tx)
self._syncing_enabled = False
self._target_channel = None
self._target_node_header_hash = None
self._last_requested_block_number = None
self._genesis_processed = False
self._peer_connections = []
self._txn_processor_running = False
self.peer_blockheight = dict()
reactor.callLater(config.user.monitor_connections_interval,
self.monitor_connections)
self.p2p_msg_priority = p2p_msg_priority
# Maintains the list of ips in the queue that can be tried to form a new p2p connection
self._peer_q = []
def add_new_peers_to_peer_q(self, peer_list):
"""
        Checks that the ip is not already in _peer_q and that a connection
        has not already been established from that ip and port before
        adding the new set of peers into _peer_q
"""
peer_set = set(peer_list)
for peer_conn in self._peer_connections:
ip_port = peer_conn.peer.full_address
if ip_port in peer_set:
peer_set.remove(ip_port)
for ip_port in self._peer_q:
if ip_port in peer_set:
peer_set.remove(ip_port)
self._peer_q.extend(peer_set)
###################################################
###################################################
###################################################
###################################################
###################################################
###################################################
def get_random_peer(self):
        # FIXME: Use a named tuple to improve readability?
# FIXME: This probably can go the peerManager
max_cumulative_difficulty = 0
for addr_remote in self.peer_blockheight:
max_cumulative_difficulty = max(max_cumulative_difficulty, self.peer_blockheight[addr_remote][2])
best_connection_ids = []
for addr_remote in self.peer_blockheight:
if self.peer_blockheight[addr_remote][2] == max_cumulative_difficulty:
best_connection_ids.append(addr_remote)
selected_peer_connections = []
for addr_remote in best_connection_ids:
for peer_conn in self._peer_connections:
if peer_conn.peer.full_address == addr_remote:
selected_peer_connections.append(peer_conn)
if len(selected_peer_connections) == 0 or max_cumulative_difficulty == 0:
return None
return random.sample(selected_peer_connections, 1)[0]
def update_peer_blockheight(self, addr_remote, block_number, headerhash, cumulative_difficulty):
# FIXME: Use a named tuple to improve readability?
self.peer_blockheight[addr_remote] = [block_number, headerhash, int(UInt256ToString(cumulative_difficulty))]
def request_peer_blockheight(self):
for peer in self._peer_connections:
msg = qrllegacy_pb2.LegacyMessage(func_name=qrllegacy_pb2.LegacyMessage.BH,
bhData=qrl_pb2.BlockHeightData(block_number=0))
peer.send(msg)
###################################################
###################################################
###################################################
###################################################
###################################################
###################################################
###################################################
@property
def num_connections(self):
return len(self._peer_connections)
@property
def connections(self):
return list(self._peer_connections)
@property
def synced(self):
return self.pow.sync_state.state == ESyncState.synced
@property
def reached_conn_limit(self):
return len(self._peer_connections) >= config.user.max_peers_limit
def get_connected_peer_addrs(self):
return set([peer.peer.full_address for peer in self._peer_connections])
###################################################
###################################################
###################################################
###################################################
###################################################
###################################################
@property
def chain_height(self):
return self._chain_manager.height
def get_last_block(self):
return self._chain_manager.last_block
def get_headerhashes(self, start_blocknumber):
return self._chain_manager.get_headerhashes(start_blocknumber)
def get_cumulative_difficulty(self):
return self._chain_manager.get_cumulative_difficulty()
def get_block_by_number(self, block_number):
return self._chain_manager.get_block_by_number(block_number)
def is_block_present(self, header_hash: bytes) -> bool:
if not self._chain_manager.get_block(header_hash):
if header_hash not in self.pow.future_blocks:
return False
return True
def block_received(self, source, block: Block):
self.pow.last_pb_time = ntp.getTime()
logger.info('>>> Received Block #%d %s', block.block_number, bin2hstr(block.headerhash))
if source != self._target_channel:
if self._target_channel is None:
logger.warning('Received block and target channel is None')
else:
logger.warning('Received block from unexpected peer')
logger.warning('Expected peer: %s', self._target_channel.peer)
logger.warning('Found peer: %s', source.peer)
return
if block.block_number != self._last_requested_block_number:
logger.warning('Did not match %s', self._last_requested_block_number)
self._qrl_node.peer_manager.ban_channel(source)
return
target_start_blocknumber = self._target_node_header_hash.block_number
expected_headerhash = self._target_node_header_hash.headerhashes[block.block_number - target_start_blocknumber]
if block.headerhash != expected_headerhash:
logger.warning('Did not match headerhash')
logger.warning('Expected headerhash %s', expected_headerhash)
logger.warning('Found headerhash %s', block.headerhash)
self._qrl_node.peer_manager.ban_channel(source)
return
if not block.validate(self._chain_manager, self.pow.future_blocks):
logger.warning('Syncing Failed: Block Validation Failed')
self._qrl_node.peer_manager.ban_channel(source)
return
if self._chain_manager.add_block(block, check_stale=False):
if self._chain_manager.last_block.headerhash == block.headerhash:
self.pow.suspend_mining_timestamp = ntp.getTime() + config.dev.sync_delay_mining
else:
logger.warning('Failed to Add Block')
self._qrl_node.peer_manager.ban_channel(source)
return
try:
reactor.download_monitor.cancel()
except Exception as e:
logger.warning("PB: %s", e)
if self.is_syncing_finished():
return
self._last_requested_block_number += 1
self.peer_fetch_block()
def is_syncing(self) -> bool:
return self._syncing_enabled
def is_syncing_finished(self, force_finish=False):
curr_index = self._last_requested_block_number - self._target_node_header_hash.block_number + 1
if curr_index == len(self._target_node_header_hash.headerhashes) or force_finish:
self._last_requested_block_number = None
self._target_node_header_hash = None
self._target_channel = None
self._syncing_enabled = False
return True
return False
def peer_fetch_block(self, retry=0):
node_header_hash = self._target_node_header_hash
curr_index = self._last_requested_block_number - node_header_hash.block_number
block_headerhash = node_header_hash.headerhashes[curr_index]
block = self._chain_manager.get_block(block_headerhash)
if retry >= 1:
logger.debug('Retry Limit Hit')
self._qrl_node.peer_manager.ban_channel(self._target_channel)
self.is_syncing_finished(force_finish=True)
return
while block and curr_index + 1 < len(node_header_hash.headerhashes):
self._last_requested_block_number += 1
curr_index = self._last_requested_block_number - node_header_hash.block_number
block_headerhash = node_header_hash.headerhashes[curr_index]
block = self._chain_manager.get_block(block_headerhash)
if block and self.is_syncing_finished():
return
self._target_channel.send_fetch_block(self._last_requested_block_number)
reactor.download_monitor = reactor.callLater(100, self.peer_fetch_block, retry + 1)
def compare_and_sync(self, source_peer, node_header_hash: qrl_pb2.NodeHeaderHash):
if self._syncing_enabled:
logger.info('>> Ignoring compare_and_sync Syncing Enabled')
return
last_block = self.get_last_block()
node_last_block_number = node_header_hash.block_number + len(node_header_hash.headerhashes) - 1
last_block_number = min(last_block.block_number, node_last_block_number)
if last_block_number < node_header_hash.block_number:
return
fork_block_number = last_block.block_number + 1
fork_found = False
for i in range(last_block_number, node_header_hash.block_number - 1, -1):
block = self._chain_manager.get_block_by_number(i)
if block:
if block.headerhash == node_header_hash.headerhashes[i - node_header_hash.block_number]:
break
fork_block_number = i
fork_found = True
if fork_found or (last_block.block_number < node_last_block_number):
self._target_channel = source_peer
self._target_node_header_hash = node_header_hash
self._last_requested_block_number = fork_block_number
self._syncing_enabled = True
self.peer_fetch_block()
###################################################
###################################################
###################################################
###################################################
###################################################
###################################################
def request_full_message(self, mr_data: qrllegacy_pb2.MRData):
"""
Request Full Message
        This function requests the full message corresponding to
        the Message Receipt received.
:return:
"""
        # FIXME: Again, breaking encapsulation
# FIXME: Huge amount of lookups in dictionaries
msg_hash = mr_data.hash
if msg_hash in self.master_mr._hash_msg:
if msg_hash in self.master_mr.requested_hash:
del self.master_mr.requested_hash[msg_hash]
return
if msg_hash not in self.master_mr.requested_hash:
return
peers_list = self.master_mr.requested_hash[msg_hash].peers_connection_list
message_request = self.master_mr.requested_hash[msg_hash]
for peer in peers_list:
if peer in message_request.already_requested_peers:
continue
message_request.already_requested_peers.append(peer)
msg = qrllegacy_pb2.LegacyMessage(func_name=qrllegacy_pb2.LegacyMessage.SFM,
mrData=qrllegacy_pb2.MRData(hash=mr_data.hash, type=mr_data.type))
peer.send(msg)
call_later_obj = reactor.callLater(config.dev.message_receipt_timeout,
self.request_full_message,
mr_data)
message_request.callLater = call_later_obj
return
        # If execution reaches this line, it means that no peer was able to provide the
        # full message for this hash, thus the hash has to be deleted.
        # Moreover, negative points could be added to the peers for this behaviour.
if msg_hash in self.master_mr.requested_hash:
del self.master_mr.requested_hash[msg_hash]
##############################################
##############################################
##############################################
##############################################
def reset_processor_flag(self, _):
self._txn_processor_running = False
def reset_processor_flag_with_err(self, msg):
logger.error('Exception in txn task')
logger.error('%s', msg)
self._txn_processor_running = False
def add_unprocessed_txn(self, tx, ip) -> bool:
if tx.fee < config.user.transaction_minimum_fee:
logger.info("Dropping Txn %s", bin2hstr(tx.txhash))
logger.info("Reason: Fee %s is below threshold fee %s", tx.fee, config.user.transaction_minimum_fee)
return False
if not self._chain_manager.tx_pool.update_pending_tx_pool(tx, ip):
return False
if not self._txn_processor_running:
txn_processor = TxnProcessor(chain_manager=self._chain_manager,
transaction_pool_obj=self._chain_manager.tx_pool,
broadcast_tx=self.broadcast_tx)
task_defer = TxnProcessor.create_cooperate(txn_processor).whenDone()
task_defer.addCallback(self.reset_processor_flag) \
.addErrback(self.reset_processor_flag_with_err)
self._txn_processor_running = True
return True
##############################################
##############################################
##############################################
##############################################
def broadcast_tx(self, tx: TransferTransaction):
logger.info('<<<Transmitting TX: %s', bin2hstr(tx.txhash))
if isinstance(tx, MessageTransaction):
legacy_type = qrllegacy_pb2.LegacyMessage.MT
elif isinstance(tx, TransferTransaction):
legacy_type = qrllegacy_pb2.LegacyMessage.TX
elif isinstance(tx, TokenTransaction):
legacy_type = qrllegacy_pb2.LegacyMessage.TK
elif isinstance(tx, TransferTokenTransaction):
legacy_type = qrllegacy_pb2.LegacyMessage.TT
elif isinstance(tx, SlaveTransaction):
legacy_type = qrllegacy_pb2.LegacyMessage.SL
elif isinstance(tx, LatticeTransaction):
legacy_type = qrllegacy_pb2.LegacyMessage.LT
elif isinstance(tx, MultiSigCreate):
legacy_type = qrllegacy_pb2.LegacyMessage.MC
elif isinstance(tx, MultiSigSpend):
legacy_type = qrllegacy_pb2.LegacyMessage.MS
elif isinstance(tx, MultiSigVote):
legacy_type = qrllegacy_pb2.LegacyMessage.MV
else:
raise ValueError('Invalid Transaction Type')
self.register_and_broadcast(legacy_type, tx.get_message_hash(), tx.pbdata)
def broadcast_block(self, block: Block):
# logger.info('<<<Transmitting block: ', block.headerhash)
data = qrllegacy_pb2.MRData()
data.stake_selector = block.transactions[0].public_key
data.block_number = block.block_number
data.prev_headerhash = bytes(block.prev_headerhash)
self.register_and_broadcast(qrllegacy_pb2.LegacyMessage.BK, block.headerhash, block.pbdata, data)
def register_and_broadcast(self, msg_type, msg_hash: bytes, pbdata, data=None):
self.master_mr.register(msg_type, msg_hash, pbdata)
self.broadcast(msg_type, msg_hash, data)
def broadcast(self, msg_type, msg_hash: bytes, mr_data=None):
"""
Broadcast
This function sends the Message Receipt to all connected peers.
:return:
"""
ignore_peers = []
if msg_hash in self.master_mr.requested_hash:
ignore_peers = self.master_mr.requested_hash[msg_hash].peers_connection_list
if not mr_data:
mr_data = qrllegacy_pb2.MRData()
mr_data.hash = msg_hash
mr_data.type = msg_type
data = qrllegacy_pb2.LegacyMessage(func_name=qrllegacy_pb2.LegacyMessage.MR,
mrData=mr_data)
for peer in self._peer_connections:
if peer not in ignore_peers:
peer.send(data)
def broadcast_get_synced_state(self):
# Request all peers to update their synced status
for peer in self._peer_connections:
peer.send_sync()
###################################################
###################################################
###################################################
###################################################
# Event handlers / Comms related
def start_listening(self):
reactor.listenTCP(config.user.p2p_local_port, self)
def clientConnectionLost(self, connector, reason): # noqa
logger.debug('connection lost: %s', reason)
def clientConnectionFailed(self, connector, reason):
logger.debug('connection failed: %s', reason)
def startedConnecting(self, connector):
logger.debug('Started connecting: %s', connector)
def add_connection(self, conn_protocol) -> bool:
# TODO: Most of this can go peer manager
if self._qrl_node.peer_manager.is_banned(conn_protocol.peer):
return False
redundancy_count = 0
for conn in self._peer_connections:
if conn.peer.ip == conn_protocol.peer.ip:
redundancy_count += 1
if config.user.max_redundant_connections >= 0:
if redundancy_count >= config.user.max_redundant_connections:
logger.info('Redundant Limit. Disconnecting client %s', conn_protocol.peer)
return False
if self.reached_conn_limit:
# FIXME: Should we stop listening to avoid unnecessary load due to many connections?
logger.info('Peer limit hit. Disconnecting client %s', conn_protocol.peer)
return False
# Remove your own ip address from the connection
if conn_protocol.peer.ip == conn_protocol.host.ip and conn_protocol.peer.port == config.user.p2p_public_port:
peer_list = [p for p in self._qrl_node.peer_manager.known_peer_addresses if p != conn_protocol.peer.full_address]
self._qrl_node.peer_manager.extend_known_peers(peer_list)
return False
self._peer_connections.append(conn_protocol)
logger.debug('>>> new connection: %s ', conn_protocol.peer)
return True
def remove_connection(self, conn_protocol):
if conn_protocol in self._peer_connections:
self._peer_connections.remove(conn_protocol)
if conn_protocol.peer.full_address in self.peer_blockheight:
del self.peer_blockheight[conn_protocol.peer.full_address]
def monitor_connections(self):
reactor.callLater(config.user.monitor_connections_interval, self.monitor_connections)
if len(self._peer_connections) == 0:
logger.warning('No Connected Peer Found')
known_peers = self._qrl_node.peer_manager.load_known_peers()
self._peer_q.extend(known_peers)
connected_peers_set = set()
for conn_protocol in self._peer_connections:
connected_peers_set.add(conn_protocol.peer.full_address)
for peer_item in config.user.peer_list:
peer_metadata = IPMetadata.from_full_address(peer_item)
if peer_metadata.full_address in self._peer_q:
self._peer_q.remove(peer_metadata.full_address)
if peer_metadata.full_address not in connected_peers_set:
self.connect_peer([peer_metadata.full_address])
if len(self._peer_connections) >= config.user.max_peers_limit:
return
if len(self._peer_q) == 0:
return
peer_address_list = []
max_length = min(10, config.user.max_peers_limit)
while len(self._peer_q) > 0 and len(peer_address_list) != max_length:
peer_address_list.append(self._peer_q.pop(0))
self.connect_peer(peer_address_list)
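# A brief note on the loop above (a hedged reading of the code, not an authoritative
# description): monitor_connections reschedules itself every
# config.user.monitor_connections_interval seconds, reloads known peers when no peer
# is connected, always tries the configured peer_list entries that are not yet
# connected, and then dials at most min(10, max_peers_limit) queued peers per pass.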
def connect_peer(self, full_address_list):
for full_address in full_address_list:
try:
addr = IPMetadata.from_full_address(full_address)
connected_peers = self.get_connected_peer_addrs()
should_connect = addr.full_address not in connected_peers
if should_connect:
reactor.connectTCP(addr.ip, addr.port, self)
except Exception as e:
logger.warning("Could not connect to %s - %s", full_address, str(e))
|
theQRL/QRL
|
src/qrl/core/p2p/p2pfactory.py
|
Python
|
mit
| 24,304
|
#####
# Author : David Stewart
# Date : April 14, 2016
# Problem : https://projecteuler.net/problem=33
# Brief    : There are exactly four non-trivial two-digit fractions, less than one in value, whose value is unchanged when a digit shared by the numerator and denominator is (incorrectly) cancelled. Find them, then find the denominator of their product in lowest terms.
# Comments : This problem is trivial to brute force. On inspection of the print statement at the bottom we find the resulting product to be
#            387296 / 38729600, which reduces trivially, so I didn't feel the need to write a reduction function. The answer, the denominator in lowest terms, is 100.
#####
xs = []
ys = []
for x in range(11,100):
for y in range(11,100):
#print x/y
for char in `x`:
if char in `y` and char != '0':
new_x = `x`.replace(char,'')
new_y = `y`.replace(char,'')
if len(new_x) > 0 and len(new_y) > 0 and new_y != '0' and new_x != '0' and x/y < 1:
new_x = float(new_x)
new_y = float(new_y)
if new_x / new_y == float(x)/float(y):
xs.append(x)
ys.append(y)
nominator = 1
denominator = 1
for x in xs:
nominator *= x
for y in ys:
denominator *= y
print nominator, denominator
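#####
# A minimal sanity check (hedged): assuming the four curious fractions found by
# the loop above are 16/64, 19/95, 26/65 and 49/98, their product reduces to
# 1/100, so the denominator asked for is 100.
from fractions import Fraction
product = Fraction(1, 1)
for a, b in [(16, 64), (19, 95), (26, 65), (49, 98)]:
    product *= Fraction(a, b)
print product  # expected: 1/100
#####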
|
DavidOStewart/ProjectEuler
|
33.py
|
Python
|
mit
| 1,392
|
'''
TODO:
* More testing!
* Change 'similarities' from N*N to linear, so that speed is sub-second.
* Write a tutorial, in Jupyter notebook, to show what I did, and how it is SO FRIGGIN COOL!!!
* Fix other errors
'''
testWord = 'बन्दीप्रतिकोव्यवहारसम्बन्धीमापदण्डअनुकूलको'
def split_word(word):
'''Takes a word, returns all valid splits of it'''
splits = list(get_all_substrings(word))
return startWord(word, splits)
def get_all_substrings(string):
'''takes in an unsegmented string, and depends on the global var 'vocab'. returns: all substrings of the given string, every string a valid vocab token.
tokens beginning in markers are invalid tokens
'''
length = len(string)
for i in range(length):
for j in range(i + 1, length + 1):
if string[i:j] in vocab and string[i:j][0].isalpha():
yield(string[i:j])
def genChild(word, token, startPosInWord, endPosInList, tokenList):
'''Used by startWord. Takes the full word, current token being processed, etc'''
global parts
startPos = startPosInWord
nextWordPos = startPos + len(token)
nextPos = endPosInList
if nextWordPos >= len(word):
#This is the final token
return [token]
#There are other tokens to be made
#if token == 'सम्बन्धी' or 'सम्बन्धी' in token:
nextChar = word[nextWordPos]
#occurrences of next in me
repCount = token.count(nextChar)
genTokens = wordsStartingIn(nextChar, tokenList[nextPos:], repCount = repCount)
nextTokens = genTokens[0]
nextPos = genTokens[1] + nextPos
#Run this function recursively on each
toReturn = []
if len(nextTokens) > 0:
for tok in nextTokens:
children = genChild(word, tok, nextWordPos, nextPos, tokenList)
for child in children:
if token+child.replace('_','') in word:
res = token+"_"+child
toReturn.append(res)
#parts.add(res)
return toReturn
def startWord (word, tokenList):
'''Semi-helper for genChild. processes the token-creation by taking a word, and all valid splits'''
seedWords = wordsStartingIn(word[0], tokenList)
words = seedWords[0]
lastPos = seedWords[1]
tot = []
for each in words:
res = genChild(word, each, 0, lastPos, tokenList)
tot+=res
return tot
def sortMe(words):
'''Sorts the different possible splits by their similarities to each other from Word2Vec'''
return sorted(words, key=lambda x: similarities(x.split('_'), model))
def wordsStartingIn(startingChar, curTokenList, repCount = 0):
'''Returns the words in curToken list that start with startingChar.
repCount is the number of occurrences of the char inside the target word of interest.
'''
global counter
counter += 1
i = 0
tokenList = curTokenList
if len(tokenList)>0:
while repCount >= 0:
while i <len(tokenList) and startingChar != tokenList[i][0]:
i += 1
#Now that we have the position:
match_toks = []
while i< len(tokenList) and startingChar == tokenList[i][0]:
if repCount == 0:
match_toks.append(tokenList[i])
i += 1
repCount -=1
return match_toks, i
return [], len(curTokenList)
def similarities(wordList, model):
'''Sort the words in wordlist according to similarities as given by model'''
totalScore = 0
for i in range(len(wordList)):
for j in range(i+1, len(wordList)):
totalScore += model.n_similarity(wordList[i], wordList[j])
return totalScore/len(wordList)
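# A minimal usage sketch (hedged). This module relies on globals that are not
# defined in this file: `vocab` (a set of valid tokens), `model` (a Word2Vec-style
# object exposing n_similarity) and `counter`. The placeholders below only
# demonstrate the call sequence; with an empty vocab no splits are found.
if __name__ == '__main__':
    vocab = set()      # placeholder vocabulary of valid tokens
    counter = 0        # used by wordsStartingIn for bookkeeping
    candidates = split_word(testWord)   # all valid segmentations of testWord
    print(candidates)                   # [] with the empty placeholder vocab
    # sortMe(candidates) would additionally require `model` to be loaded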
|
shirish93/CoLing
|
word_segmentation.py
|
Python
|
mit
| 3,444
|
import os
import numpy as np
from vsm.structarr import arr_add_field
from vsm.split import split_corpus
__all__ = [ 'BaseCorpus', 'Corpus', 'add_metadata', 'align_corpora' ]
class BaseCorpus(object):
"""
A BaseCorpus object stores a corpus along with its tokenizations
(e.g., as sentences, paragraphs or documents).
BaseCorpus aims to provide an efficient method for viewing the
corpus in tokenized form (i.e., without copying). It currently
achieves this by storing the corpus as a numpy `ndarray`. Viewing
a tokenization is carried out using the `view` facilities of numpy
ndarrays. See documentation on `numpy.ndarray` for further
details.
:param corpus: Array, typically of strings or integers, of atomic words
(or tokens) making up the corpus.
:type corpus: array-like
:param dtype: The data-type used to interpret the corpus. If omitted, the
data-type is determined by `numpy.asarray`. Default is `None`.
:type dtype: data-type, optional
:param context_data: Each element in `context_data` is an array
containing the indices marking the context boundaries.
An element in `context_data` is intended for use as a value for
the `indices_or_sections` parameter in `numpy.split`.
Elements of `context_data` may also be 1-D arrays whose elements
are pairs, where the first element is a context boundary and
the second element is metadata associated with that context preceding
that boundary. For example, (250, 'dogs') might indicate that
the 'article' context ending at the 250th word of the corpus is named
'dogs'. Default is `None`.
:type context_data: list with 1-D array-like elements, optional
:param context_types: Each element in `context_types` is a type of a
tokenization in `context_data`.
:type context_types: array-like, optional
:param remove_empty: If True, empty tokenizations are removed. Default is
`True`.
:type remove_empty: boolean, optional
:attributes:
* **corpus** (1-dimensional array)
Stores the value of the `corpus` parameter after it has been cast
to an array of data-type `dtype` (if provided).
* **words** (1-dimensional array)
The indexed set of atomic words appearing in `corpus`.
* **context_types** (1-dimensional array-like)
* **context_data** (list of 1-D array-like)
:methods:
* **meta_int**
Takes a type of tokenization and a query and returns the index of
the metadata found in the query.
* **get_metadatum**
Takes a type of tokenization and a query and returns the metadatum
corresponding to the query and the field.
* **view_contexts**
Takes a type of tokenization and returns a view of the corpus
tokenized accordingly.
* **view_metadata**
Takes a type of tokenization and returns a view of the metadata
of the tokenization.
* **tolist**
Returns Corpus object as a list of lists.
* **remove_empty**
Removes empty documents in the corpus.
:See Also: :class:`Corpus`
**Examples**
>>> corpus = ['the', 'dog', 'chased', 'the', 'cat',
'the', 'cat', 'ran', 'away']
>>> context_types = ['sentences']
>>> context_data = [np.array([(5, 'transitive'), (9, 'intransitive')],
dtype=[('idx', '<i8'), ('sent_label', '|S16')])]
>>> from vsm.corpus import BaseCorpus
>>> c = BaseCorpus(corpus, context_types=context_types, context_data=context_data)
>>> c.corpus
array(['the', 'dog', 'chased', 'the', 'cat', 'the', 'cat',
'ran', 'away'], dtype='|S6')
>>> c.words
array(['ran', 'away', 'chased', 'dog', 'cat', 'the'],
dtype='|S6')
>>> c.meta_int('sentences',{'sent_label': 'intransitive'})
1
>>> c.get_metadatum('sentences', {'sent_label': 'intransitive'}, 'sent_label')
'intransitive'
>>> c.view_contexts('sentences')
[array(['the', 'dog', 'chased', 'the', 'cat'],
dtype='|S6'),
array(['the', 'cat', 'ran', 'away'],
dtype='|S6')]
>>> c.view_metadata('sentences')[0]['sent_label']
'transitive'
"""
def __init__(self,
corpus,
dtype=None,
context_types=[],
context_data=[],
remove_empty=True):
self.corpus = np.asarray(corpus, dtype=dtype)
self.dtype = self.corpus.dtype
self.words = np.unique(self.corpus)
self.context_data = []
for t in context_data:
if self._validate_indices(t['idx']):
self.context_data.append(t)
self._gen_context_types(context_types)
if remove_empty:
self.remove_empty()
def __len__(self):
"""
Returns the number of tokens in the corpus.
:See Also: `len(self.words)` for the number of unique tokens.
"""
return len(self.corpus)
def _gen_context_types(self, context_types):
"""
Missing context types are filled in with 'ctx_' + an index.
"""
if self.context_data:
a = len(context_types) if context_types else 0
for i in xrange(a, len(self.context_data)):
context_types.append('ctx_' + str(i))
self.context_types = context_types
def _validate_indices(self, indices):
"""
Checks for invalid tokenizations. Specifically, checks
that the list of indices is sorted and in range. Ignores
empty tokens.
:param indices:
:type indices : 1-D integer array-like
:returns: `True` if the indices are validated
:raises: Exception
:See Also: :class:`BaseCorpus`
"""
for i, j in enumerate(indices):
if i < len(indices) - 1 and j > indices[i + 1]:
msg = 'malsorted tokenization:'\
' ctx ' + str(j) + ' and ' + str(indices[i + 1])
raise Exception(msg)
if j > self.corpus.shape[0]:
msg = 'invalid tokenization'\
' : ' + str(j) + ' is out of range ('\
+ str(self.corpus.shape[0]) + ')'
raise Exception(msg)
return True
def remove_empty(self):
"""
Removes empty tokenizations, if `Corpus` object is not empty.
"""
if self:
for j, t in enumerate(self.context_types):
token_list = self.view_contexts(t)
indices = np.array([ctx.size != 0 for ctx in token_list], dtype=np.bool)
self.context_data[j] = self.context_data[j][indices]
def view_metadata(self, ctx_type):
"""
Displays the metadata corresponding to a tokenization of the
corpus. This method can be used in :class:`Corpus` as well as
:class:`BaseCorpus`
:param ctx_type: The type of a tokenization.
:type ctx_type: string-like
:returns: The metadata for a tokenization.
:See Also: :class:`BaseCorpus`, :class:`Corpus`
"""
i = self.context_types.index(ctx_type)
return self.context_data[i]
def meta_int(self, ctx_type, query):
"""
Returns the index of the metadata found in the query.
:param ctx_type: The type of a tokenization.
:type ctx_type: string-like
:param query: Dictionary with a key, value being a field, label
in metadata.
:type query: dictionary-like
:returns: The index of the metadata found in the query.
:raises: KeyError
:See Also: :class:`BaseCorpus`
"""
tok = self.view_metadata(ctx_type)
ind_set = np.ones(tok.size, dtype=bool)
for k,v in query.iteritems():
ind_set = np.logical_and(ind_set, (tok[k] == v))
n = np.count_nonzero(ind_set)
if n == 0:
raise KeyError('No token fits the description: ' +
', '.join(['{q}:{l}'.format(q=k, l=v)
for k,v in query.iteritems()]))
elif n > 1:
msg = ('Multiple tokens fit that description:\n'
+ str(tok[ind_set]))
raise KeyError(msg)
return ind_set.nonzero()[0][0]
def get_metadatum(self, ctx_type, query, field):
"""
Returns the metadatum corresponding to the query and the field.
:param ctx_type: The type of a tokenization.
:type ctx_type: string-like
:param query: Dictionary with a key, value being a field, label
in metadata.
:type query: dictionary-like
:param field: Field of the metadata
:type field: string
:returns: The metadatum corresponding to the query and the field.
:See Also: :class:`BaseCorpus`
"""
i = self.meta_int(ctx_type, query)
return self.view_metadata(ctx_type)[i][field]
def view_contexts(self, ctx_type, as_slices=False, as_indices=False):
"""
Displays a tokenization of the corpus.
:param ctx_type: The type of a tokenization.
:type ctx_type: string-like
:param as_slices: If True, a list of slices corresponding to
'ctx_type' is returned. Otherwise, integer representations
are returned. Default is `False`.
:type as_slices: Boolean, optional
:Returns: A tokenized view of `corpus`.
:See Also: :class:`BaseCorpus`, :meth:`numpy.split`
"""
indices = self.view_metadata(ctx_type)['idx']
if as_indices:
return indices
if as_slices:
if len(indices) == 0:
return [slice(0, 0)]
slices = []
slices.append(slice(0, indices[0]))
for i in xrange(len(indices) - 1):
slices.append(slice(indices[i], indices[i+1]))
return slices
return split_corpus(self.corpus, indices)
def tolist(self, context_type):
"""
Returns Corpus object as a list of lists.
"""
return self.view_contexts(context_type)
class Corpus(BaseCorpus):
"""
The goal of the Corpus class is to provide an efficient representation\
of a textual corpus.
A Corpus object contains an integer representation of the text and
maps to permit conversion between integer and string
representations of a given word.
As a BaseCorpus object, it includes a dictionary of tokenizations
of the corpus and a method for viewing (without copying) these
tokenizations. This dictionary also stores metadata (e.g.,
document names) associated with the available tokenizations.
:param corpus: A string array representing the corpus as a sequence of
atomic words.
:type corpus: array-like
:param context_data: Each element in `context_data` is an array containing
the indices marking the token boundaries. An element in `context_data` is
intended for use as a value for the `indices_or_sections`
parameter in `numpy.split`. Elements of `context_data` may also be
1-D arrays whose elements are pairs, where the first element
is a context boundary and the second element is metadata
associated with that context preceding that boundary. For
example, (250, 'dogs') might indicate that the 'article' context
ending at the 250th word of the corpus is named 'dogs'.
Default is `None`.
:type context_data: list-like with 1-D integer array-like elements, optional
:param context_types: Each element in `context_types` is a type of a context
in `context_data`.
:type context_types: array-like, optional
:attributes:
* **corpus** (1-D 32-bit integer array)
corpus is the integer representation of the input string array-like
value of the corpus parameter
* **words** (1-D string array)
The indexed set of strings occurring in corpus. It is a string-typed array.
* **words_int** (dictionary)
A dictionary whose keys are `words` and whose values are their
corresponding integers (i.e., indices in `words`).
:methods:
* **view_metadata**
Takes a type of tokenization and returns a view of the metadata
of the tokenization.
* **view_contexts**
Takes a type of tokenization and returns a view of the corpus tokenized
accordingly. The optional parameter `strings` takes a boolean value:
True to view string representations of words; False to view integer
representations of words. Default is `False`.
* **save**
Takes a filename and saves the data contained in a Corpus object to
a `npy` file using `numpy.savez`.
* **load**
Static method. Takes a filename, loads the file data into a Corpus
object and returns the object.
* **apply_stoplist**
Takes a list of stopwords and returns a copy of the corpus with
the stopwords removed.
* **tolist**
Returns Corpus object as a list of lists of either integers or strings,
according to `as_strings`.
:See Also: :class:`BaseCorpus`
**Examples**
>>> text = ['I', 'came', 'I', 'saw', 'I', 'conquered']
>>> context_types = ['sentences']
>>> context_data = [np.array([(2, 'Veni'), (4, 'Vidi'), (6, 'Vici')],
dtype=[('idx', '<i8'), ('sent_label', '|S6')])]
>>> from vsm.corpus import Corpus
>>> c = Corpus(text, context_types=context_types, context_data=context_data)
>>> c.corpus
array([0, 1, 0, 2, 0, 3], dtype=int32)
>>> c.words
array(['I', 'came', 'saw', 'conquered'],
dtype='|S9')
>>> c.words_int['saw']
2
>>> c.view_contexts('sentences')
[array([0, 3], dtype=int32), array([0, 2], dtype=int32),
array([0, 1], dtype=int32)]
>>> c.view_contexts('sentences', as_strings=True)
[array(['I', 'came'],
dtype='|S9'),
array(['I', 'saw'],
dtype='|S9'),
array(['I', 'conquered'],
dtype='|S9')]
>>> c.view_metadata('sentences')[1]['sent_label']
'Vidi'
>>> c = c.apply_stoplist(['saw'])
>>> c.words
array(['I', 'came', 'conquered'],
dtype='|S9')
"""
def __init__(self,
corpus,
context_types=[],
context_data=[],
remove_empty=True):
super(Corpus, self).__init__(corpus,
context_types=context_types,
context_data=context_data,
dtype=np.unicode_,
remove_empty=remove_empty)
self._set_words_int()
# Integer encoding of a string-type corpus
self.dtype = np.int32
self.corpus = np.asarray([self.words_int[word]
for word in self.corpus],
dtype=self.dtype)
self.stopped_words = set()
def _set_words_int(self):
"""
Mapping of words to their integer representations.
"""
self.words_int = dict((t,i) for i,t in enumerate(self.words))
def view_contexts(self, ctx_type, as_strings=False, as_slices=False, as_indices=False):
"""
Displays a tokenization of the corpus.
:param ctx_type: The type of a tokenization.
:type ctx_type: string-like
:param as_strings: If True, string representations of words are returned.
Otherwise, integer representations are returned. Default
is `False`.
:type as_strings: Boolean, optional
:param as_slices: If True, a list of slices corresponding to 'ctx_type'
is returned. Otherwise, integer representations are returned.
Default is `False`.
:type as_slices: Boolean, optional
:returns: A tokenized view of `corpus`.
:See Also: :class:`Corpus`, :class:`BaseCorpus`
"""
if as_strings:
token_list = super(Corpus, self).view_contexts(ctx_type)
token_list_ = []
for token in token_list:
token = self.words[token]
token_list_.append(token)
return token_list_
return super(Corpus, self).view_contexts(ctx_type,
as_slices=as_slices,
as_indices=as_indices)
def tolist(self, context_type, as_strings=False):
"""
Returns Corpus object as a list of lists of either integers or
strings, according to `as_strings`.
:param context_type: The type of tokenization.
:type context_type: string
:param as_strings: If True, string representations of words are returned.
Otherwise, integer representations are returned. Default
is `False`.
:type as_strings: Boolean, optional
:returns: List of lists
"""
ls = self.view_contexts(context_type, as_strings=as_strings)
return [arr.tolist() for arr in ls]
@staticmethod
def load(file=None, corpus_dir=None,
corpus_file='corpus.npy',
words_file='words.npy',
metadata_file='metadata.npy'):
"""Loads data into a Corpus object.
:param file: The file to read. See `numpy.load` for further
details. Assumes file has been constructed as by
`Corpus.save`. This option is exclusive of `corpus_dir`.
:type file: str-like or file object
:param corpus_dir: A directory containing the files
`corpus_file`, `words_file`, `metadata_file`, from which to
instantiate a Corpus object. This option is ignored if `file`
is not `None`.
:type corpus_dir: string
:param corpus_file: File under `corpus_dir` containing the
corpus data, stored as a numpy array of integers in an `npy`
file.
:type corpus_file: string or file object
:param words_file: File under `corpus_dir` containing the
corpus vocabulary, stored as a numpy array of strings in an
`npy` file.
:type words_file: string or file object
:param metadata_file: File under `corpus_dir` containing the
corpus metadata, stored as a numpy structured array in an `npy`
file. Note that this structured array should contain a field
`idx` which stores the integer indices marking the document
boundaries.
:type metadata_file: string or file object
:returns: A Corpus object.
:See Also: :class:`Corpus`, :meth:`Corpus.save`, :meth:`numpy.load`
"""
if not file is None:
arrays_in = np.load(file)
c = Corpus([], remove_empty=False)
c.corpus = arrays_in['corpus']
c.words = arrays_in['words']
c.context_types = arrays_in['context_types'].tolist()
c.context_data = list()
for n in c.context_types:
t = arrays_in['context_data_' + n]
c.context_data.append(t)
c._set_words_int()
return c
if not corpus_dir is None:
c = Corpus([], remove_empty=False)
c.corpus = np.load(os.path.join(corpus_dir, corpus_file))
c.words = np.load(os.path.join(corpus_dir, words_file))
c._set_words_int()
c.context_types = [ 'document' ]
c.context_data = [ np.load(os.path.join(corpus_dir, metadata_file)) ]
return c
def save(self, file):
"""
Saves data from a Corpus object as an `npz` file.
:param file: Designates the file to which to save data. See
`numpy.savez` for further details.
:type file: str-like or file-like object
:returns: None
:See Also: :class:`Corpus`, :meth:`Corpus.load`, :meth:`np.savez`
"""
print 'Saving corpus as', file
arrays_out = dict()
arrays_out['corpus'] = self.corpus
arrays_out['words'] = self.words
arrays_out['context_types'] = np.asarray(self.context_types)
for i,t in enumerate(self.context_data):
key = 'context_data_' + self.context_types[i]
arrays_out[key] = t
np.savez(file, **arrays_out)
def apply_stoplist(self, stoplist=[], freq=0):
"""
Takes a Corpus object and returns a copy of it with words in the
stoplist removed and with words of frequency <= `freq` removed.
:param stoplist: The list of words to be removed.
:type stoplist: list
:param freq: A threshold where words of frequency <= 'freq' are
removed. Default is 0.
:type freq: integer, optional
:returns: Copy of corpus with words in the stoplist and words
of frequency <= 'freq' removed.
:See Also: :class:`Corpus`
"""
if freq:
#TODO: Use the TF model instead
# print 'Computing collection frequencies'
cfs = np.zeros_like(self.words, dtype=self.corpus.dtype)
for word in self.corpus:
cfs[word] += 1
# print 'Selecting words of frequency <=', freq
freq_stop = np.arange(cfs.size)[(cfs <= freq)]
stop = set(freq_stop)
else:
stop = set()
# filter stoplist
stoplist = [t for t in stoplist if t in self.words]
for t in stoplist:
stop.add(self.words_int[t])
if not stop:
# print 'Stop list is empty.'
return self
# print 'Removing stop words'
f = np.vectorize(lambda x: x not in stop)
corpus = self.corpus[f(self.corpus)]
# print 'Rebuilding corpus'
corpus = [self.words[i] for i in corpus]
context_data = []
for i in xrange(len(self.context_data)):
# print 'Recomputing token breaks:', self.context_types[i]
tokens = self.view_contexts(self.context_types[i])
spans = [t[f(t)].size for t in tokens]
tok = self.context_data[i].copy()
tok['idx'] = np.cumsum(spans)
context_data.append(tok)
c = Corpus(corpus, context_data=context_data, context_types=self.context_types)
if self.stopped_words:
c.stopped_words.update(self.stopped_words)
c.stopped_words.update(stoplist)
return c
def add_metadata(corpus, ctx_type, new_field, metadata):
"""
Returns a corpus with metadata added.
:param corpus: Corpus object to add new metadata to.
:type corpus: :class:`Corpus`
:param ctx_type: A type of tokenization.
:type ctx_type: string
:param new_field: Field name of the new metadata.
:type new_field: string
:param metadata: List of values to be added to `corpus`.
:type metadata: list
:returns: Corpus with new metadata added to the existing metadata.
:See Also: :class:`Corpus`
"""
i = corpus.context_types.index(ctx_type)
md = corpus.context_data[i]
corpus.context_data[i] = arr_add_field(md, new_field, metadata)
return corpus
def align_corpora(old_corpus, new_corpus, remove_empty=True):
"""Takes two Corpus objects `old_corpus` and `new_corpus` and returns
a copy of `new_corpus` with the following modifications: (1) the
word to integer mapping agrees with that of `old_corpus` and (2)
words in `new_corpus` which do not appear in `old_corpus` are
removed from the corpus. Empty documents are removed.
"""
new_words = [w for w in new_corpus.words if w not in old_corpus.words]
out = new_corpus.apply_stoplist(new_words)
if remove_empty:
out.remove_empty()
int_words = out.words
words_int = old_corpus.words_int
int_int = {}
for i in xrange(len(int_words)):
int_int[i] = words_int[int_words[i]]
for i in xrange(len(out.corpus)):
out.corpus[i] = int_int[out.corpus[i]]
out.words = old_corpus.words.copy()
out._set_words_int()
return out
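# A minimal usage sketch (hedged), adapted from the docstring examples above:
# build a small Corpus, save it to an npz file and reload it. The file name
# 'veni_vidi_vici.npz' is illustrative only.
if __name__ == '__main__':
    text = ['I', 'came', 'I', 'saw', 'I', 'conquered']
    ctx_data = [np.array([(2, 'Veni'), (4, 'Vidi'), (6, 'Vici')],
                         dtype=[('idx', '<i8'), ('sent_label', '|S6')])]
    demo = Corpus(text, context_types=['sentences'], context_data=ctx_data)
    demo.save('veni_vidi_vici.npz')
    reloaded = Corpus.load(file='veni_vidi_vici.npz')
    print(reloaded.view_contexts('sentences', as_strings=True))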
|
iSumitG/vsm
|
vsm/corpus/base.py
|
Python
|
mit
| 24,737
|
"""
WSGI config for EMEARoster project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "EMEARoster.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
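# A minimal invocation sketch (hedged): any WSGI server can serve the
# `application` object above, e.g. with gunicorn:
#   gunicorn EMEARoster.wsgi:application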
|
faisaltheparttimecoder/EMEARoster
|
EMEARoster/wsgi.py
|
Python
|
mit
| 489
|
import json
import threading
import accounts
lock = threading.Lock()
_stats = None
def _load():
global _stats
if _stats is None:
try:
_stats = json.loads(open(accounts.getFile('stats.txt')).read())
except Exception:
_stats = {}
def update(name, value):
with lock:
_load()
if name in _stats and _stats[name] == value:
return
_stats[name] = value
with open(accounts.getFile('stats.txt'), 'w') as f:
f.write(json.dumps(_stats))
def get(name, default=None):
with lock:
_load()
return _stats.get(name, default)
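# A minimal usage sketch (hedged): persist a counter between runs using only
# the two functions above.
#   update('messages_processed', get('messages_processed', 0) + 1)
#   print(get('messages_processed'))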
|
kalinochkind/vkbot
|
stats.py
|
Python
|
mit
| 639
|
#!-*- coding:utf-8 -*-
"""
Proxy liveness checking tool.
Uses gevent with a configurable concurrency level to run liveness checks in parallel.
Invoked in the style of a console application in the Yii framework.
"""
import os
import time
import codecs
import gevent
import requests
import json
import sys
import gevent.monkey
gevent.monkey.patch_socket()
from gevent import monkey
monkey.patch_ssl()
sys.path.append('..')
from model.proxyRequest import *
PATH = os.path.abspath(os.path.dirname(__file__))
proxy_pool_path = os.path.join(PATH, '../extention/proxy_pool')
defaule_timeout_limit = 2  # default proxy timeout (seconds)
online_proxy_timeout = 5  # timeout for public-network proxies (seconds)
daxiang_service_name = 'daxiang'  # service name of the Daxiang proxy provider
asy_thread_count = 50  # number of concurrent greenlets per batch
asy_check_sleep_interval = 10
alive_check_url = ProxyConfig.alive_check_url
def check_proxy(ip_port):
'''Check whether the given proxy ip:port is alive.
@:return
{
    ip_port:       the proxy address that was checked
    resObj:        the requests response object, or False on failure
    time_consume:  elapsed time of the check in seconds
}'''
result = {}
ip_port = ip_port.strip()
result['ip_port'] = ip_port
st = time.time()
try:
proxy = {
'http': ip_port,
'https': ip_port
}
retObj = requests.get(alive_check_url, proxies=proxy, timeout=defaule_timeout_limit)
result['resObj'] = retObj
except:
result['resObj'] = False
result['time_consume'] = time.time() - st
return result
def asy_proxy_list_check(ip_port_list):
proxyobj_list = []
if not ip_port_list:
return proxyobj_list
thread_count = len(ip_port_list)
threads = []
for i in range(thread_count):
ip_port = ip_port_list[i]
threads.append(gevent.spawn(check_proxy, ip_port))
gevent.joinall(threads)
for thread_ret in threads:
proxyobj_list.append(thread_ret.value)
return proxyobj_list
def check_common_proxy_pool():
'''common proxy alive check'''
redisModel = RedisBase()
proxy_services_list = ['ali']
# check proxy in redis
for proxy_service in proxy_services_list:
proxy_key = ProxyConfig.hermes_proxy_prefix + proxy_service
proxies_set = redisModel.smembers(proxy_key)
logger.info('proxy_service=%s\ttotal_alive_count=%s' % (proxy_service, len(proxies_set)))
for proxy in proxies_set:
proxies = {
'http': proxy,
'https': proxy
}
try:
ret = requests.get(ProxyConfig.alive_check_url, proxies=proxies, timeout=defaule_timeout_limit)
html_content = ret.content
if ret.status_code != 200 or 'html' not in html_content: # proxy dead
redisModel.srem(proxy_key, proxy)
logger.warn('common proxy pool redis check, failed, ip_port:%s' % proxy)
except BaseException, e:
redisModel.srem(proxy_key, proxy)
logger.warn('common proxy pool redis check, failed, ip_port:%s' % proxy)
# check proxies in proxy file
if not os.path.isfile(proxy_pool_path):
logger.error('file not exists:%s' % proxy_pool_path)
return
line_list = codecs.open(proxy_pool_path, encoding='utf-8').readlines()
for line in line_list:
line = line.strip()
if line.startswith('#') or not line:
continue
proxy, service = line.split()
proxies = {
'http': proxy,
'https': proxy
}
start_time = time.time()
proxy_key = ProxyConfig.hermes_proxy_prefix + service
try:
ret = requests.get(ProxyConfig.alive_check_url, proxies=proxies, timeout=defaule_timeout_limit)
time_consume = time.time() - start_time
html_content = ret.content
if ret.status_code == 200 and 'html' in html_content:
redisModel.sadd(proxy_key, proxy) # alive, add to proxy pool
logger.info(
'%s proxy alive check, success, ip_port:%s, time_consume:%s' % (service, proxy, time_consume))
else:
redisModel.srem(proxy_key, proxy) # dead, remove from proxy_pool
logger.warn(
'%s proxy alive check, failed, ip_port:%s, time_consume:%s' % (service, proxy, time_consume))
except:
redisModel.srem(proxy_key, proxy) # dead, remove from proxy_pool
logger.warn('%s proxy alive check, failed, ip_port:%s' % (service, proxy))
def redis_proxy_alive_check(proxy_service):
'''Generic liveness check for proxies stored in a redis set.'''
redisModel = RedisBase()
proxy_key = ProxyConfig.hermes_proxy_prefix + proxy_service
proxies_set = redisModel.smembers(proxy_key)
proxy_list = list(proxies_set)
partial_count = len(proxy_list) / asy_thread_count
for i in range(partial_count + 1):
partial_proxy_list = proxy_list[i * asy_thread_count: (i + 1) * asy_thread_count]
proxyobj_list = asy_proxy_list_check(partial_proxy_list)
time.sleep(asy_check_sleep_interval)
for proxyobj in proxyobj_list:
resobj = proxyobj.get('resObj')
ip_port = proxyobj.get('ip_port')
time_consume = proxyobj.get('time_consume')
if resobj and resobj.status_code == 200:
logger.info('%s redis alive check, success, ip_port:%s, time_consume:%s' %
(proxy_service, ip_port, time_consume))
else:
redisModel.srem(proxy_key, ip_port)
logger.warn('%s redis alive check, failed, ip_port:%s, time_consume:%s' %
(proxy_service, ip_port, time_consume))
def daxiang_api_proxy_check():
''' check daxiang proxy alive status'''
global defaule_timeout_limit, alive_check_url
defaule_timeout_limit = 40
alive_check_url = ProxyConfig.daxiang_proxy_public_alive_check_url
redisModel = RedisBase()
proxy_key = ProxyConfig.hermes_proxy_prefix + daxiang_service_name
try:
ip_port_lines = requests.get(ProxyConfig.daxiang_proxy_api_url, timeout=20).content
if ip_port_lines.strip():
ip_port_list = ip_port_lines.split('\n')
partial_count = len(ip_port_list) / asy_thread_count
for i in range(partial_count + 1):
partial_proxy_list = ip_port_list[i * asy_thread_count: (i + 1) * asy_thread_count]
proxyobj_list = asy_proxy_list_check(partial_proxy_list)
time.sleep(asy_check_sleep_interval)
for proxyobj in proxyobj_list:
resobj = proxyobj.get('resObj')
ip_port = proxyobj.get('ip_port')
time_consume = proxyobj.get('time_consume')
if resobj and resobj.status_code == 200:
redisModel.sadd(proxy_key, ip_port)
logger.info('%s proxy alive check, success, ip_port:%s, time_consume:%s' % (ProxyConfig.service_daxiang, ip_port, time_consume))
else:
logger.warn('%s proxy alive check, failed, ip_port:%s, time_consume:%s' % (ProxyConfig.service_daxiang, ip_port, time_consume))
redisModel.srem(proxy_key, ip_port)
total_proxies = redisModel.smembers(proxy_key)
logger.info('proxy_service=%s\ttotal_alive_count=%s' % (ProxyConfig.service_daxiang, len(total_proxies)))
except BaseException, e:
logger.warn(e)
def daxiang_redis_proxy_alive_check():
'''Liveness check for proxies already stored in the Daxiang redis proxy pool.'''
global defaule_timeout_limit
defaule_timeout_limit = ProxyConfig.public_net_timeout_interval
ProxyConfig.alive_check_url = ProxyConfig.daxiang_proxy_public_alive_check_url
redis_proxy_alive_check(daxiang_service_name)
def kuai_api_proxy_check():
redisModel = RedisBase()
proxy_key = ProxyConfig.hermes_proxy_prefix + ProxyConfig.service_kuai
uncheck_proxy_list = []
try:
content = requests.get(ProxyConfig.kuai_proxy_api_url, timeout=20).content
json_data = json.loads(content)
if not json_data.get('list'):
logger.warn('%s proxy api response null' % ProxyConfig.service_kuai)
return
for ip_port_dic in json_data.get('list'):
proxy = '%(ip)s:%(port)s' % ip_port_dic
uncheck_proxy_list.append(proxy)
except BaseException, e:
logger.warn('%s proxy net request timed out, err_msg:%s' % (ProxyConfig.service_kuai, e))
return
partial_count = len(uncheck_proxy_list) / asy_thread_count
for i in range(partial_count + 1):
partial_proxy_list = uncheck_proxy_list[i * asy_thread_count: (i + 1) * asy_thread_count]
proxyobj_list = asy_proxy_list_check(partial_proxy_list)
time.sleep(asy_check_sleep_interval)
for proxyobj in proxyobj_list:
resobj = proxyobj.get('resObj')
ip_port = proxyobj.get('ip_port')
time_consume = proxyobj.get('time_consume')
if resobj and resobj.status_code == 200:
redisModel.sadd(proxy_key, ip_port)
logger.info('%s proxy alive check, success, ip_port:%s, time_consume:%s' % (
ProxyConfig.service_kuai, ip_port, time_consume))
else:
redisModel.srem(proxy_key, ip_port)
logger.warn('%s proxy alive check, failed, ip_port:%s, time_consume:%s' % (
ProxyConfig.service_kuai, ip_port, time_consume))
total_proxies = redisModel.smembers(proxy_key)
logger.info('proxy_service=%s\ttotal_alive_count=%s' % (ProxyConfig.service_kuai, len(total_proxies)))
def kuai_redis_proxy_check():
redis_proxy_alive_check(ProxyConfig.service_kuai)
if __name__ == '__main__':
args = sys.argv[1:]
if len(args) < 1:
print 'shell param error'
sys.exit(1)
funName = args[0].strip()
eval('%s()' % funName)
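# A minimal invocation sketch (hedged): the first shell argument names the
# function to run, e.g.
#   python proxy_alive_check_utils.py check_common_proxy_pool
#   python proxy_alive_check_utils.py daxiang_api_proxy_check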
|
wanghuafeng/spider_tools
|
proxy_alive_check_utils.py
|
Python
|
mit
| 9,900
|
"""
Functions for working with the CrossRef.org rest api.
"""
import re
import sys
import urllib
import requests
import unicodedata
from operator import itemgetter
from refkit.util import doi
from refkit.util import isbn
from refkit.util import citation
from refkit.metadata import Metadata
# Default minimum value for overlap score to be valid without user intervention
defAutoSaveMinimum = 0.99
# Default maximum ratio of the second best to best overlaps for a result to be valid without user intervention
defAutoSaveMaximum = 0.7
def search(lookup, autoSaveMinimum = defAutoSaveMinimum, autoSaveMaximum = defAutoSaveMaximum):
"""
Search for a reference on CrossRef.org given a lookup string. This function goes through the following steps in
order to find a match:
(1) Search the lookup string for a DOI. If one is found, then use that to obtain the reference metadata. If a DOI
is not found, then continue to (2).
(2) Use the lookup string as a query against the CrossRef.org search api. Results obtained from this query are
processed in the following steps.
[2.1] Rank the results by the number of words from lookup that appear in the citation string of each result.
[2.2] If only one result is found then return it if the result of [2.1] is greater than autoSaveMinimum.
[2.3] If only one result is found and [2.2] is not true, then prompt the user to decide if the result is valid.
[2.4] If multiple results are found, return the best one if the result of [2.1] is greater than autoSaveMinimum
and the ratio of the second best result from [2.1] the best one is less than autoSaveMaximum.
[2.5] If multiple results are found and [2.4] is not true, then prompt the user to decide which result is
the best one.
:param lookup: String with the lookup to search for on CrossRef.org
:param autoSaveMinimum: Minimum value for overlap score to be valid without user intervention (0 - 1)
:param autoSaveMaximum: Maximum ratio of the second best to best overlaps for a result to be valid without
user intervention (0 - 1)
:raises ValueError: If a reference with the specified lookup could not be found on CrossRef.org
:returns: Metadata object with information about the reference that was identified
"""
try:
try:
lookup = unicodedata.normalize('NFKD', lookup).encode('ascii', 'ignore')
except:
pass
lookupDoi = _getDoi(lookup, autoSaveMinimum, autoSaveMaximum)
crossRefData = _getMetadataFromDoi(lookupDoi)
return _saveMetadata(crossRefData)
except Exception, e:
raise
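# A minimal usage sketch (hedged); the DOI and citation strings below are
# purely illustrative:
#   meta = search('doi:10.1000/xyz123')                           # lookup containing a DOI
#   meta = search('A. Author, "Some Title", Some Journal, 2010')  # free-form citation
# A ValueError is raised if no match can be found on CrossRef.org.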
def _getMetadataFromDoi(lookupDoi):
"""
Lookup a citation by DOI. The DOI does not need to be formatted.
:param lookupDoi: String with the DOI to search for
:raises ValueError: If the DOI could not be found on CrossRef.org
:returns: Dictionary with information that was obtained from CrossRef.org for the citation with the set DOI
"""
try:
rawDoi = doi.extract(lookupDoi)
url = 'http://api.crossref.org/works/' + rawDoi
return requests.get(url).json()
except Exception:
raise
def _getDoi(lookup, autoSaveMinimum, autoSaveMaximum):
"""
Get the DOI for a lookup string.
:param lookup: String with the lookup to search for on CrossRef.org
:param autoSaveMinimum: Minimum value for overlap score to be valid without user intervention (0 - 1)
:param autoSaveMaximum: Maximum ratio of the second best to best overlaps for a result to be valid without
user intervention (0 - 1)
:raises ValueError: If a DOI could not be obtained for the lookup string
:returns: DOI for the lookup string
"""
try:
resDoi = doi.extract(lookup)
return resDoi
except Exception:
pass
try:
resDoi = _getDoiForCitation(lookup, autoSaveMinimum, autoSaveMaximum)
return resDoi
except Exception, e:
raise
def _getDoiForCitation(lookup, autoSaveMinimum, autoSaveMaximum):
"""
Search CrossRef.org for a given citation.
:param lookup: String with the lookup to search for on CrossRef.org
:param autoSaveMinimum: Minimum value for overlap score to be valid without user intervention (0 - 1)
:param autoSaveMaximum: Maximum ratio of the second best to best overlaps for a result to be valid without
user intervention (0 - 1)
:raises ValueError: If a DOI could not be obtained for the lookup string
:returns: DOI for the lookup string
"""
try:
queryResults = _runQuery(lookup)
try:
bestResult = _getBestQueryResult(lookup, queryResults, autoSaveMinimum, autoSaveMaximum)
return doi.extract(bestResult['doi'])
except Exception, e:
raise ValueError('Could not match citation to DOI')
except Exception:
raise
def _runQuery(value):
"""
Run a free-form query.
:param value: String with the citation to search for
:returns: Dictionary with the results of the query
"""
try:
lowercaseCitation = value.lower()
formattedCitation = urllib.quote_plus(lowercaseCitation)
url = 'http://search.crossref.org/dois?q=' + formattedCitation + '&sort=score&page=1&rows=10&header=true'
return requests.get(url).json()
except Exception:
raise
def _getBestQueryResult(lookup, queryResults, autoSaveMinimum, autoSaveMaximum):
"""
Analyze results from a free-form query to determine the best match that was found.
:param lookup: String with the lookup to search for on CrossRef.org
:param queryResults: Json list with the query results to analyze
:param autoSaveMinimum: Minimum value for overlap score to be valid without user intervention (0 - 1)
:param autoSaveMaximum: Maximum ratio of the second best to best overlaps for a result to be valid without
user intervention (0 - 1)
:returns: Best match that was found or None if a viable match was not found
"""
results = [ (citation.overlap(lookup, i['fullCitation']), i) for i in queryResults ]
results = sorted(results, key = itemgetter(0), reverse = True)
if len(results) == 1 and results[0][0] >= autoSaveMinimum:
return results[0][1]
if len(results) > 1:
if results[0][0] >= autoSaveMinimum and float(results[1][0]) / results[0][0] < autoSaveMaximum:
return results[0][1]
if len(results) > 0:
return _askUserForBestResult(lookup, queryResults)
return None
def _askUserForBestResult(lookup, queryResults):
"""
Prompt the user to decide which in a list of query results is the correct match.
:param lookup: String with the lookup to search for on CrossRef.org
:param queryResults: Json list with the query results to analyze
:returns: Best match that was supplied or None if a viable match was not found
"""
while True:
try:
res = int(_promptForBestResult(lookup, queryResults))
return None if res == 0 else queryResults[res - 1]
except Exception:
pass
def _promptForBestResult(lookup, queryResults):
"""
Prompt the user to decide which in a list of query results is the correct match.
:param lookup: String with the lookup to search for on CrossRef.org
:param queryResults: Json list with the query results to analyze
:returns: Value entered by the user
"""
print ''
print 'LOOKUP STRING: ' + lookup
print 'QUERY RESULTS:'
for i in range(len(queryResults)):
print '[' + str(i + 1) + '] ' + queryResults[i]['fullCitation']
try:
return raw_input('ENTER NUMBER OF CORRECT RESULT (or 0 if no match): ')
except KeyboardInterrupt:
print ''
sys.exit(1)
def _askForManualEntry(lookup):
"""
Prompt the user to input the metadata themselves.
:param lookup: String with the lookup being searched for.
:returns: Object with the user entered information or None if they chose not to enter it.
"""
def _saveMetadata(data):
"""
Extract and save the metadata from the dictionary returned from a CrossRef.org api call.
:param data: Dictionary returned from a CrossRef.org api call
:returns: Metadata object with information saved from the input dictionary
"""
metadata = Metadata()
message = data['message']
_saveIsbn(metadata, message)
_saveYear(metadata, message)
_saveTitle(metadata, message)
_savePages(metadata, message)
_saveJournal(metadata, message)
_saveValue(metadata, message, 'volume', 'volume')
_saveValue(metadata, message, 'issue', 'issue')
_saveValue(metadata, message, 'DOI', 'doi')
_saveValue(metadata, message, 'ISSN', 'issn')
_saveValue(metadata, message, 'URL', 'url')
_saveValue(metadata, message, 'publisher', 'publisher')
_saveNames(metadata, message, 'author', 'author')
_saveNames(metadata, message, 'editor', 'editor')
metadata.tidy()
return metadata
def _saveValue(metadata, crossRefData, dictionaryKey, attributeName):
"""
Save an attribute on metadata object if it exists in the dictionary.
:param metadata: Metadata object to store results
:param crossRefData: Dictionary returned from a CrossRef.org api call
:param dictionaryKey: Key for the value to find in the dictionary
:param attributeName: Name to assign to the attribute
"""
try:
value = crossRefData[dictionaryKey]
if isinstance(value, list):
setattr(metadata, attributeName, value[0])
else:
setattr(metadata, attributeName, value)
except Exception:
pass
def _saveIsbn(metadata, crossRefData):
"""
Save the ISBN of a reference if it is available.
:param metadata: Metadata object to store results
:param crossRefData: Dictionary returned from a CrossRef.org api call
"""
try:
valuesToTest = crossRefData['ISBN']
for i in valuesToTest:
try:
setattr(metadata, 'isbn', isbn.extract(i))
return
except Exception:
pass
except Exception:
pass
def _saveYear(metadata, crossRefData):
"""
Save the year of a reference if it is available.
:param metadata: Metadata object to store results
:param crossRefData: Dictionary returned from a CrossRef.org api call
"""
try:
setattr(metadata, 'year', str(crossRefData['issued']['date-parts'][0][0]))
except Exception:
pass
def _savePages(metadata, crossRefData):
"""
Save start and end pages for a reference.
:param metadata: Metadata object to store results
:param crossRefData: Dictionary returned from a CrossRef.org api call
"""
try:
pages = crossRefData['page'].split('-')
value = pages[0]
if len(value) > 0:
setattr(metadata, 'pageStart', value)
value = pages[1]
if len(value) > 0:
setattr(metadata, 'pageEnd', value)
except Exception:
pass
def _saveTitle(metadata, crossRefData):
"""
Save title for a reference.
:param metadata: Metadata object to store results
:param crossRefData: Dictionary returned from a CrossRef.org api call
"""
title = []
try:
title.append(crossRefData['title'][0])
title.append(crossRefData['subtitle'][0])
except Exception:
pass
try:
setattr(metadata, 'title', ': '.join(title))
except Exception:
pass
def _saveJournal(metadata, crossRefData):
"""
Save the journal for a reference. This saves the journal with the longest name.
:param metadata: Metadata object to store results
:param crossRefData: Dictionary returned from a CrossRef.org api call
"""
res = 0
journals = crossRefData['container-title']
for i in range(1, len(journals)):
if len(journals[i]) > len(journals[res]):
res = i
try:
setattr(metadata, 'journal', journals[res])
except Exception:
pass
def _saveNames(metadata, crossRefData, dictionaryKey, attributeName):
"""
Save a list of authors to a metadata object if they exist in the dictionary.
:param metadata: Metadata object to store results
:param crossRefData: Dictionary returned from a CrossRef.org api call
:param dictionaryKey: Key for the value to find in the dictionary
:param attributeName: Name to assign to the attribute
"""
try:
names = crossRefData[dictionaryKey]
setattr(metadata, attributeName, filter(len, [ _saveName(i) for i in names ]))
except Exception:
pass
def _saveName(name):
"""
Put a name into a dictionary.
:param name: Name to put into dictionary
:returns: Dictionary with givenName and familyName
"""
try:
return {'givenName': name['given'], 'familyName': name['family']}
except Exception:
return {}
|
CitrineInformatics/refkit
|
lookup/crossref.py
|
Python
|
mit
| 13,256
|
# coding:utf-8
from flask.ext.login import UserMixin
from flask.ext.sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class User(db.Model, UserMixin):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
active = db.Column(db.Boolean, default=False)
username = db.Column(db.String(60), unique=True, nullable=False)
password = db.Column(db.String(20), nullable=False)
roles = db.relationship(
'Role', backref='roles', lazy='dynamic')
def __unicode__(self):
return self.username
# flask login expects an is_active method in your user model
# you usually inactivate a user account if you don't want it
# to have access to the system anymore
def is_active(self):
"""
Tells flask-login if the user account is active
"""
return self.active
class Role(db.Model):
"""
Holds our user roles
"""
__tablename__ = 'roles'
name = db.Column(db.String(60), primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
def __unicode__(self):
return self.name
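# A minimal usage sketch (hedged), run inside an application context; the
# credentials below are placeholders only:
#   db.create_all()
#   user = User(username='admin', password='secret', active=True)
#   db.session.add(user)
#   db.session.commit()
#   db.session.add(Role(name='admin', user_id=user.id))
#   db.session.commit()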
|
abacuspix/NFV_project
|
Build_Web_With_Flask/Building web applications with Flask_Code/chapter09/ex05/database.py
|
Python
|
mit
| 1,108
|
import os
from subprocess import Popen
from multiprocessing import Pool
import pyxnat
url = 'https://imagen.cea.fr/imagen_database'
# login and password are not defined in the original example; the placeholders
# below must be replaced with real credentials.
login = 'my_login'
password = 'my_password'
interface = pyxnat.Interface(url, login, password)
def bet(in_image):
path, name = os.path.split(in_image)
in_image = os.path.join(path, name.rsplit('.')[0])
out_image = os.path.join(path, name.rsplit('.')[0] + '_brain')
print('==> %s' % in_image[-120:])
Popen('bet2 %s %s -f 0.5 -g 0 ' % (in_image, out_image),
shell=True).communicate()
return out_image
def notify(message):
print('<== %s' % message[-120:])
pool = Pool(processes=8)
for mprage in interface.select(
'//experiments/*SessionA*/assessors/*ADNI*/out/resources/files'
).where([('psytool:tci_parentData/TCI051', '=', '1'), 'AND']):
pool.apply_async(bet, (mprage.get(),), callback=notify)
pool.close()
pool.join()
|
BrainIntensive/OnlineBrainIntensive
|
resources/HCP/pyxnat/pyxnat/examples/examples.py
|
Python
|
mit
| 868
|
#from django.db.models import Model, TextField
#from djangotoolbox.fields import ListField, EmbeddedModelField, DictField
from django.contrib.auth.models import User
from django.db import connections
from bson.objectid import ObjectId
from pymongo.errors import InvalidId
import csv, re, json, datetime, random
from collections import defaultdict
import tb_app.kripp as kripp
def uses_mongo(function):
def _inner(*args, **kwargs):
mongo = connections["default"]
return function(mongo, *args, **kwargs)
return _inner
class MongoEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, ObjectId):
return str(obj)
if hasattr(obj, 'isoformat'):
return obj.isoformat()
return json.JSONEncoder.default(self, obj)
##############################################################################
#This is one way new collections are created
def convert_document_csv_to_bson(csv_text):
C = csv.reader(csv.StringIO(csv_text))
#Parse the header row
H = C.next()
#Capture the url/content column index
url_index, content_index = None, None
if 'url' in H:
url_index = H.index('url')
if 'content' in H:
content_index = H.index('content')
if url_index==None and content_index==None:
raise Exception('You must specify either a "url" column or a "content" column in the .csv header.')
#Identify metadata_fields
meta_fields = {}
for h in H:
if re.match('META_', h):
name = re.sub('^META_', '', h)
index = H.index(h)
if name in meta_fields:
raise Exception('Duplicate META_ name : '+name)
meta_fields[name] = index
# print json.dumps(meta_fields, indent=2)
documents_json = []
#http://lethain.com/handling-very-large-csv-and-xml-files-in-python/
#print csv.field_size_limit()
csv.field_size_limit(1000000)
#For each row in the collection
for row in C:
j = {}
#Grab the content or url
#If both are present, url gets precedence
if url_index != None:
j['url'] = row[url_index]
elif content_index != None:
j['content'] = row[content_index]
#Grab metadata fields
m = {}
for f in meta_fields:
#Don't include missing values
#! Maybe include other missing values here
if row[meta_fields[f]] != '':
m[f] = row[meta_fields[f]]
#Don't include empty metadata objects
if m != {}:
j["metadata"] = m
documents_json.append(j)
# print json.dumps(documents_json, indent=2)
return documents_json
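# A minimal usage sketch (hedged): a two-row CSV with a 'content' column and one
# META_ column. The sample text is illustrative only.
#   sample_csv = 'content,META_source\nHello world,demo\nSecond doc,demo\n'
#   convert_document_csv_to_bson(sample_csv)
#   -> [{'content': 'Hello world', 'metadata': {'source': 'demo'}},
#       {'content': 'Second doc', 'metadata': {'source': 'demo'}}]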
def get_new_collection_json(name, description, documents):
""" Create a new collection, given the name, description, and documents """
J = {
'profile' : {
'name' : name,
'description' : description,
'created_at' : datetime.datetime.now(),
'size' : len(documents),
},
'documents' : documents,
}
return J
@uses_mongo
def create_collection_json(mongo, name, description, collections):
""" Create a new collection using documents from other collections
collections is an array with the form:
[{tb_app_collection.$id : docs to retrieve from this collection}]
"""
coll = mongo.get_collection("tb_app_collection")
documents = []
for id_ in collections:
collection = coll.find_one({"_id": ObjectId(id_)})
doc_count = collections[id_]
doc_list = collection["documents"]
random.shuffle( doc_list )
for doc in doc_list[:doc_count]:
doc["metadata"]["source_id"] = id_
doc["metadata"]["source_name"] = collection["profile"]["name"]
documents += doc_list[:doc_count]
random.shuffle(documents)
return get_new_collection_json(name, description, documents)
def get_default_codebook_questions():
return [
{
"question_type": "Static text",
"var_name": "default_question",
"params": {
"header_text": "<h2> New codebook </h2><p><strong>Use the controls at right to add questions.</strong></p>",
}
},
{
"question_type": "Multiple choice",
"var_name": "mchoice",
"params": {
"header_text": "Here is an example of a multiple choice question. Which answer do you like best?",
"answer_array": ["This one", "No, this one", "A third option"],
}
},
{
"question_type": "Short essay",
"var_name": "essay",
"params": {
"header_text": "Here's a short essay question.",
}
}
]
def create_new_variable_json(question_index, subquestion_index, variable_name, question_header, subquestion_label, variable_type):
return {
'question_index': question_index,
'subquestion_index': subquestion_index,
'variable_name': variable_name,
'question_header': question_header,
'subquestion_label': subquestion_label,
'variable_type': variable_type
}
#! As the code is written, this method is never invoked.
#! Using the variables field would help clean up the code in a bunch of places
#! * reliability checking / csv export / table generation on the batch page
def get_codebook_variables_from_questions(questions):
variables = []
for i,q in enumerate(questions):
if q["var_name"]:
var_name = "_"+q["var_name"]
else:
var_name = ''
short_text = q["params"]["header_text"]
#variable_type = q["params"]["variable_type"]
if q["question_type"] == 'Static text':
variables.append( create_new_variable_json(i+1, None, "Q"+str(i+1)+var_name, short_text, "", "none") )
if q["question_type"] in ['Multiple choice', 'Two-way scale']:
variables.append( create_new_variable_json(i+1, None, "Q"+str(i+1)+var_name, short_text, "", "ordinal") )
if q["question_type"] == 'Check all that apply':
for j,a in enumerate(q["params"]["answer_array"]):
variables.append( create_new_variable_json(i+1, None, "Q"+str(i+1)+"_"+str(j+1)+var_name, short_text, "", "nominal") )
if q["question_type"] in ['Text box', 'Short essay']:
variables.append( create_new_variable_json(i+1, None, "Q"+str(i+1)+var_name, short_text, "", "text") )
elif q["question_type"] == 'Radio matrix':
for j,p in enumerate(q["params"]["question_array"]):
variables.append( create_new_variable_json(i+1, j+1, "Q"+str(i+1)+"_"+str(j+1)+var_name, short_text, p, "interval") )
elif q["question_type"] == 'Checkbox matrix':
for j,p in enumerate(q["params"]["question_array"]):
for k,r in enumerate(q["params"]["answer_array"]):
variables.append( create_new_variable_json(i+1, j+1, "Q"+str(i+1)+"_"+str(j+1)+"_"+str(k+1)+var_name, short_text, p, "nominal") )
elif q["question_type"] == 'Two-way matrix':
for j,p in enumerate(q["params"]["left_statements"]):
variables.append( create_new_variable_json(i+1, j+1, "Q"+str(i+1)+"_"+str(j+1)+var_name, short_text, p+"/"+q["params"]["right_statements"][j], "ordinal") )
elif q["question_type"] == 'Text matrix':
for j,p in enumerate(q["params"]["answer_array"]):
variables.append( create_new_variable_json(i+1, j+1, "Q"+str(i+1)+"_"+str(j+1)+var_name, short_text, p, "text") )
return variables
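# A brief illustration (hedged): a 'Radio matrix' question at position 2 with
# var_name 'trust' and two sub-questions yields the variables 'Q2_1_trust' and
# 'Q2_2_trust', both with variable_type 'interval'.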
def get_new_codebook_json(name, description):
questions = get_default_codebook_questions()
variables = get_codebook_variables_from_questions(questions)
#Construct object
return {
'profile' : {
'name' : name,
'description' : description,
'created_at' : datetime.datetime.now(),
'version' : 1,
'children' : [],
'batches' : [],
'parent' : None,
},
'questions' : questions,
'variables' : variables,
}
def get_revised_codebook_json(parent_codebook, question_json):
#print parent_codebook
J = {
'profile' : {
'description' : parent_codebook['profile']["description"],
'created_at' : datetime.datetime.now(),
'version' : parent_codebook['profile']["version"] + 1,
'children' : [],
'batches' : [],
'parent' : parent_codebook['_id'],#ObjectId(parent_id),
},
'questions' : question_json,
'variables' : get_codebook_variables_from_questions(question_json),
}
if parent_codebook['profile']["children"]:
J['profile']['name'] = parent_codebook['profile']["name"] + " (branch)"
else:
J['profile']['name'] = parent_codebook['profile']["name"]
return J
def gen_codebook_column_names(codebook):
"""codebook should be in json format, hot off a mongodb query"""
col_names = ['created_at']
for i,q in enumerate(codebook["questions"]):
if q["var_name"]:
var_name = "_"+q["var_name"]
else:
var_name = ''
if q["question_type"] in ['Static text', 'Multiple choice', 'Check all that apply', 'Two-way scale', 'Text box', 'Short essay']:
col_names.append("Q"+str(i+1)+var_name)
elif q["question_type"] in ['Radio matrix', 'Checkbox matrix']:
for j,p in enumerate(q["params"]["question_array"]):
col_names.append("Q"+str(i+1)+"_"+str(j+1)+var_name)
elif q["question_type"] == 'Two-way matrix':
for j,p in enumerate(q["params"]["left_statements"]):
col_names.append("Q"+str(i+1)+"_"+str(j+1)+var_name)
elif q["question_type"] == 'Text matrix':
for j,p in enumerate(q["params"]["answer_array"]):
col_names.append("Q"+str(i+1)+"_"+str(j+1)+var_name)
return col_names
def gen_col_index_from_col_names(col_names):
return dict([(v,k) for (k,v) in enumerate(col_names)])
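# e.g. gen_col_index_from_col_names(['created_at', 'Q1']) == {'created_at': 0, 'Q1': 1}
# (small illustrative call, not present in the original source)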
def gen_csv_column_from_batch_labels(labels, col_index):
csv_col = [None for i in range(len(col_index))]
    # print labels
for q in labels:
if type(labels[q]) == unicode:
csv_col[col_index[q]] = str(labels[q].encode("utf-8"))
else:
csv_col[col_index[q]] = labels[q]
return csv_col
### Batches ###################################################################
def get_batch_documents_json(coders, pct_overlap, shuffle, collection):
k = len(collection["documents"])
overlap = int((k * pct_overlap) / 100)
import random
doc_ids = range(k)
if shuffle:
# ? This can stay here until we do our DB refactor.
random.shuffle(doc_ids)
shared = doc_ids[:overlap]
unique = doc_ids[overlap:]
#Construct documents object
documents = []
empty_labels = dict([(x, []) for x in coders])
for i in shared:
documents.append({
'index': i,
# 'content': collection["documents"][i]["content"],
'labels': empty_labels
})
for i in unique:
documents.append({
'index': i,
# 'content': collection["documents"][i]["content"],
'labels': { coders[i%len(coders)] : [] }
#Populate the list with a random smattering of fake labels
#'labels': {coders[i % len(coders)]: random.choice([None for x in range(2)] + range(20))}
})
if shuffle:
random.shuffle(documents)
return documents
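# Illustrative arithmetic only (not part of the original module): with 10 documents,
# coders = ["a", "b"] and pct_overlap = 50, overlap = int((10 * 50) / 100) = 5, so the
# first 5 (shuffled) document indices get an empty label slot for every coder and the
# remaining 5 are dealt round-robin, one coder each.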
def get_new_batch_json(count, coders, pct_overlap, shuffle, codebook, collection):
#Construct profile object
profile = {
'name': 'Batch ' + str(count + 1),
'description': collection["profile"]["name"][:20] + " * " + codebook["profile"]["name"][:20] + " (" + str(codebook["profile"]["version"]) + ")",
'index': count + 1,
'codebook_id': codebook['_id'],
'collection_id': collection['_id'],
'coders': coders,
'pct_overlap': pct_overlap,
'shuffle': shuffle,
'created_at': datetime.datetime.now(),
}
documents = get_batch_documents_json(coders, pct_overlap, shuffle, collection)
#Construct batch object
batch = {
'profile' : profile,
'documents': documents,
'reports': {
'progress': {},
'reliability': {},
},
}
return batch
def get_most_recent_answer_set(answer_set_list):
    #Get the most recent answer set for this coder (important if the coder did an "undo")
most_recent_answer_set = {}
most_recent_date = None
for answer_set in answer_set_list:
if not most_recent_date or answer_set["created_at"] > most_recent_date:
most_recent_answer_set = answer_set
most_recent_date = answer_set["created_at"]
return most_recent_answer_set
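# A minimal sketch (added for illustration; the values below are hypothetical) of how
# get_most_recent_answer_set behaves:
#
#   answer_sets = [
#       {"Q1": 3, "created_at": datetime.datetime(2013, 1, 1)},
#       {"Q1": 4, "created_at": datetime.datetime(2013, 1, 2)},  # later re-code after an "undo"
#   ]
#   get_most_recent_answer_set(answer_sets)  # -> the second dict (newest "created_at")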
@uses_mongo
def update_batch_progress(mongo, id_):
#Connect to the DB
coll = mongo.get_collection("tb_app_batch")
#Retrieve the batch
batch = coll.find_one({"_id": ObjectId(id_)})
# print json.dumps(batch, indent=2, cls=MongoEncoder)
#Scaffold the progress object
coders = batch["profile"]["coders"]
progress = {
"coders": dict([(c, {"assigned":0, "complete":0}) for c in coders]),
"summary": {}
}
#Count total and complete document codes
assigned, complete = 0, 0
for doc in batch["documents"]:
for coder in doc["labels"]:
assigned += 1
progress["coders"][coder]["assigned"] += 1
if doc["labels"][coder] != []:
complete += 1
progress["coders"][coder]["complete"] += 1
#Calculate percentages
for coder in progress["coders"]:
c = progress["coders"][coder]
c["percent"] = round(float(100 * c["complete"]) / c["assigned"], 1)
progress["summary"] = {
"assigned": assigned,
"complete": complete,
"percent": round(float(100 * complete) / assigned, 1),
}
batch["reports"]["progress"] = progress
coll.update({"_id": ObjectId(id_)}, batch)
def convert_batch_to_2d_arrays(batch, var_names, missing_val=None):
#2-D arrays wrapped in a dictionary : [question][document][coder]
coder_index = dict([(c,i) for i,c in enumerate(batch["profile"]["coders"])])
#Create empty arrays
#! The "None" here should be zero for CATA variables.
#! But I don't have a good way to detect CATA variables.
#! This code needs a refactor, but now is not the time.
code_arrays = dict([ (n, [[None for c in coder_index] for d in batch["documents"]]) for n in var_names])
for i, doc in enumerate(batch["documents"]):
for coder in doc["labels"]:
answer_set = get_most_recent_answer_set(doc["labels"][coder])
#print answer_set
for question in answer_set:
if question in code_arrays.keys():
try:
#print '\t'.join([str(x) for x in [question, i, coder, answer_set[question]]])
code_arrays[question][i][coder_index[coder]] = float(answer_set[question])
except ValueError:
code_arrays[question][i][coder_index[coder]] = missing_val
return code_arrays
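# Sketch of the structure returned by convert_batch_to_2d_arrays (added for clarity;
# the variable name, sizes and values below are made up):
#
#   {
#       "Q1_example": [          # one [document][coder] grid per variable name
#           [1.0, 2.0],          # document 0: one cell per coder
#           [None, 3.0],         # document 1: None marks a missing or unparseable code
#       ],
#   }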
@uses_mongo
def update_batch_reliability(mongo, batch_id):
batch = mongo.get_collection("tb_app_batch").find_one({"_id": ObjectId(batch_id)})
codebook = mongo.get_collection("tb_app_codebook").find_one({"_id": ObjectId(batch["profile"]["codebook_id"])})
variables = codebook["variables"]
var_names = [v["variable_name"] for v in variables]
data_arrays = convert_batch_to_2d_arrays(batch, var_names)
summary = {}
for i, v in enumerate(variables):
# print v
v_name = v["variable_name"]
# print q, '\t', kripp.alpha(data_arrays[q], kripp.interval)
#print v_name, '\t', v["variable_type"]
        #Get variable metric
        v_type = v["variable_type"]
        metric = None  # reset on every pass so "text"/"none" variables don't reuse a stale metric (or raise UnboundLocalError on the first iteration)
        if v_type == "nominal":
metric = kripp.nominal
elif v_type in ["interval", "ordinal"]:
metric = kripp.interval
elif v_type == "ratio":
metric = kripp.ratio
if metric:
alpha = kripp.alpha(data_arrays[v_name], metric)
try:
alpha_100 = 100*alpha
except TypeError:
alpha_100 = None
summary[v_name] = dict(v.items() + {
'alpha': alpha,
'alpha_100': alpha_100,
}.items())
#Build the reliability object
reliability = {
"updated_at" : datetime.datetime.now(),
#"docs": {},
#"coders": dict([(c, {}) for c in coders]),
"summary": summary,
}
#batch["reports"]["reliability"] = reliability
#print json.dumps(reliability, indent=2, cls=MongoEncoder)
mongo.get_collection("tb_app_batch").update(
{ "_id": ObjectId(batch_id) },
{ "$set": { 'reports.reliability' : reliability}}
)
|
abegong/textbadger
|
textbadger/tb_app/models.py
|
Python
|
mit
| 17,389
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RouteFiltersOperations:
"""RouteFiltersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
route_filter_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
route_filter_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
async def get(
self,
resource_group_name: str,
route_filter_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.RouteFilter":
"""Gets the specified route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param expand: Expands referenced express route bgp peering resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteFilter, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_11_01.models.RouteFilter
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilter"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
route_filter_name: str,
route_filter_parameters: "_models.RouteFilter",
**kwargs: Any
) -> "_models.RouteFilter":
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilter"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(route_filter_parameters, 'RouteFilter')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('RouteFilter', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('RouteFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
route_filter_name: str,
route_filter_parameters: "_models.RouteFilter",
**kwargs: Any
) -> AsyncLROPoller["_models.RouteFilter"]:
"""Creates or updates a route filter in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param route_filter_parameters: Parameters supplied to the create or update route filter
operation.
:type route_filter_parameters: ~azure.mgmt.network.v2018_11_01.models.RouteFilter
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RouteFilter or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_11_01.models.RouteFilter]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilter"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
route_filter_parameters=route_filter_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
route_filter_name: str,
route_filter_parameters: "_models.PatchRouteFilter",
**kwargs: Any
) -> "_models.RouteFilter":
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilter"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(route_filter_parameters, 'PatchRouteFilter')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
async def begin_update(
self,
resource_group_name: str,
route_filter_name: str,
route_filter_parameters: "_models.PatchRouteFilter",
**kwargs: Any
) -> AsyncLROPoller["_models.RouteFilter"]:
"""Updates a route filter in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param route_filter_parameters: Parameters supplied to the update route filter operation.
:type route_filter_parameters: ~azure.mgmt.network.v2018_11_01.models.PatchRouteFilter
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RouteFilter or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_11_01.models.RouteFilter]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilter"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
route_filter_parameters=route_filter_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.RouteFilterListResult"]:
"""Gets all route filters in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteFilterListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_11_01.models.RouteFilterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteFilterListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters'} # type: ignore
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.RouteFilterListResult"]:
"""Gets all route filters in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteFilterListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_11_01.models.RouteFilterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteFilterListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeFilters'} # type: ignore
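# Hedged usage sketch (not part of the generated file; the client and credential class
# names below are assumptions based on the usual azure-mgmt-network / azure-identity layout):
#
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.network.v2018_11_01.aio import NetworkManagementClient
#
#   async def print_route_filters():
#       async with NetworkManagementClient(DefaultAzureCredential(), "<subscription id>") as client:
#           async for rf in client.route_filters.list_by_resource_group("<resource group>"):
#               print(rf.name)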
|
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_11_01/aio/operations/_route_filters_operations.py
|
Python
|
mit
| 30,024
|
#!/usr/bin/env python
import os
import datetime
import re
import sqlite3
# this function returns a list of the names of the immediate subdirectories
def get_immediate_subdirectories(a_dir):
return [name for name in os.listdir(a_dir)
if os.path.isdir(os.path.join(a_dir, name))]
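# e.g. get_immediate_subdirectories('/tmp') might return ['folderA', 'folderB']
# (hypothetical names, shown only to illustrate that a list of names is returned)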
# the base path of the DRMC as it is mounted on VMDRMC02
base_path_1 = '/home/archivesuser/moma/drmc/'
base_path_2 = '/mnt/pre-ingest/'
# a dictionary of the workflow; each value is a nested list: 1) directory path, 2) placeholder for the count of directories, 3) placeholder list of artwork folder names, 4) placeholder for directory size in bytes
# (note: the 'preIngestIsilon' key appears twice below, so the second entry overwrites the first)
locations_dict = {'preIngest':[base_path_1+'pre-ingest_staging','',[''],''], 'readyForIngest':[base_path_1+'ready_for_ingest-bagged','',[''],''], 'readyForIngest2':[base_path_1+'ready_for_ingest-unbagged','',[''],''], 'artworkBacklog':[base_path_1+'Artwork_level_backlog','',[''],''], 'preIngestIsilon':[base_path_2+'staging','',[''],''], 'preIngestIsilon':[base_path_2+'ready_for_ingest','',[''],'']}
# for each location in the above dictionary
for location in locations_dict:
#assemble the full path
fullpath = locations_dict[location][0]
#set this in the dictionary
locations_dict[location][0] = fullpath
#get the immediate subdirectories
fullpath_listing = get_immediate_subdirectories(fullpath)
#put them in the dictionary
locations_dict[location][2] = fullpath_listing
# count the length
fullpath_size = len(fullpath_listing)
locations_dict[location][1] = fullpath_size
# print locations_dict[location][1]
# The sqlite DB called "metrics" has 9 tables
#
# CREATE TABLE artworkBacklog (ObjID int, folderName text, appearedOn text, disappearedOn text);
# CREATE TABLE counting (date text, preIngest int, runComponent int, readyForIngest int, artworkBacklog int, mpaBacklog int, preIngestIsilon int, readyForIngest2 int);
# CREATE TABLE mpaBacklog (ObjID int, folderName text, appearedOn text, disappearedOn text);
# CREATE TABLE preIngest (ObjID int, folderName text, appearedOn text, disappearedOn text);
# CREATE TABLE preIngestIsilon (ObjID int, folderName text, appearedOn text, disappearedOn text);
# CREATE TABLE readyForIngest (ObjID int, folderName text, appearedOn text, disappearedOn text);
# CREATE TABLE readyForIngest2 (ObjID int, folderName text, appearedOn text, disappearedOn text);
# CREATE TABLE runComponent (ObjID int, folderName text, appearedOn text, disappearedOn text);
# CREATE TABLE size (date text, preIngest int, runComponent int, readyForIngest int, artworkBacklog int, mpaBacklog int, preIngestIsilon int, readyForIngest2 int);
#
i = datetime.datetime.now()
now = i.isoformat()
#######
####### this first function is to add any newly appeared directories to the database, and log the present date as the "appeared on" date
#######
def dbSync(location):
a = 0
b = 0
artworklist = locations_dict[location][2]
for artwork in artworklist:
objectID = re.sub('.*---.*---.*---', '', artwork)
if objectID != "" and len(objectID) < 10 and isinstance(objectID, int) == True:
# these conditions mitigate parsing errors for cases when the object ID is missing from the folder name
conn = sqlite3.connect('/var/www/automation-audit/metrics.db')
c = conn.cursor()
query = c.execute("SELECT * FROM {0} WHERE ObjID = '{1}' ;".format(location,objectID))
one = c.fetchone()
if one != None:
# print "{0} is already in the {1} DB".format(one,location)
a = a+1
else:
# print "{0} will be added to the {1} table".format(objectID,location)
b = b+1
c.execute("INSERT INTO "+location+" VALUES (?,?,?,'')",(objectID,buffer(artwork),now))
conn.commit()
conn.close()
print "{0} folders that are already tracked in the {1} DB".format(a, location)
print "{0} folders that have been added to the {1} DB".format(b, location)
#######
####### this second function is to look through the database, and see if anything in the database is no longer in the directory -- if so, log the date
#######
def checkForMoves(location):
a = 0
b = 0
artworklist = locations_dict[location][2]
conn = sqlite3.connect('/var/www/automation-audit/metrics.db')
c = conn.cursor()
query = c.execute("SELECT * FROM {0}".format(location))
for row in query:
objectID = row[0]
templist = []
for artwork in artworklist:
artworkObjectID = re.sub('.*---.*---.*---', '', artwork)
            if artworkObjectID != "" and len(artworkObjectID) < 10 and artworkObjectID.isdigit():  # string check, as in dbSync
templist.append(int(artworkObjectID))
if objectID in templist:
# print "{0} is in the {1} table and still in the {2} dir".format(objectID,location,locations_dict[location][0])
a = a+1
else:
# print "something has disappeared from the {0} dir".format(locations_dict[location][0])
c.execute("UPDATE "+location+" SET disappearedOn=(?) WHERE ObjID=(?)",(now,objectID))
b = b+1
print "{} folders have not moved".format(a)
print "{} folders have disappeared".format(b)
conn.commit()
conn.close()
def updateCounts():
i = datetime.datetime.now().date()
updatedate = i.isoformat()
print "{} is the date".format(updatedate)
conn = sqlite3.connect('/var/www/automation-audit/metrics.db')
c = conn.cursor()
query = c.execute("SELECT * FROM counting WHERE Date=(?)",(updatedate,))
one = c.fetchone()
print "result is: {}".format(one)
if one == None:
print "Logging counts for today..."
c.execute("INSERT INTO counting VALUES (?,'','','','','','','')",(updatedate,))
for location in locations_dict:
c.execute("UPDATE counting SET "+location+"=(?) WHERE Date=(?)",(locations_dict[location][1],updatedate))
conn.commit()
conn.close()
else:
print "Already an entry for today - let's update those numbers"
for location in locations_dict:
c.execute("UPDATE counting SET "+location+"=(?) WHERE Date=(?)",(locations_dict[location][1],updatedate))
conn.commit()
conn.close()
for location in locations_dict:
print 'moving on to %s table' % location
dbSync(location)
checkForMoves(location)
updateCounts()
|
finoradin/moma-utils
|
pre-ingest-metrics/metrics.py
|
Python
|
mit
| 6,065
|
from apps.account.models import User
from apps.common.hasher import get_hasher
from apps.common.serializers import HashidModelSerializer
from django.test import TestCase
class TestUserSerializer(HashidModelSerializer):
"""
For testing
"""
class Meta:
fields = ('pk', 'email')
model = User
class TestHashidModelSerializer(TestCase):
def test_hashid_model_serializer_successful(self):
"""
Test that we can serialize with the hashid serializer
:return: None
"""
hasher = get_hasher()
user = User.objects.create_user(email='test@someemail.com')
hashed_pk = hasher.encode(user.pk)
serialized_user_data = TestUserSerializer(user).data
self.assertEquals(len(serialized_user_data.values()), 2) # only pk and email
self.assertEquals(serialized_user_data['email'], user.email)
self.assertEquals(serialized_user_data['pk'], hashed_pk)
|
RonquilloAeon/django-golden-image
|
src/apps/common/test/test_serializers.py
|
Python
|
mit
| 955
|
from __future__ import unicode_literals
# -*- coding: utf-8 -*-
# Write a program that asks how much you earn per hour and the number of hours worked in the month. Calculate and display your total salary for that month.
ganho = float(raw_input("Quanto você ganha por hora? \n"))
horas = int(raw_input("Quantas horas você trabalha por mês? \n"))
print "O seu salário total no final do mês é de: {}".format(ganho*horas)
|
josecostamartins/pythonreges
|
lista1/exercicio7.py
|
Python
|
mit
| 432
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Title(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scattercarpet.marker.colorbar"
_path_str = "scattercarpet.marker.colorbar.title"
_valid_props = {"font", "side", "text"}
# font
# ----
@property
def font(self):
"""
Sets this color bar's title font. Note that the title's font
used to be set by the now deprecated `titlefont` attribute.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattercarpet.marker.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.scattercarpet.marker.colorbar.title.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# side
# ----
@property
def side(self):
"""
Determines the location of color bar's title with respect to
the color bar. Note that the title's location used to be set by
the now deprecated `titleside` attribute.
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
Any
"""
return self["side"]
@side.setter
def side(self, val):
self["side"] = val
# text
# ----
@property
def text(self):
"""
Sets the title of the color bar. Note that before the existence
of `title.text`, the title's contents used to be defined as the
`title` attribute itself. This behavior has been deprecated.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
font
Sets this color bar's title font. Note that the title's
font used to be set by the now deprecated `titlefont`
attribute.
side
Determines the location of color bar's title with
respect to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
text
Sets the title of the color bar. Note that before the
existence of `title.text`, the title's contents used to
be defined as the `title` attribute itself. This
behavior has been deprecated.
"""
def __init__(self, arg=None, font=None, side=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scattercarpet.
marker.colorbar.Title`
font
Sets this color bar's title font. Note that the title's
font used to be set by the now deprecated `titlefont`
attribute.
side
Determines the location of color bar's title with
respect to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
text
Sets the title of the color bar. Note that before the
existence of `title.text`, the title's contents used to
be defined as the `title` attribute itself. This
behavior has been deprecated.
Returns
-------
Title
"""
super(Title, self).__init__("title")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattercarpet.marker.colorbar.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattercarpet.marker.colorbar.Title`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("font", None)
_v = font if font is not None else _v
if _v is not None:
self["font"] = _v
_v = arg.pop("side", None)
_v = side if side is not None else _v
if _v is not None:
self["side"] = _v
_v = arg.pop("text", None)
_v = text if text is not None else _v
if _v is not None:
self["text"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
|
plotly/python-api
|
packages/python/plotly/plotly/graph_objs/scattercarpet/marker/colorbar/_title.py
|
Python
|
mit
| 6,786
|
from setuptools import find_packages, setup
import proxyprefix
setup(
name='proxyprefix',
version=proxyprefix.__version__,
description='Prefix SCRIPT_NAME with X-Forwarded-Prefix header',
long_description=proxyprefix.__doc__,
author='Yola',
author_email='engineers@yola.com',
license='MIT (Expat)',
url='https://github.com/yola/proxyprefix',
packages=find_packages(exclude=['tests', 'tests.*']),
test_suite='nose.collector',
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware',
],
extras_require={
'djproxy': ['djproxy>=2.0.0'],
},
)
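# Note (not part of the original file): the optional djproxy integration declared in
# extras_require can be installed with `pip install "proxyprefix[djproxy]"`.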
|
yola/proxyprefix
|
setup.py
|
Python
|
mit
| 769
|
def cycle_sort(nums):
    """Sorts nums in place using cycle sort (minimizes the number of writes)."""
    n = len(nums)
    for cycle_index in range(n - 1):
        item, index = nums[cycle_index], cycle_index
        # Find where the item belongs by counting the smaller elements to its right.
        for i in range(cycle_index + 1, n):
            if nums[i] < item:
                index += 1
        # The item is already in its correct position.
        if index == cycle_index:
            continue
        # Skip past duplicates so equal items keep distinct slots.
        while item == nums[index]:
            index += 1
        # Place the item and pick up the element that was there.
        nums[index], item = item, nums[index]
        # Rotate the rest of the cycle until it closes back at cycle_index.
        while index != cycle_index:
            index = cycle_index
            for i in range(cycle_index + 1, n):
                if nums[i] < item:
                    index += 1
            while item == nums[index]:
                index += 1
            nums[index], item = item, nums[index]
def main():
nums = [3, 654, 3, 33, 10, 8, 2, 99, 101, 94, 60]
print(nums)
cycle_sort(nums)
print(nums)
if __name__ == '__main__':
main()
|
sshh12/SchoolCode
|
Algorithms/SortingSeaching/CycleSort.py
|
Python
|
mit
| 869
|
# Copyright (c) 2017 Weitian LI <liweitianux@live.com>
# MIT license
"""
Portal to 'acispy' module/package
"""
import os
import sys
sys.path.insert(
0,
os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
)
import acispy
|
liweitianux/chandra-acis-analysis
|
bin/_context.py
|
Python
|
mit
| 243
|
import math
for a in range(1,1000,1):
for b in range(1,1000,1):
c = math.sqrt(a**2+b**2)
        if a + b + c == 1000:
            print a, b, c
            print a * b * c
|
WinCanton/PYTHON_CODE
|
PythagoreanTriplet/PythagoreanTriplet.py
|
Python
|
mit
| 215
|
"""
An Armstrong number is equal to the sum of its own digits each raised to the
power of the number of digits.
For example, 370 is an Armstrong number because 3*3*3 + 7*7*7 + 0*0*0 = 370.
Armstrong numbers are also called Narcissistic numbers and Pluperfect numbers.
On-Line Encyclopedia of Integer Sequences entry: https://oeis.org/A005188
"""
PASSING = (1, 153, 370, 371, 1634, 24678051, 115132219018763992565095597973971522401)
FAILING: tuple = (-153, -1, 0, 1.2, 200, "A", [], {}, None)
def armstrong_number(n: int) -> bool:
"""
Return True if n is an Armstrong number or False if it is not.
>>> all(armstrong_number(n) for n in PASSING)
True
>>> any(armstrong_number(n) for n in FAILING)
False
"""
if not isinstance(n, int) or n < 1:
return False
# Initialization of sum and number of digits.
sum = 0
number_of_digits = 0
temp = n
# Calculation of digits of the number
while temp > 0:
number_of_digits += 1
temp //= 10
# Dividing number into separate digits and find Armstrong number
temp = n
while temp > 0:
rem = temp % 10
sum += rem**number_of_digits
temp //= 10
return n == sum
def pluperfect_number(n: int) -> bool:
"""Return True if n is a pluperfect number or False if it is not
>>> all(armstrong_number(n) for n in PASSING)
True
>>> any(armstrong_number(n) for n in FAILING)
False
"""
if not isinstance(n, int) or n < 1:
return False
# Init a "histogram" of the digits
digit_histogram = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
digit_total = 0
sum = 0
temp = n
while temp > 0:
temp, rem = divmod(temp, 10)
digit_histogram[rem] += 1
digit_total += 1
for (cnt, i) in zip(digit_histogram, range(len(digit_histogram))):
sum += cnt * i**digit_total
return n == sum
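# Worked example (added for illustration): for n = 153 the histogram records one
# occurrence each of the digits 1, 5 and 3, digit_total is 3, and
# 1 * 1**3 + 1 * 5**3 + 1 * 3**3 = 1 + 125 + 27 = 153, so 153 is pluperfect.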
def narcissistic_number(n: int) -> bool:
"""Return True if n is a narcissistic number or False if it is not.
    >>> all(narcissistic_number(n) for n in PASSING)
    True
    >>> any(narcissistic_number(n) for n in FAILING)
False
"""
if not isinstance(n, int) or n < 1:
return False
expo = len(str(n)) # the power that all digits will be raised to
# check if sum of each digit multiplied expo times is equal to number
return n == sum(int(i) ** expo for i in str(n))
def main():
"""
Request that user input an integer and tell them if it is Armstrong number.
"""
num = int(input("Enter an integer to see if it is an Armstrong number: ").strip())
print(f"{num} is {'' if armstrong_number(num) else 'not '}an Armstrong number.")
print(f"{num} is {'' if narcissistic_number(num) else 'not '}an Armstrong number.")
print(f"{num} is {'' if pluperfect_number(num) else 'not '}an Armstrong number.")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
|
TheAlgorithms/Python
|
maths/armstrong_numbers.py
|
Python
|
mit
| 3,035
|
import random
class Onepad:
@staticmethod
def encrypt(text: str) -> tuple[list[int], list[int]]:
"""Function to encrypt text using pseudo-random numbers"""
plain = [ord(i) for i in text]
key = []
cipher = []
for i in plain:
k = random.randint(1, 300)
c = (i + k) * k
cipher.append(c)
key.append(k)
return cipher, key
@staticmethod
def decrypt(cipher: list[int], key: list[int]) -> str:
"""Function to decrypt text using pseudo-random numbers."""
plain = []
for i in range(len(key)):
p = int((cipher[i] - (key[i]) ** 2) / key[i])
plain.append(chr(p))
return "".join([i for i in plain])
if __name__ == "__main__":
c, k = Onepad().encrypt("Hello")
print(c, k)
print(Onepad().decrypt(c, k))
|
TheAlgorithms/Python
|
ciphers/onepad_cipher.py
|
Python
|
mit
| 872
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Display the current date and time.
import datetime
if __name__ == "__main__":
today = datetime.date.today()
todaydetail = datetime.datetime.today()
print "now year is '", todaydetail.year, "'."
print "now month is '", todaydetail.month, "'."
print "now day is '", todaydetail.day, "'"
print "now hour is '", todaydetail.hour, "'."
print "now minute is '", todaydetail.minute, "'."
print "now second is '", todaydetail.second, "'."
print "now microsecond is '", todaydetail.microsecond, "'."
wd = ["monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday"]
print "now weekday is '", todaydetail.weekday(), "(", wd[todaydetail.weekday()], ")'."
|
kotaro920oka/freescripts
|
show-now.py
|
Python
|
mit
| 782
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# operation platform documentation build configuration file, created by
# sphinx-quickstart on Tue Aug 1 12:57:18 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.graphviz']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u"golden's 文档笔记"
copyright = '2017, golden'
author = 'golden'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'latest'
# The full version, including alpha/beta/rc tags.
release = 'latest'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "zh_CN"
html_search_language = 'zh_CN'
source_encoding = 'UTF-8'
locale_dirs = ['locales', './locale']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = ['_themes']
html_theme_options = {
'collapse_navigation': True,
'display_version': True,
'navigation_depth': 3,
}
html_show_sourcelink = True
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'operationplatformdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
'preamble': '''
\\hypersetup{unicode=true}
\\usepackage{CJKutf8}
\\AtBeginDocument{\\begin{CJK}{UTF8}{gbsn}}
\\AtEndDocument{\\end{CJK}}
''',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'operationplatform.tex', 'operation platform Documentation',
'golden', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'operationplatform', 'operation platform Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'operationplatform', 'operation platform Documentation',
author, 'operationplatform', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
|
goodking-bq/golden-note
|
source/conf.py
|
Python
|
mit
| 6,165
|
from django.db.models import F
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.views import generic
from .models import Question, Choice
class IndexView(generic.ListView):
template_name = 'polls/index.html'
context_object_name = 'question_list'
def get_queryset(self):
""" Return the last 8 questions """
return Question.objects.order_by('-pub_date')[:8]
class DetailView(generic.DetailView):
model = Question
template_name = 'polls/detail.html'
class ResultsView(generic.DetailView):
model = Question
template_name = 'polls/results.html'
def index(request):
question_list = Question.objects.order_by('-pub_date')[:5]
context = {'question_list': question_list}
return render(request, 'polls/index.html', context)
def vote(request, question_id):
question_id = question_id.strip('/')
question = get_object_or_404(Question, pk=question_id)
try:
selection = question.choice_set.get(pk=request.POST['choice'])
selection.votes = F('votes') + 1
selection.save()
except (KeyError, Choice.DoesNotExist):
return render(request, 'polls/results.html',
{
'question': question,
'error_message': 'You didn\'t select nothin\' foo!'
})
else:
sel_id = request.POST['choice']
# Always return an HttpResponseRedirect after successfully dealing
# with POST data. This prevents data from being posted twice if a
# user hits the Back button.
return HttpResponseRedirect(reverse('polls:results', args=(question_id, sel_id)))
|
hckrtst/learnpython
|
django/mysite/polls/views.py
|
Python
|
mit
| 1,786
|
from __future__ import print_function
from itertools import chain
from collections import deque
from rbtree import rbtree
from log import get_logger
from trade import Trade
from orders import LimitBuyOrder, LimitSellOrder
logger = get_logger(__name__)
class OrderBook(object):
def __init__(self, trade_callback=print):
self.trade_callback = trade_callback
self.asks = rbtree()
# reverse comparator
self.bids = rbtree(cmp=lambda x,y: y-x)
def has_bids(self):
return len(self.bids) > 0
def has_asks(self):
return len(self.asks) > 0
def max_bid(self):
# reverse comparator
return self.bids.min()
def min_ask(self):
return self.asks.min()
def quote(self):
return "BID: {0}, ASK: {1}".format(self.max_bid(), self.min_ask())
def _add_entry(self, book, key, value):
logger.debug("Adding to book - price:{0} order:{1}".format(key, value))
entries = book.get(key, deque())
entries.append(value)
book[key] = entries
def add_bid(self, bid):
remaining_bid = self.fill_bid(bid)
if remaining_bid:
# Don't let the books get crossed
assert (not self.has_asks() or self.min_ask() > remaining_bid.price)
self._add_entry(self.bids, remaining_bid.price, remaining_bid)
def add_ask(self, ask):
remaining_ask = self.fill_ask(ask)
if remaining_ask:
# Don't let the books get crossed
assert (not self.has_bids() or self.max_bid() < remaining_ask.price)
self._add_entry(self.asks, remaining_ask.price, remaining_ask)
def fill_bid(self, bid):
logger.debug("Filling bid: {0}".format(bid))
for (price, asks) in (ask.item for ask in self.asks.iternodes() if bid.price_matches(ask.key)):
# remove the current node from the tree
# current list of asks is not deleted since it is in scope, yay garbage collection
del self.asks[price]
for ask in (asks.popleft() for _ in xrange(len(asks))):
(ask, bid) = self.make_trade(ask, bid, ask.price)
if ask.quantity:
assert bid.quantity == 0
logger.debug("Partial fill for ask: {0}".format(ask))
# if we only partially filled the ask, our bid has been filled
# add the partially filled ask to the front of the order list and return
self.asks[price] = deque(chain([ask], asks))
return None
if not bid.quantity:
# We filled both orders exactly
return None
assert bid.quantity > 0
# We will only reach this point if we failed to fill our order
return bid
# Logic is very similar to fill_bid
# TODO: consolidate fill_bid and fill_ask into a generalized match method
def fill_ask(self, ask):
logger.debug("Filling ask: {0}".format(ask))
for (price, bids) in (bid.item for bid in self.bids.iternodes() if ask.price_matches(bid.key)):
# remove the current node from the tree
del self.bids[price]
for bid in (bids.popleft() for _ in xrange(len(bids))):
(ask, bid) = self.make_trade(ask, bid, bid.price)
if bid.quantity:
assert ask.quantity == 0
logger.debug("Partial fill for bid: {0}".format(bid))
self.bids[price] = deque(chain([bid], bids))
return None
if not ask.quantity:
return None
assert ask.quantity > 0
return ask
def make_trade(self, ask, bid, price):
# Don't give someone a lower price than what they asked
assert price >= ask.price
# Don't give someone a higher price than what they bid
assert price <= bid.price
        # Sanity test: make sure the asset is correct
assert ask.symbol == bid.symbol
quantity = min(ask.quantity, bid.quantity)
trade = Trade(ask.symbol, quantity, price, bid.trader_id, ask.trader_id)
logger.debug("Making trade: {0}".format(trade))
self.trade_callback(trade)
# return partials
return (LimitSellOrder(ask.price, ask.symbol, ask.quantity - quantity, ask.trader_id),
LimitBuyOrder(bid.price, bid.symbol, bid.quantity - quantity, bid.trader_id))
def execute(self, order):
order.execute(self)
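if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): LimitBuyOrder/LimitSellOrder are assumed to
    # take (price, symbol, quantity, trader_id), matching the constructor calls in make_trade,
    # and price_matches() is assumed to implement standard limit-order crossing semantics.
    book = OrderBook(trade_callback=print)
    book.add_bid(LimitBuyOrder(100, 'XYZ', 10, 'alice'))  # rests on the bid side
    book.add_ask(LimitSellOrder(101, 'XYZ', 5, 'bob'))    # above the best bid, so it rests too
    print(book.quote())                                   # expect something like "BID: 100, ASK: 101"
    book.add_ask(LimitSellOrder(99, 'XYZ', 4, 'carol'))   # crosses the book and triggers a Trade at 100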
|
amidvidy/multimatch
|
multimatch/book.py
|
Python
|
mit
| 4,553
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-03-22 17:44
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('visualize', '0002_load_zhang_data'),
]
operations = [
migrations.CreateModel(
name='BinnedData',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bin_150_50', django.contrib.postgres.fields.ArrayField(base_field=django.contrib.postgres.fields.ArrayField(base_field=models.DecimalField(decimal_places=10, max_digits=15), blank=True, null=True, size=None), blank=True, null=True, size=None)),
('bin_100_30', django.contrib.postgres.fields.ArrayField(base_field=django.contrib.postgres.fields.ArrayField(base_field=models.DecimalField(decimal_places=10, max_digits=15), blank=True, null=True, size=None), blank=True, null=True, size=None)),
('bin_50_15', django.contrib.postgres.fields.ArrayField(base_field=django.contrib.postgres.fields.ArrayField(base_field=models.DecimalField(decimal_places=10, max_digits=15), blank=True, null=True, size=None), blank=True, null=True, size=None)),
('bin_150_50_extents', django.contrib.postgres.fields.ArrayField(base_field=django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), blank=True, null=True, size=None), blank=True, null=True, size=None)),
('bin_100_30_extents', django.contrib.postgres.fields.ArrayField(base_field=django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), blank=True, null=True, size=None), blank=True, null=True, size=None)),
('bin_50_15_extents', django.contrib.postgres.fields.ArrayField(base_field=django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), blank=True, null=True, size=None), blank=True, null=True, size=None)),
('labels', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(blank=True, max_length=30, null=True), blank=True, null=True, size=None)),
('site', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='visualize.Site')),
],
),
]
|
brookefitzgerald/neural-exploration
|
neural_exploration/visualize/migrations/0003_binneddata.py
|
Python
|
mit
| 2,380
|
'''
This uses the homemade Feather Wing doubler: one of the top boards is the
Feather Wing SSD1306 OLED and the other is a homemade board that has a
play/pause button and a potentiometer.
This micropython script displays songs that are being scrobbled to the mqtt broker
running in AWS EC2
On Huzzah ESP8266 Feather, buttons A, B & C connect to 0, 16, 2 respectively
The buttons on OLED are also used:
- Button A (GPIO 0): plays wnyc, since that is something I frequently want to do
- Button B (GPIO 16): on some boards this is redirected to another pin because 16 is not a normal
pin but might work (needs a physical pull-up since there isn't a builtin pullup)
- Button C (GPIO 2): plays any songs in the queue
Note that this script sends mqtt messages and a script on the raspberry pi named esp_check_mqtt.py
which looks for the messages and then issues sonos commands
There is a separate button that is connected to GPIO 14 that is on the board that has the
volume potentiometer and that button play_pauses.
On some setups, I have rewired GPIO 16 on the OLED to GPIO 13, which is a normal pin
The script also pings the broker to keep it alive
The topic is sonos/ct or sonos/nyc
'''
import gc
from time import sleep, time
import json
import network
from config import mqtt_aws_host, ssid, pw
from ssd1306_min import SSD1306 as SSD
from umqtt_client_official import MQTTClient as umc
from machine import Pin, I2C, ADC
with open('mqtt_id', 'r') as f:
mqtt_id = f.read().strip()
with open('location', 'r') as f:
loc = f.read().strip()
print("version plays wnyc")
print("mqtt_id =", mqtt_id)
print("location =", loc)
print("mqtt_aws_host =", mqtt_aws_host)
i2c = I2C(scl=Pin(5), sda=Pin(4), freq=400000)
d = SSD(i2c)
d.init_display()
d.draw_text(0, 0, "HELLO STEVE")
d.display()
c = umc(mqtt_id, mqtt_aws_host, 1883)
def mtpPublish(topic, msg):
mtopic = bytes([len(topic) >> 8, len(topic) & 255]) + topic.encode('utf-8')
return bytes([0b00110001, len(mtopic) + len(msg)]) + mtopic + msg.encode('utf-8')
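# Added commentary on mtpPublish: it hand-builds a raw MQTT PUBLISH packet so the button IRQ
# handlers can fire a pre-encoded message with a single socket send. The first byte 0b00110001
# is the fixed header (type 3 = PUBLISH, QoS 0, RETAIN set), the second byte is the remaining
# length, followed by the 2-byte length-prefixed topic and the UTF-8 payload. This only works
# while the remaining length fits in a single byte (topic + payload under 128 bytes).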
play_wnyc_msg = mtpPublish('sonos/'+loc, '{"action":"play_wnyc"}')
play_queue_msg = mtpPublish('sonos/'+loc, '{"action":"play_queue"}')
play_pause_msg = mtpPublish('sonos/'+loc, '{"action":"play_pause"}')
b = bytearray(1)
#callbacks
# note that b[0] is set to 0 in the while loop
def play_wnyc(p):
if b[0]:
print("debounced", p, b[0])
return
b[0] = c.sock.send(play_wnyc_msg)
print("change pin", p, b[0])
def play_queue(p):
if b[0]:
print("debounced", p, b[0])
return
b[0] = c.sock.send(play_queue_msg)
print("change pin", p, b[0])
def play_pause(p):
if b[0]:
print("debounced", p, b[0])
return
b[0] = c.sock.send(play_pause_msg)
print("change pin", p, b[0])
p0 = Pin(0, Pin.IN, Pin.PULL_UP) #button A on FeatherWing OLED
p2 = Pin(2, Pin.IN, Pin.PULL_UP) #button C on FeatherWing OLED
p13 = Pin(13, Pin.IN, Pin.PULL_UP) #some boards redirected pin 16 to pin 13 on FeatherWing OLED
p14 = Pin(14, Pin.IN, Pin.PULL_UP) #button on homemade volume play/pause board
p0.irq(trigger=Pin.IRQ_RISING, handler=play_wnyc)
p2.irq(trigger=Pin.IRQ_RISING, handler=play_queue)
p13.irq(trigger=Pin.IRQ_RISING, handler=play_pause)
p14.irq(trigger=Pin.IRQ_FALLING, handler=play_pause)
adc = ADC(0)
def wrap(text,lim):
lines = []
pos = 0
line = []
for word in text.split():
if pos + len(word) < lim + 1:
line.append(word)
pos+= len(word) + 1
else:
lines.append(' '.join(line))
line = [word]
pos = len(word)
lines.append(' '.join(line))
return lines
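# Quick illustration of wrap(): it greedily packs whole words up to `lim` characters per line,
# e.g. wrap("the quick brown fox", 11) -> ['the quick', 'brown fox'].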
def run():
wlan = network.WLAN(network.STA_IF)
wlan.active(True)
if not wlan.isconnected():
print('connecting to network...')
wlan.connect(ssid, pw)
while not wlan.isconnected():
pass
print('network config:', wlan.ifconfig())
def callback(topic,msg):
zz = json.loads(msg.decode('utf-8'))
d.clear()
d.display()
d.draw_text(0, 0, zz.get('artist', '')[:20])
title = wrap(zz.get('title', ''), 20)
d.draw_text(0, 12, title[0])
if len(title) > 1:
d.draw_text(0, 24, title[1])
d.display()
r = c.connect()
print("connect:",r)
c.set_callback(callback)
r = c.subscribe('sonos/{}/track'.format(loc))
print("subscribe:",r)
sleep(5)
cur_time = time()
bb = True
level = 300
while 1:
new_level = 1000-adc.read() # since current wiring has clockwise decreasing voltage
if abs(new_level-level) > 10:
try:
c.publish('sonos/'+loc, json.dumps({"action":"volume", "level":new_level}))
except Exception as e:
print(e)
c.sock.close()
c.connect()
level = new_level
print("new level =", level)
c.check_msg()
t = time()
if t > cur_time + 30:
c.ping()
cur_time = t
gc.collect()
b[0] = 0 # for debouncing
sleep(1)
run()
|
slzatz/esp8266
|
sonos_remote.py
|
Python
|
mit
| 4,881
|
from collections import namedtuple
Notam = namedtuple('Notam', 'intro attributes')
Intro = namedtuple('Intro', 'id operation target')
NotamID = namedtuple('NotamID', 'series number year raw')
Attribute = namedtuple('Attribute', 'type body')
Coordinates = namedtuple('Coordinates', 'lon lat radius')
|
dbrgn/notam-parse
|
notam/ast.py
|
Python
|
mit
| 301
|
import logging
logging.basicConfig(level=logging.DEBUG, \
format="%(asctime)s" "[%(module)s:%(funcName)s:%(lineno)d]\n" "%(message)s \n" \
)
|
ZhiweiYAN/weixin_project_logging
|
logmsg.py
|
Python
|
mit
| 180
|
# Jorge Castanon, March 2016
# Data Scientist @ IBM
# run in a terminal, from YOUR-PATH-TO-REPO, with the command:
# ~/Documents/spark-1.5.1/bin/spark-submit ml-scripts/w2vAndKmeans.py
# Replace this line with:
# /YOUR-SPARK-HOME/bin/spark-submit ml-scripts/w2vAndKmeans.py
import numpy as np
import pandas as pd
import time
import math
from nltk.corpus import stopwords
from pyspark import SparkContext
from pyspark import Row
from pyspark.sql import SQLContext
from pyspark.ml.feature import Word2Vec
from pyspark.ml.clustering import KMeans
## Spark and sql contexts
sc = SparkContext('local', 'train-w2v') #change to cluster mode when needed
sqlContext = SQLContext(sc)
datapath = '/Users/jorgecastanon/Documents/github/w2v/data/tweets.gz'
# Replace this line with:
# datapath = '/YOUR-PATH-TO-REPO/w2v/data/tweets.gz'
## Read Tweets
t0 = time.time()
tweets = sqlContext.read.json(datapath)
tweets.registerTempTable("tweets")
timeReadTweets = time.time() - t0
## Read Keywords from w2v/data/filter.txt
filterPath = '/Users/jorgecastanon/Documents/github/w2v/data/filter.txt'
filter = pd.read_csv(filterPath,header=None)
## Filter Tweets
# construct SQL Command
t0 = time.time()
sqlString = "("
for substr in filter[0]: #iteration on the list of words to filter (at most 50-100 words)
sqlString = sqlString+"text LIKE '%"+substr+"%' OR "
sqlString = sqlString+"text LIKE '%"+substr.upper()+"%' OR "
sqlString=sqlString[:-4]+")"
sqlFilterCommand = "SELECT lang, text FROM tweets WHERE (lang = 'en') AND "+sqlString
tweetsDF = sqlContext.sql(sqlFilterCommand)
timeFilterTweets = time.time() - t0
## Parse and Remove Stop Words
tweetsRDD = tweetsDF.select('text').rdd
def parseAndRemoveStopWords(text):
t = text[0].replace(";"," ").replace(":"," ").replace('"',' ')
t = t.replace(',',' ').replace('.',' ').replace('-',' ')
t = t.lower().split(" ")
stop = stopwords.words('english')
return [i for i in t if i not in stop]
tw = tweetsRDD.map(parseAndRemoveStopWords)
## Train Word2Vec Model with Spark ML
try:
twDF = tw.map(lambda p: Row(text=p)).toDF()
except:
print "For some reason, the first time to run the last command trows an error. The Error dissapears the second time that the command is run"
twDF = tw.map(lambda p: Row(text=p)).toDF()
t0 = time.time()
word2Vec = Word2Vec(vectorSize=100, minCount=5, stepSize=0.025, inputCol="text", outputCol="result")
modelW2V = word2Vec.fit(twDF)
wordVectorsDF = modelW2V.getVectors()
timeW2V = time.time() - t0
## Train K-means on top of the Word2Vec matrix:
t0 = time.time()
vocabSize = wordVectorsDF.count()
K = int(math.floor(math.sqrt(float(vocabSize)/2)))
# K ~ sqrt(n/2) this is a rule of thumb for choosing K,
# where n is the number of words in the model
# feel free to choose K with a fancier algorithm
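# For example, a vocabulary of 5,000 words gives K = floor(sqrt(5000 / 2)) = 50 clusters.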
dfW2V = wordVectorsDF.select('vector').withColumnRenamed('vector','features')
kmeans = KMeans(k=K, seed=1)
modelK = kmeans.fit(dfW2V)
labelsDF = modelK.transform(dfW2V).select('prediction').withColumnRenamed('prediction','labels')
vocabSize = wordVectorsDF.count()
timeKmeans = time.time() - t0
sc.stop()
## Print Some Results
printResults = 1  # set to 1 to print a summary of the results below
if (printResults):
## Read Tweets
print "="*80
print "Read Tweets..."
print "Elapsed time (seconds) to read tweets as a data frame: ", timeReadTweets
print "="*80
## Filter Tweets
print "Filter Tweets..."
print "Elapsed time (seconds) to filter tweets of interest: ", timeFilterTweets
print "="*80
## Word2Vec
print "Build Word2Vec Matrix..."
print "Elapsed time (seconds) to build Word2Vec: ", timeW2V
print "Vocabulary Size: ", vocabSize
print "="*80
## Kmeans
print "Train K-means clustering..."
print "Elapsed time (seconds) training K-means: ", timeKmeans
print "Number of Clusters: ", K
print "="*80
#save models:
saveModels = 0
if(saveModels):
def toList(df,colName):
dfCol = df.select(colName)
return dfCol.map(lambda e: e[0]).collect()
w2vMatrix = toList(wordVectorsDF,'vector')
np.save('w2vMatrix.npy',w2vMatrix)
words = toList(wordVectorsDF,'word')
np.save('words.npy',words)
    labels = toList(labelsDF,'labels')
    np.save('labels.npy',labels)
|
castanan/w2v
|
ml-scripts/w2vAndKmeans.py
|
Python
|
mit
| 4,285
|
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import bt_export_tessellated_edges_edge1364
except ImportError:
bt_export_tessellated_edges_edge1364 = sys.modules[
"onshape_client.oas.models.bt_export_tessellated_edges_edge1364"
]
class BTExportTessellatedEdgesBody890AllOf(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"bt_type": (str,), # noqa: E501
"edges": (
[bt_export_tessellated_edges_edge1364.BTExportTessellatedEdgesEdge1364],
), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"bt_type": "btType", # noqa: E501
"edges": "edges", # noqa: E501
}
@staticmethod
def _composed_schemas():
return None
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""bt_export_tessellated_edges_body890_all_of.BTExportTessellatedEdgesBody890AllOf - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
bt_type (str): [optional] # noqa: E501
edges ([bt_export_tessellated_edges_edge1364.BTExportTessellatedEdgesEdge1364]): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
|
onshape-public/onshape-clients
|
python/onshape_client/oas/models/bt_export_tessellated_edges_body890_all_of.py
|
Python
|
mit
| 5,176
|
import base64
def HexToBase64(hex_input):
hex_decoded = hex_input.decode("hex")
base64_encoded = base64.b64encode(hex_decoded)
return base64_encoded
def main():
assert "3q2+7w==" == HexToBase64("deadbeef"), HexToBase64("deadbeef")
assert "SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t" == (
HexToBase64(
"49276d206b696c6c696e6720796f757220627261696e206c696b65206120706f697"
"36f6e6f7573206d757368726f6f6d"))
if __name__ == "__main__":
main()
|
aawc/cryptopals
|
sets/1/challenges/common/hex_to_base64.py
|
Python
|
mit
| 493
|
# Test various argument semantics for sendmess
#a dummy method so there is something to connect to
def nothing(rip,rport,msg,handle):
pass
# test that the local ip used by sendmess is getmyip
def test_remoteip(rip,rport,msg,handle):
if rip != getmyip():
print 'remoteip did not equal getmyip: '+rip+' vs '+getmyip()
if callfunc == 'initialize':
#start a listener
handle = recvmess('127.0.0.1',12345,nothing)
#ip given is not a string
try:
sendmess(127001,12345,'hi')
except:
pass
else:
print 'int used for ip did not cause exception'
#localip given is not a string
try:
sock = openconn('127.0.0.1',12345,'hello',127002,23456)
except:
pass
else:
print 'int used for local ip did not cause exception'
# test port ranges
for port in [-5000,65536]:
try:
sendmess('127.0.0.1',port,"hello")
except:
pass
else:
print 'invalid port did not cause exception: '+str(port)
try:
sendmess('127.0.0.1',12345,'hello','127.0.0.2',port)
except:
pass
else:
print 'invalid localport did not cause an exception: '+str(port)
#test msg is a string
try:
sendmess('127.0.0.1',12345,42)
except:
pass
else:
print 'invalid msg (int) did not cause exception'
#test that if local ip / port is specified both are
try:
sendmess('127.0.0.1',12345,"hello",localip='127.0.0.1')
except:
pass
else:
    print 'specifying localip and not localport did not cause exception'
try:
sendmess('127.0.0.1',12345,"hello",localport=12345)
except:
pass
else:
    print 'specifying localport and not localip did not cause exception'
# test that an unspecified localip will resolve to getmyip()
h2 = recvmess(getmyip(),12345,test_remoteip)
sendmess(getmyip(),12345,"hi")
stopcomm(handle)
stopcomm(h2)
|
sburnett/seattle
|
network_semantics_tests/tests/sendmess/sendmess_arg.py
|
Python
|
mit
| 1,901
|
#-*- coding: utf-8 -*-
# Todo : figure out how to turn off the messages when calling predict_probs() on the keras model
# Todo : let the detector's own class hold the nms object so that the do_nms option is no longer needed in evaluator.run(img_files, do_nms=False).
import cv2
import numpy as np
import digit_detector.region_proposal as rp
import digit_detector.detect as detect
import digit_detector.file_io as file_io
import digit_detector.preprocess as preproc
import digit_detector.annotation as ann
import digit_detector.evaluate as eval
import digit_detector.classify as cls
model_filename = "detector_model.hdf5"
model_input_shape = (32,32,1)
DIR = '../datasets/svhn/train'
ANNOTATION_FILE = "../datasets/svhn/train/digitStruct.json"
detect_model = "detector_model.hdf5"
recognize_model = "recognize_model.hdf5"
mean_value_for_detector = 107.524
mean_value_for_recognizer = 112.833
if __name__ == "__main__":
# 1. load test image files, annotation file
img_files = file_io.list_files(directory=DIR, pattern="*.png", recursive_option=False, n_files_to_sample=1000, random_order=False)
annotator = ann.SvhnAnnotation(ANNOTATION_FILE)
preprocessor_for_detector = preproc.GrayImgPreprocessor(mean_value_for_detector)
preprocessor_for_recognizer = preproc.GrayImgPreprocessor(mean_value_for_recognizer)
detector = cls.CnnClassifier(detect_model, preprocessor_for_detector, model_input_shape)
recognizer = cls.CnnClassifier(recognize_model, preprocessor_for_recognizer, model_input_shape)
proposer = rp.MserRegionProposer()
# 2. create detector
det = detect.DigitSpotter(detector, recognizer, proposer)
# 3. Evaluate average precision
evaluator = eval.Evaluator(det, annotator, rp.OverlapCalculator())
recall, precision, f1_score = evaluator.run(img_files)
# recall value : 0.513115508514, precision value : 0.714285714286, f1_score : 0.597214783074
# 4. Evaluate MSER
detector = cls.TrueBinaryClassifier(input_shape=model_input_shape)
preprocessor = preproc.NonePreprocessor()
    # Todo : make detector and recognizer none-type
det = detect.DigitSpotter(detector, recognizer, proposer)
evaluator = eval.Evaluator(det, annotator, rp.OverlapCalculator())
recall, precision, f1_score = evaluator.run(img_files, do_nms=False)
#recall value : 0.630004601933, precision value : 0.0452547023239, f1_score : 0.0844436220084
|
penny4860/SVHN-deep-digit-detector
|
4_evaluate.py
|
Python
|
mit
| 2,449
|
import sys
from setuptools import setup
assert sys.version_info[0] > 2
if sys.version_info[0] == 3 and sys.version_info[1] < 5:
raise RuntimeError("Python >= 3.5 is required to install Streamis.")
def read(fname):
with open(fname, 'r') as f:
text = f.read()
return text
setup(
name="streamis",
version="0.1.0.dev",
description="Subscribe to Redis pubsub channels via HTTP and EventSource.",
long_description=read('README.md'),
author="Michael V. DePalatis",
author_email="mike@depalatis.net",
license="MIT",
url="https://github.com/mivade/streamis",
py_modules=["streamis"],
install_requires=[
'tornado>=4.3',
'aioredis>=0.2'
]
)
|
mivade/streamis
|
setup.py
|
Python
|
mit
| 713
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: oesteban
# @Date: 2015-06-15 14:50:01
# @Last Modified by: Oscar Esteban
# @Last Modified time: 2015-06-23 13:05:13
|
oesteban/diffantom
|
diffantom/scripts/__init__.py
|
Python
|
mit
| 177
|
# -*- coding: utf-8 -*-
"""
Utility for generating placeholder images from http://placehold.it/
"""
import random
URL = 'http://placehold.it/%(width)sx%(height)s/%(bcolor)s/%(tcolor)s/'
def _get_random_color():
"""
Returns a random color hex value.
"""
return '%02X%02X%02X' % (
random.randint(0, 255),
random.randint(0, 255),
random.randint(0, 255)
)
def get_url(
width, height=None, background_color="cccccc",
text_color="969696", text=None, random_background_color=False
):
"""
Craft the URL for a placeholder image.
You can customize the background color, text color and text using
the optional keyword arguments
If you want to use a random color pass in random_background_color as True.
"""
if random_background_color:
background_color = _get_random_color()
    # If height is not provided, presume it will be a square
if not height:
height = width
d = dict(
width=width,
height=height,
bcolor=background_color,
tcolor=text_color
)
url = URL % d
if text:
text = text.replace(" ", "+")
url = url + "?text=" + text
return url
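# Example: get_url(300, 250, text="Hello world") returns
# 'http://placehold.it/300x250/cccccc/969696/?text=Hello+world'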
|
datadesk/django-greeking
|
greeking/placeholdit.py
|
Python
|
mit
| 1,207
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""http://www.pythonchallenge.com/pc/ring/bell.html:repeat:switch"""
__author__ = "子風"
__copyright__ = "Copyright 2015, Sun All rights reserved"
__version__ = "1.0.0"
import get_challenge
from PIL import Image
img = Image.open(get_challenge.download("http://www.pythonchallenge.com/pc/ring/bell.png", "repeat", "switch"))
r,g,b = img.split()
r.show()
g.show()
b.show()
gdata = list(g.getdata())
paris=[(gdata[i],gdata[i+1]) for i in range(0,len(gdata),2)] # following the "my paris" hint, group the pixels in pairs
# you can see that within each pair ("paris") the difference between the two pixels is basically 42
print(paris[:10])
diffs=[abs(i[0]-i[1]) for i in paris] # compute the absolute difference within each pair
print(diffs[:10])
s = ''
for i in diffs:
if i != -42 and i != 42:
s += chr(abs(i))
print(s)
# maybe it sounds like "who done it" - who did these things
print('Guido Van Rossum'.split()[0])
|
z-Wind/Python_Challenge
|
Level28_Image_ch.py
|
Python
|
mit
| 951
|
# coding=utf-8
from __future__ import absolute_import, division
import os
from typing import Union
from keras.preprocessing.image import img_to_array, array_to_img
import numpy as np
from pathlib import Path
from PIL import Image as PILImage, ImageOps
# I/O
def files_under(path: Path):
for f in path.glob("*"):
if f.is_file():
yield f
def basename_without_ext(path_to_file: Path):
return path_to_file.stem
def ensure_dir(dir_path: Union[Path, str]):
"""
Creates folder f if it doesn't exist
:param dir_path: directory path
:return:
"""
path = dir_path if isinstance(dir_path, Path) else Path(dir_path)
path.mkdir(parents=True, exist_ok=True)
def normalize(img):
if isinstance(img, np.ndarray):
processed_img = ImageOps.equalize(PILImage.fromarray(img, mode='RGB'))
else:
processed_img = ImageOps.equalize(img)
return processed_img
# masks
def mask_rgb_to_gray(rgb, palette):
rows = rgb.shape[0]
cols = rgb.shape[1]
gray = np.zeros((rows, cols))
for r in range(rows):
for c in range(cols):
gray[r, c] = palette[rgb[r, c, 0], rgb[r, c, 1], rgb[r, c, 2]]
return gray
def one_hot_to_rgb(onehot, id_to_palette):
"""
    Converts a one hot label to an rgb image. Pixels that belong to more than one category
    are assigned the maximum class id
:param onehot: label in onehot representation
:param id_to_palette: dictionary that maps a class id to rgb
:return:
"""
label = np.argmax(onehot, axis=2)
rgb_label = np.repeat(np.expand_dims(label, axis=2), 3, axis=2)
return rgb_label
def soften_targets(array, low=0.1, high=0.9):
assert list(set(np.unique(array)) ^ {0, 1}) == [], 'Targets must be binary'
    array_new = np.empty_like(array)
    np.copyto(array_new, array)  # np.copyto returns None, so the result must not be rebound
array_new[array == 0] = low
array_new[array == 1] = high
return array_new
# misc
def load_image(img_path):
"""
Loads an image from a file
:param img_path: path to image on disk
    :return: the image as a numpy.ndarray (loaded with PIL and converted via img_to_array)
"""
img = PILImage.open(img_path).convert('RGB')
converted_img = img_to_array(img)
return converted_img
def resize(item, target_h, target_w, keep_aspect_ratio=False):
"""
Resizes an image to match target dimensions
:type item: np.ndarray
:type target_h: int
:type target_w: int
:param item: 3d numpy array or PIL.Image
:param target_h: height in pixels
:param target_w: width in pixels
:param keep_aspect_ratio: If False then image is rescaled to smallest dimension and then cropped
:return: 3d numpy array
"""
img = array_to_img(item, scale=False)
if keep_aspect_ratio:
        img.thumbnail((target_w, target_h), PILImage.ANTIALIAS)
img_resized = img
else:
img_resized = img.resize((target_w, target_h), resample=PILImage.NEAREST)
# convert output
img_resized = img_to_array(img_resized)
img_resized = img_resized.astype(dtype=np.uint8)
return img_resized
def center_crop(x, y=None, crop_size=None, data_format='channels_last'):
"""
Takes a pair of numpy arrays (image and label) and returns a pair of matching center crops
:param x: image in numpy array format
:param y: label in numpy array format
:param crop_size: (height, width) tuple
:param data_format: 'channels_first' or 'channels_last'
:return: (cropped image, cropped label) tuple
"""
if crop_size is None:
        return x if y is None else (x, y)
if data_format == 'channels_first':
centerh, centerw = x.shape[1] // 2, x.shape[2] // 2
elif data_format == 'channels_last':
centerh, centerw = x.shape[0] // 2, x.shape[1] // 2
else:
raise NotImplementedError()
crop_size = (2 * centerh, 2 * centerw) if crop_size is None else crop_size
lh, lw = crop_size[0] // 2, crop_size[1] // 2
rh, rw = crop_size[0] - lh, crop_size[1] - lw
start_h, end_h = centerh - lh, centerh + rh
start_w, end_w = centerw - lw, centerw + rw
if data_format == 'channels_first':
cropped_x = x[:, start_h:end_h, start_w:end_w]
if y is None:
return cropped_x
else:
cropped_y = y[:, start_h:end_h, start_w:end_w]
return cropped_x, cropped_y
elif data_format == 'channels_last':
cropped_x = x[start_h:end_h, start_w:end_w, :]
if y is None:
return cropped_x
else:
cropped_y = y[start_h:end_h, start_w:end_w, :]
return cropped_x, cropped_y
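# Shape sanity check for center_crop (channels_last), e.g.:
#   center_crop(np.zeros((100, 120, 3)), crop_size=(50, 60)).shape == (50, 60, 3)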
def random_crop(x, y=None, crop_size=None, data_format='channels_last', sync_seed=None):
"""
Takes a pair of numpy arrays (image and label) and returns a pair of matching random crops
:param x: image in numpy array format. Shape is (h, w, c) or (c, h, w), depending on data_format.
:param y: label in numpy array format. Shape is (h, w, c) or (c, h, w), depending on data_format.
:param crop_size: (height, width) tuple
:param data_format: 'channels_first' or 'channels_last'
:param sync_seed: random seed (for easier reproduction)
:return: (cropped image, cropped label) tuple
"""
if crop_size is None:
        return x if y is None else (x, y)
np.random.seed(sync_seed)
if data_format == 'channels_first':
h, w = x.shape[1], x.shape[2]
elif data_format == 'channels_last':
h, w = x.shape[0], x.shape[1]
else:
raise NotImplementedError()
rangeh = (h - crop_size[0]) // 2
rangew = (w - crop_size[1]) // 2
offseth = 0 if rangeh == 0 else np.random.randint(rangeh)
offsetw = 0 if rangew == 0 else np.random.randint(rangew)
start_h, end_h = offseth, offseth + crop_size[0]
start_w, end_w = offsetw, offsetw + crop_size[1]
if data_format == 'channels_first':
cropped_x = x[:, start_h:end_h, start_w:end_w]
if y is None:
return cropped_x
else:
cropped_y = y[:, start_h:end_h, start_w:end_w]
return cropped_x, cropped_y
elif data_format == 'channels_last':
cropped_x = x[start_h:end_h, start_w:end_w, :]
if y is None:
return cropped_x
else:
cropped_y = y[start_h:end_h, start_w:end_w, :]
return cropped_x, cropped_y
def pillow_invert_channels(img):
r, g, b = img.split()
img = PILImage.merge("RGB", (b, g, r))
return img
def identity(*args):
if len(args) == 1:
return args[0]
return args
def unzip_and_remove(zipped_file):
import zipfile
outpath = os.path.dirname(os.path.realpath(zipped_file))
with open(zipped_file, 'rb') as fin:
z = zipfile.ZipFile(file=fin)
z.extractall(outpath)
z.close()
# os.remove(zipped_file)
# Temporary place for data preprocessing pipeline
def preprocess_image(img):
# TODO: Populate with actual logic
# TODO: move away from here into a dedicated class (like ImageDataGenerator)
# img = normalize(img, mode, target_type='numpy')
# return img
def standardize(img, minval=0, maxval=1):
# normalize to [minval, maxval]
standardized = img - np.min(img)
standardized = (maxval - minval) * standardized / np.max(standardized)
standardized += minval
return standardized
# img = standardize(img, minval=-1, maxval=1)
return img
# def preprocess_label(lbl, mapper, nc, mode, keep_aspect_ratio=False):
def preprocess_label(label):
"""
load label image, keep a single channel (all three should be the same)
:param label:
:return:
"""
# TODO: Populate with actual logic and move away from here into a dedicated class (like ImageDataGenerator)
# target = 'pillow' if mode == 'pillow' else 'numpy'
# # lbl = resize(lbl, target_h, target_w, mode=mode, target_type='numpy', keep_aspect_ratio=keep_aspect_ratio)
# if mode == 'pillow':
# # lbl = np.expand_dims(lbl[:, :, 0], axis=2)
# assert np.all(lbl[:, :, 0] == lbl[:, :, 1]) and np.all(lbl[:, :, 0] == lbl[:, :, 2])
# lbl = lbl[:, :, 0].astype(np.uint8)
# array2d = mapper[lbl]
# onehot_lbl = to_categorical(array2d, num_classes=nc)
# return onehot_lbl
return label
|
PavlosMelissinos/enet-keras
|
src/data/utils.py
|
Python
|
mit
| 8,335
|
from __future__ import print_function
import os
import sys
import shutil
import pickle
from tinytag import TinyTagException
from tinytag import TinyTag
def removeNonAscii(s): return "".join(i for i in s if ord(i)<128 and ord(i)>0)
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
class MusicOrganizer:
def __init__(self, db={}, show_year=False, show_type=True):
self.db = db
self.show_year = show_year
self.show_type = show_type
def saveDB(self, filename):
with open(filename, 'wb') as f:
pickle.dump(self.db, f, pickle.HIGHEST_PROTOCOL)
def loadDB(self, filename):
with open(filename, 'rb') as f:
self.db = pickle.load(f)
def setDirNameOptions(self, show_year=False, show_type=True):
self.show_year = show_year
self.show_type = show_type
def printDB(self,filename=None):
if filename == None:
f = sys.stdout
else:
f = open(filename, 'w')
for key in self.db.keys():
print(key, end='\n', file=f)
for elem in sorted(self.db[key]):
if isinstance(elem, basestring):
print('\t', elem, end='\n', file=f)
else:
raise Exception('Wrong DB format: non-string elements are not allowed.')
return
if filename != None:
f.close()
def constructDirName(self,filename):
tag = TinyTag.get(filename)
dirname = []
artist_tag = removeNonAscii(tag.artist).strip(' ,')
album_tag = removeNonAscii(tag.album).strip(' ,')
dirname.append(''.join([artist_tag,' - ',album_tag]))
if self.show_year and tag.year:
year_tag = removeNonAscii(tag.year).strip(' ,')
dirname.append(''.join([' (',year_tag,')']))
if self.show_type:
ext = os.path.splitext(filename)[1][1:].upper()
if ext != 'MP3':
dirname.append(''.join([' [',ext,']']))
return ''.join(dirname).strip()
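    # For instance, with show_year=True a FLAC file tagged artist="Foo", album="Bar",
    # year="1999" maps to the directory name "Foo - Bar (1999) [FLAC]".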
def scanFile(self, filename):
if filename.endswith('.m4a') or filename.endswith('.mp3') or filename.endswith('.flac'):
print('Reading file:', ''.join(['\'',filename,'\'']))
ret = -1
try:
dirname = self.constructDirName(filename)
if self.db.has_key(dirname):
seen = set(self.db[dirname])
if filename not in seen:
self.db[dirname].append(filename)
ret = 0
else:
ret = 1
else:
self.db[dirname] = [filename]
ret = 0
except TinyTagException as err:
eprint('Tag Error:', err)
#except Exception as e:
# eprint('Error:', str(e), '>file:', filename)
return ret
else:
return -1
def scanDirTree(self, path):
for root, dirs, files in os.walk(path):
for elem in files:
self.scanFile(os.path.join(root,elem))
def organizeFiles(self, outpath, force_dir_merge=False, move=False):
for key in self.db.keys():
dirpath = os.path.join(outpath,key)
if not os.path.exists(dirpath):
print('Creating directory', ''.join(['\'',dirpath,'\'.']))
os.makedirs(dirpath)
elif force_dir_merge:
print('Entering directory', ''.join(['\'',dirpath,'\'.']))
else:
eprint('Warning: directory ', ''.join(['\'',dirpath,'\'']),
('already exists. To ensure that no data is being overwritten, '
'this directory is skipped.') )
continue
for elem in self.db[key]:
# prepare filename
filename = removeNonAscii(os.path.split(elem)[1])
fullpath = os.path.join(dirpath,filename)
if move:
print('Moving', ''.join(['\'',elem,'\'']), 'to',
''.join(['\'',dirpath,'\'.']))
shutil.move(elem,fullpath)
else:
print('Copying', ''.join(['\'',elem,'\'']), 'to',
''.join(['\'',fullpath,'\'.']))
shutil.copy2(elem,fullpath)
|
kpatsis/music_organizer
|
MusicOrganizer.py
|
Python
|
mit
| 3,622
|
import urllib2
from django.db import models
from django.utils import timezone
from django.core.urlresolvers import reverse
class ServiceGroupModel(models.Model):
"""
ServiceGroupModel holds services, so that you can have more than
one physical service per group, e.g. two databases
"""
name = models.CharField(max_length=250)
def get_absolute_url(self):
return reverse('services:index')
def __unicode__(self):
return self.name
def get_fields(self):
""" Fields which are allowed to be updated """
return ['name']
class ServiceModel(models.Model):
"""ServiceModel is the base model for each service"""
#attributes
name = models.CharField(max_length=250)
is_up = models.BooleanField(default=False)
url = models.CharField(max_length=250, null=True, blank=True)
health_url = models.CharField(max_length=250, null=True, blank=True)
is_refresh_on = models.BooleanField(default=False)
    created_on = models.DateTimeField(default=timezone.now)  # pass the callable so it is evaluated at save time
service_group = models.ForeignKey(ServiceGroupModel, related_name='services')
def get_fields(self):
""" Fields which are allowed to be updated """
return ['name', 'is_up', 'url', 'health_url', 'is_refresh_on', 'service_group']
def ping(self, force_ping=False):
""" Checks the health of a given service, and creates a history to log the response """
if force_ping or self.is_refresh_on:
response_to_log = None
self.is_up = False
try:
contents = urllib2.urlopen(self.health_url)
if self._is_service_ok(contents):
self.is_up = True
response_to_log = self._get_response_to_log(contents)
except Exception, e:
response_to_log = 'Unable to ping'
self.save()
self._create_history(response_to_log, self.is_up)
def _is_service_ok(self, contents):
""" Determines if a service is ok or not, returning a boolean """
return (contents.getcode() == 200)
def _get_response_to_log(self, contents):
""" Extracts the response that will be logged in the service's history """
return contents.getcode()
def _create_history(self, response, is_up):
""" Creates a history of a given response to the ping """
s = ServiceHistoryModel(response=response, is_up=is_up, service=self, created_on=timezone.now())
s.save()
def get_absolute_url(self):
return reverse('services:detail', args=(self.pk,))
class ServiceHistoryModel(models.Model):
"""ServiceHistoryModel is the history tracker for a ServiceModel"""
    created_on = models.DateTimeField(default=timezone.now)  # pass the callable so it is evaluated at save time
response = models.CharField(max_length=250, null=True, blank=True)
is_up = models.BooleanField(default=False)
service = models.ForeignKey(ServiceModel, related_name='history')
class Meta:
ordering = ['-created_on']
get_latest_by = 'created_on'
|
ograycode/ServiceZen
|
services/models.py
|
Python
|
mit
| 3,131
|
# -*- coding: utf-8 -*-
from PyQt4 import QtCore
from serieswatcher.config import Config
from serieswatcher.thetvdb import TheTVDB
class MakeSearch(QtCore.QObject):
searchFinished = QtCore.pyqtSignal(list)
def __init__(self, userInput):
super(MakeSearch, self).__init__()
self._userInput = userInput
def run(self):
bdd = TheTVDB()
languages = tuple(Config.config['languages'].split(','))
seriesFound = []
for lang in languages:
seriesFound.extend(bdd.search_serie(self._userInput, lang))
self.searchFinished.emit(seriesFound)
|
lightcode/SeriesWatcher
|
serieswatcher/serieswatcher/tasks/makesearch.py
|
Python
|
mit
| 611
|
import logging
import json
from googleapiclient.discovery import build
logger = logging.getLogger(__name__)
class GoogleImageSearchService:
DEFAULT_SEARCH_FIELDS = 'items(fileFormat,image(byteSize,height,width),labels,link,mime,snippet,title),queries,searchInformation(searchTime,totalResults)'
DEFAULT_PAGE_SIZE = 10
def __init__(self, api_key, engine_id):
"""
Google Image search service wrapper.
Parameters
----------
api_key : string
Google API Key that you obtain from https://console.developers.google.com/apis/api/customsearch.googleapis.com/overview
engine_id : string
Custom Search Engine Id that you obtain from https://cse.google.com/cse/all
Returns
-------
Array
Returns search results as an Array
"""
self.api_key = api_key
self.engine_id = engine_id
def _calculate_page_count(self, count):
        page_count = count // self.DEFAULT_PAGE_SIZE  # integer division so range() below gets an int
if count % self.DEFAULT_PAGE_SIZE:
page_count += 1
return page_count
def call(self, term, count=10, search_fields=DEFAULT_SEARCH_FIELDS):
"""
Image search by give term.
Parameters
----------
term : string
Keyword to search
fields : string
Field descriptions to include in search results
Returns
-------
Dictionary
Returns search results as a Dictionary
"""
service = build('customsearch', 'v1', developerKey=self.api_key)
items = []
start_index = 1
for page in range(0, self._calculate_page_count(count)):
logger.info('Downloading search terms, page %d', page)
response = service.cse().list(
q=term,
cx=self.engine_id,
searchType='image',
fields=search_fields,
start=start_index
).execute()
logging.debug(json.dumps(response, indent=4, sort_keys=True))
items += response['items']
start_index = response['queries']['nextPage'][0]['startIndex']
return items[:count]
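# Illustrative usage (hypothetical credentials):
#   service = GoogleImageSearchService(api_key='YOUR_API_KEY', engine_id='YOUR_CSE_ID')
#   images = service.call('falcon', count=25)  # three paged requests of 10, trimmed to 25 items
#   print(images[0]['link'])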
|
kakkoyun/image_searcher
|
google_image_search_service.py
|
Python
|
mit
| 2,223
|
#! /usr/bin/env python
"""
Create symbolic links of yersinia and anthracis in a new folder.
sys.argv[1]: data/pathogens-to-samples.txt
sys.argv[2]: data/runs-to-samples.txt
mkdir sra-pathogens
mkdir sra-pathogens/anthracis
mkdir sra-pathogens/yersinia
"""
import os
import errno
import glob
import sys
def mkdir(directory):
""" Make a directory using sample id and pthogen. """
if not os.path.exists(directory):
try:
print 'Making {0}'.format(directory)
os.makedirs(directory)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def symbolic_link(indir, outdir, run_acc, sample_id):
""" Create symlink of the SRA fastq, and rename it with sample ID. """
for run_file in glob.glob(indir):
run_file = os.path.abspath(run_file)
sample_file = outdir + os.path.basename(run_file)
print 'Creating symlink: {0} -> {1}'.format(run_file, sample_file)
os.symlink(run_file, sample_file)
# Make some directories
mkdir('sra-pathogens')
mkdir('sra-pathogens/anthracis')
mkdir('sra-pathogens/yersinia')
# Read sample ids and the pathogen
sample_ids = {}
fh = open(sys.argv[1], 'r')
for line in fh:
line = line.rstrip()
cols = line.split('\t')
# cols[0]: Sample ID, cols[1]: Pathogen (anthracis|yersinia)
sample_ids[cols[0]] = cols[1]
fh.close()
# Read sample ids and their run accessions
runs_to_samples = {}
fh = open(sys.argv[2], 'r')
for line in fh:
line = line.rstrip()
cols = line.split('\t')
# cols[0]: Run accession, cols[1]: Sample ID
if cols[1] in sample_ids:
indir = 'sra-fastq/{0}/*'.format(cols[0])
outdir = 'sra-pathogens/{0}/{1}/'.format(sample_ids[cols[1]], cols[1])
mkdir(outdir)
symbolic_link(indir, outdir, cols[0], cols[1])
fh.close()
|
Read-Lab-Confederation/nyc-subway-anthrax-study
|
data/01-accessing-data-and-controls/map-pathogens.py
|
Python
|
mit
| 1,850
|
import torch.nn as nn
import torch.optim as optim
from ptutils.model import Model
from ptutils.model.net import MNIST
from ptutils.contrib.datasource import mnist
from ptutils.datastore import MongoDatastore
from ptutils.coordinator.trainer import Trainer
class MNISTModel(Model):
def __init__(self, *args, **kwargs):
super(MNISTModel, self).__init__(*args, **kwargs)
self.net = MNIST()
self.learning_rate = 1e-3
self.criterion = nn.CrossEntropyLoss()
self.optimizer = optim.Adam(self.net.parameters(), self.learning_rate)
class MNISTTrainer(Trainer):
def __init__(self):
super(MNISTTrainer, self).__init__()
self.exp_id = 'my_experiment'
self.model = MNISTModel()
self.datastore = MongoDatastore('test_mnist', 'testcol')
self.datasource = mnist.MNISTSource()
def step(self, input, target):
super(MNISTTrainer, self).step(input, target)
# Save anything you would like
self.datastore.save({'step': self.global_step,
'loss': self.model._loss.data[0]})
def run(self):
        print(self.to_params())
super(MNISTTrainer, self).run()
params = {
'name': 'mnist_trainer',
'exp_id': 'my_experiment',
'model': {MNISTModel: {}},
'my_datastore': {MongoDatastore: {'database_name': 'test_mnist',
'collection_name': 'testcol'}},
'my_datasource': {mnist.MNISTSource: {}}}
mnist_trainer = Trainer.from_params(params)
# OR
trainer = MNISTTrainer()
# trainer.run()
|
alexandonian/ptutils
|
examples/mnist.py
|
Python
|
mit
| 1,580
|
from collections import Counter
import numpy as np
def calculate_pmf(y):
"""
calculate the probability mass function given sample data.
"""
assert(len(y.shape) == 1)
# values = np.unique(y)
counter = Counter(y)
for k, v in counter.items():
counter[k] = v / len(y)
return counter
def calculate_entropy(pmf):
"""
calculate entropy of a probability mass function
Inputs
------
- pmf: list or numpy array, dimension (N, ), probability mass function, or dict
"""
if isinstance(pmf, dict):
pmf = list(pmf.values())
entropy = sum([-p * np.log2(p) for p in pmf])
print("entropy: ", entropy, pmf)
return entropy
def calculate_gini_index(pmf: list):
if isinstance(pmf, dict):
pmf = list(pmf.values())
return 1 - sum([p**2 for p in pmf])
def calculate_variance(X):
mu = np.mean(X)
return np.sum([(x - mu)**2 for x in X])
def calculate_cross_entropy(Y, T):
return sum([-T[i] * np.log2(y) for i, y in enumerate(Y)])
def precision_score(Y, T):
"""
    calculate precision (TP / (TP + FP)) for binary classification with labels 0 and 1
"""
if not isinstance(Y, np.ndarray):
Y = np.array(Y)
if not isinstance(T, np.ndarray):
T = np.array(T)
tp = (Y[T == 1] == 1) # true positive
pp = Y == 1 # predicted positive: true positive + false positive
precision = np.sum(tp) / np.sum(pp)
return precision
def recall_score(Y, T):
if not isinstance(Y, np.ndarray):
Y = np.array(Y)
if not isinstance(T, np.ndarray):
T = np.array(T)
tp = (Y[T == 1] == 1) # true positive
    sp = T == 1  # all sample positives: true positive + false negative
recall = np.sum(tp) / np.sum(sp)
return recall
def f1_score(Y, T):
"""
F1 score is the harmonic mean of precision and recall
"""
precision = precision_score(Y, T)
recall = recall_score(Y, T)
f1 = 2*precision*recall/(precision + recall)
print(precision, recall, f1, format(f1, '.6f'))
return f1
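# Worked examples (standard definitions, shown here for clarity):
#   calculate_pmf(np.array([0, 0, 1, 1, 1]))     -> Counter({1: 0.6, 0: 0.4})
#   calculate_entropy([0.5, 0.5])                -> 1.0 bit
#   calculate_gini_index([0.5, 0.5])             -> 0.5
#   precision_score([1, 1, 0, 0], [1, 0, 1, 0])  -> 0.5 (1 true positive out of 2 predicted positives)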
|
Alexoner/skynet
|
skynet/metrics/__init__.py
|
Python
|
mit
| 2,038
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Sync's doctype and docfields from txt files to database
perms will get synced only if none exist
"""
import frappe
import os, sys
from frappe.modules.import_file import import_file_by_path
from frappe.utils import get_path, cstr
def sync_all(force=0, verbose=False):
for app in frappe.get_installed_apps():
sync_for(app, force, verbose=verbose)
frappe.clear_cache()
def sync_for(app_name, force=0, sync_everything = False, verbose=False):
for module_name in frappe.local.app_modules.get(app_name) or []:
folder = os.path.dirname(frappe.get_module(app_name + "." + module_name).__file__)
walk_and_sync(folder, force, sync_everything, verbose=verbose)
def walk_and_sync(start_path, force=0, sync_everything = False, verbose=False):
"""walk and sync all doctypes and pages"""
modules = []
document_type = ['doctype', 'page', 'report', 'print_format']
for path, folders, files in os.walk(start_path):
# sort folders so that doctypes are synced before pages or reports
for dontwalk in (".git", "locale", "public"):
if dontwalk in folders:
folders.remove(dontwalk)
folders.sort()
if sync_everything or (os.path.basename(os.path.dirname(path)) in document_type):
for f in files:
f = cstr(f)
if f.endswith(".json"):
doc_name = f.split(".json")[0]
if doc_name == os.path.basename(path):
module_name = path.split(os.sep)[-3]
doctype = path.split(os.sep)[-2]
name = path.split(os.sep)[-1]
if import_file_by_path(os.path.join(path, f), force=force) and verbose:
print module_name + ' | ' + doctype + ' | ' + name
frappe.db.commit()
return modules
|
cadencewatches/frappe
|
frappe/model/sync.py
|
Python
|
mit
| 1,785
|
import functools
try:
unicode_str = unicode
except NameError:
unicode_str = str
try:
memoryview
except NameError:
memoryview = bytes
def metaclass(mcs):
def _decorator(cls):
attrs = dict(vars(cls))
try:
if isinstance(cls.__slots__, str):
slots = (cls.__slots__, )
else:
slots = cls.__slots__
for slot in slots:
if slot.startswith('__') and not slot.endswith('__'):
slot = '_{cls}{slot}'.format(cls=cls.__name__, slot=slot)
attrs.pop(slot, None)
except AttributeError:
pass
for prop in '__weakref__', '__dict__':
attrs.pop(prop, None)
return mcs(cls.__name__, cls.__bases__, attrs)
return _decorator
class ReferenceType(type):
def __call__(cls, *args, **kwargs):
if len(args) == 1 and len(kwargs) == 0:
if isinstance(args[0], cls):
return args[0]
return super(ReferenceType, cls).__call__(*args, **kwargs)
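# Illustrative sketch of the two helpers above (hypothetical class name):
#
#   @metaclass(ReferenceType)
#   class Resource(object):
#       pass
#
#   first = Resource()
#   assert Resource(first) is first  # ReferenceType.__call__ returns an existing instance as-is
#
# metaclass() rebuilds the class through the given metaclass, dropping the name-mangled private
# __slots__ descriptors and the implicit __dict__/__weakref__ attributes so the rebuild succeeds.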
def to_bytes(obj, encoding='utf-8', error_callback=None):
try:
if isinstance(obj, (bytes, bytearray, memoryview)):
return bytes(obj)
if obj is None:
return b''
try:
return obj.__bytes__()
except AttributeError:
return unicode_str(obj).encode(encoding)
except Exception as error:
if error_callback is not None:
error_callback(error)
raise
def coroutine(fn):
@functools.wraps(fn)
def _fn(*args, **kwargs):
co = fn(*args, **kwargs)
co.send(None)
return co
return _fn
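# coroutine() primes a generator-based coroutine so callers can .send() to it immediately, e.g.:
#
#   @coroutine
#   def echo():
#       while True:
#           received = yield
#           print(received)
#
#   echo().send('hello')  # prints 'hello' without a preliminary next()/send(None)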
def import_module(path):
module = path.rpartition('.')[2]
return __import__(path, fromlist=(module, ))
from .lazy import Lazy, CachedDescriptor, cached_property
|
renskiy/marnadi
|
marnadi/utils/__init__.py
|
Python
|
mit
| 1,859
|
from ConfigParser import SafeConfigParser
class Config():
def __init__(self, path):
self.config = SafeConfigParser()
self.config.read(path)
def get_aws_keys(self):
key = self.config.get('aws', 'key')
secret_key = self.config.get('aws', 'secret_key')
return {'key': key,
'secret': secret_key}
def get_bucket_name(self):
return self.config.get('aws', 'bucket_name')
def get_encrypt_password(self):
return self.config.get('core', 'encrypt_password')
|
hirokikana/s3-encfs-fuse
|
s3encfs/config.py
|
Python
|
mit
| 548
|
try:
import ujson as json
except ImportError:
import json
import falcon
from falcon_dbapi.exceptions import ParamException
class BaseResource(object):
"""
Base resource class that you would probably want to use to extend all of your other resources
"""
def __init__(self, objects_class):
"""
:param objects_class: class represent single element of object lists that suppose to be returned
"""
self.objects_class = objects_class
@staticmethod
def render_response(result, req, resp, status=falcon.HTTP_OK):
"""
:param result: Data to be returned in the response
:param req: Falcon request
:type req: falcon.request.Request
:param resp: Falcon response
:type resp: falcon.response.Response
:param status: HTTP status code
:type status: str
"""
resp.body = result
resp.status = status
def serialize(self, obj):
"""
Converts the object to an external representation.
If the object is serializable, no conversion is necessary.
:param obj: single instance of `objects_class`
:return: python json serializable object like dicts / lists / strings / ints and so on...
Example:
.. code-block:: python
return {'id': obj.id, 'name': obj.name}
"""
return obj
def deserialize(self, data):
"""
Converts an external representation to values that can be assigned to an instance of `objects_class`.
:param data: a dictionary
:type data: dict
:return: a dictionary with converted values
:rtype: dict
"""
if data is None:
return {}
return data
def get_schema(self, objects_class):
"""
Gets a JSON Schema (http://json-schema.org) for current objects class.
:param objects_class: class represent single element of object lists that suppose to be returned
:return: a JSON Schema
:rtype: dict
"""
raise NotImplementedError
def clean(self, data):
"""
Called after :func:`deserialize`, might perform more complex data filtering and validation.
:param data:
:type data: dict
:return: a tuple of data and errors after additional cleanup
"""
errors = {}
result = {}
for key, value in data.items():
valid_func = getattr(self, 'clean_%s' % key, None)
if not valid_func:
result[key] = value
continue
try:
result[key] = valid_func(value)
except ParamException as e:
errors.setdefault(key, []).append(str(e))
return result, errors
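    # Sketch of the clean_<field> hook described above (hypothetical subclass and field name):
    #
    #   class UserResource(BaseCollectionResource):
    #       def clean_email(self, value):
    #           if '@' not in value:
    #               raise ParamException('invalid email address')
    #           return value.lower()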
def get_param_or_post(self, req, name, default=None, pop_params=True):
"""
Gets specified param from request params or body.
If found in params, it's removed.
:param req: Falcon request
:type req: falcon.request.Request
:param name: param name
:type name: str
:param default: Default value
:param pop_params: if True, will pop from req params
:type pop_params: bool
:return: param extracted from query params or request body
"""
if name in req.params:
return req.params.pop(name) if pop_params else req.params.get(name)
elif 'doc' in req.context:
return req.context['doc'].get(name, default)
return default
def on_options(self, req, resp, **kwargs):
"""
Returns allowed methods in the Allow HTTP header.
Also returns a JSON Schema, if supported by current resource.
:param req: Falcon request
:type req: falcon.request.Request
:param resp: Falcon response
:type resp: falcon.response.Response
"""
allowed_methods = []
for method in falcon.HTTP_METHODS:
try:
responder = getattr(self, 'on_' + method.lower())
except AttributeError:
# resource does not implement this method
pass
else:
# Usually expect a method, but any callable will do
if callable(responder):
allowed_methods.append(method)
resp.set_header('Allow', ', '.join(sorted(allowed_methods)))
result = {'name': self.objects_class.__name__}
if self.objects_class.__doc__:
result['description'] = self.objects_class.__doc__.strip()
try:
result['schema'] = self.get_schema(self.objects_class)
except NotImplementedError:
pass
self.render_response(result, req, resp)
class BaseCollectionResource(BaseResource):
"""
Base resource class for working with collections of records.
Allows to:
* GET - fetch a list of records, filtered by using query params
* POST - create a new record
"""
PARAM_LIMIT = 'limit'
PARAM_OFFSET = 'offset'
PARAM_ORDER = 'order'
PARAM_TOTAL_COUNT = 'total_count'
PARAM_TOTALS = 'totals'
PARAM_SEARCH = 'search'
PARAM_TEXT_QUERY = 'q'
AGGR_GROUPBY = 'group_by'
AGGR_GROUPLIMIT = 'group_limit'
def __init__(self, objects_class, max_limit=None):
"""
        :param objects_class: class representing a single element of the object lists that are supposed to be returned
        :param max_limit: maximum number of elements to return by default
:type max_limit: int
"""
super().__init__(objects_class)
self.max_limit = max_limit
def get_param_totals(self, req):
"""
Gets the totals and total_count params and normalizes them into a single list.
:param req: Falcon request
:type req: falcon.request.Request
:return: total expressions
:rtype: list
"""
totals = self.get_param_or_post(req, self.PARAM_TOTALS, [])
if totals:
if isinstance(totals, str):
totals = json.loads(totals)
if isinstance(totals, dict):
totals = [totals]
else:
totals = list(map(lambda x: x if isinstance(x, dict) else {x: None}, totals))
total_count = self.get_param_or_post(req, self.PARAM_TOTAL_COUNT)
if total_count and not list(filter(lambda x: 'count' in x, totals)):
totals.append({'count': None})
return totals
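    # Illustrative example of the normalization above: with the query params
    # totals='[{"sum": "price"}, "count"]' and total_count=1, this method
    # returns [{'sum': 'price'}, {'count': None}].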
def get_queryset(self, req, resp):
"""
Return a query object used to fetch data.
:param req: Falcon request
:type req: falcon.request.Request
:param resp: Falcon response
:type resp: falcon.response.Response
:return: a query from `object_class`
"""
raise NotImplementedError
def get_total_objects(self, queryset, totals):
"""
Return total number of results in a query.
:param queryset: queryset object from :func:`get_queryset`
:param totals: a list of dicts with aggregate function as key and column as value
:type totals: list
:return: dict with totals calculated in this query, ex. total_count with number of results
:rtype: dict
"""
if not totals:
return {}
for total in totals:
if len(total) > 1 or 'count' not in total or total['count'] is not None:
raise falcon.HTTPBadRequest('Invalid attribute', 'Only _count_ is supported in the _totals_ param')
return {'total_count': queryset.count()}
def get_object_list(self, queryset, limit=None, offset=None):
"""
Return a list of objects returned from a query.
:param queryset: queryset from :func:`get_queryset`
:param limit: number of elements to return, `max_limit` will be used if None
:type limit: int
        :param offset: number of elements to skip from the beginning of the list
:type offset: int
:return: sliced results based on `limit` and `offset`
"""
if limit is None:
limit = self.max_limit
offset = 0 if offset is None else max(offset, 0)
if limit is not None:
if self.max_limit is not None:
limit = min(limit, self.max_limit)
limit = max(limit, 0)
return queryset[offset:limit + offset]
return queryset[offset:]
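    # Illustrative example of the clamping above: with max_limit=100,
    # limit=500 and offset=10 this returns queryset[10:110]; with both
    # limit and max_limit left as None it returns queryset[10:].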
def get_data(self, req, resp):
"""
:param req: Falcon request
:type req: falcon.request.Request
:param resp: Falcon response
:type resp: falcon.response.Response
"""
limit = self.get_param_or_post(req, self.PARAM_LIMIT, self.max_limit)
offset = self.get_param_or_post(req, self.PARAM_OFFSET, 0)
totals = self.get_param_totals(req)
queryset = self.get_queryset(req, resp)
totals = self.get_total_objects(queryset, totals)
object_list = self.get_object_list(queryset, int(limit) if limit is not None else None, int(offset))
return object_list, totals
def on_get(self, req, resp):
"""
Gets a list of records.
:param req: Falcon request
:type req: falcon.request.Request
:param resp: Falcon response
:type resp: falcon.response.Response
"""
object_list, totals = self.get_data(req, resp)
total_count = totals.pop('total_count', None)
result = {'results': [self.serialize(obj) for obj in object_list],
'total': total_count,
'returned': len(object_list)}
result.update(totals)
headers = {'x-api-total': str(total_count) if isinstance(total_count, int) else '',
'x-api-returned': str(result['returned'])}
resp.set_headers(headers)
self.render_response(result, req, resp)
def on_head(self, req, resp):
"""
:param req: Falcon request
:type req: falcon.request.Request
:param resp: Falcon response
:type resp: falcon.response.Response
"""
object_list, totals = self.get_data(req, resp)
total_count = totals.pop('total_count', None)
headers = {'x-api-total': str(total_count) if isinstance(total_count, int) else '',
'x-api-returned': str(len(object_list))}
resp.set_headers(headers)
resp.status = falcon.HTTP_NO_CONTENT
def create(self, req, resp, data):
"""
Create a new or update an existing record using provided data.
:param req: Falcon request
:type req: falcon.request.Request
:param resp: Falcon response
:type resp: falcon.response.Response
:param data:
:type data: dict
:return: created or updated object
"""
raise NotImplementedError
def on_post(self, req, resp, *args, **kwargs):
"""
Add (create) a new record to the collection.
:param req: Falcon request
:type req: falcon.request.Request
:param resp: Falcon response
:type resp: falcon.response.Response
"""
data = self.deserialize(req.context['doc'] if 'doc' in req.context else None)
data, errors = self.clean(data)
if errors:
result = {'errors': errors}
status_code = falcon.HTTP_BAD_REQUEST
else:
result = self.create(req, resp, data)
status_code = falcon.HTTP_CREATED
self.render_response(result, req, resp, status_code)
def on_put(self, req, resp, *args, **kwargs):
"""
Add (create) a new record to the collection.
:param req: Falcon request
:type req: falcon.request.Request
:param resp: Falcon response
:type resp: falcon.response.Response
"""
self.on_post(req, resp, *args, **kwargs)
class BaseSingleResource(BaseResource):
"""
Base resource class for working with a single record.
Allows to:
* GET - fetch a single record, filtered by using query params
* PUT - update a (whole) record
* PATCH - update parts of a single record
"""
def get_object(self, req, resp, path_params, for_update=False):
"""
Return a single object.
:param req: Falcon request
:type req: falcon.request.Request
:param resp: Falcon response
:type resp: falcon.response.Response
:param path_params: positional params from the api route
:type path_params: dict
:param for_update: if the object is going to be updated or deleted
:type for_update: bool
:return: a query from `object_class`
"""
raise NotImplementedError
def on_get(self, req, resp, *args, **kwargs):
"""
Gets a single record.
:param req: Falcon request
:type req: falcon.request.Request
:param resp: Falcon response
:type resp: falcon.response.Response
"""
obj = self.get_object(req, resp, kwargs)
self.render_response(self.serialize(obj), req, resp)
def on_head(self, req, resp, *args, **kwargs):
"""
:param req: Falcon request
:type req: falcon.request.Request
:param resp: Falcon response
:type resp: falcon.response.Response
"""
# call get_object to check if it exists
self.get_object(req, resp, kwargs)
resp.status = falcon.HTTP_NO_CONTENT
def delete(self, req, resp, obj):
"""
Delete an existing record.
:param req: Falcon request
:type req: falcon.request.Request
:param resp: Falcon response
:type resp: falcon.response.Response
:param obj: the object to delete
"""
deleted = obj.delete()
if deleted == 0:
raise falcon.HTTPConflict('Conflict', 'Resource found but conditions violated')
def on_delete(self, req, resp, *args, **kwargs):
"""
Deletes a single record.
:param req: Falcon request
:type req: falcon.request.Request
:param resp: Falcon response
:type resp: falcon.response.Response
"""
obj = self.get_object(req, resp, kwargs, for_update=True)
self.delete(req, resp, obj)
self.render_response({}, req, resp)
def update(self, req, resp, data, obj):
"""
Create a new or update an existing record using provided data.
:param req: Falcon request
:type req: falcon.request.Request
:param resp: Falcon response
:type resp: falcon.response.Response
:param data:
:type data: dict
:param obj: the object to update
:return: created or updated object
"""
raise NotImplementedError
def on_put(self, req, resp, *args, **kwargs):
"""
Updates a single record.
This should set all missing fields to default values, but we're not going to be so strict.
:param req: Falcon request
:type req: falcon.request.Request
:param resp: Falcon response
:type resp: falcon.response.Response
"""
obj = self.get_object(req, resp, kwargs, for_update=True)
data = self.deserialize(req.context['doc'] if 'doc' in req.context else None)
data, errors = self.clean(data)
if errors:
result = {'errors': errors}
status_code = falcon.HTTP_BAD_REQUEST
else:
result = self.update(req, resp, data, obj)
status_code = falcon.HTTP_OK
self.render_response(result, req, resp, status_code)
def on_patch(self, req, resp, *args, **kwargs):
"""
Updates a single record. Changes only specified fields.
:param req: Falcon request
:type req: falcon.request.Request
:param resp: Falcon response
:type resp: falcon.response.Response
"""
return self.on_put(req, resp, *args, **kwargs)
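# --- Illustrative usage sketch (assumption, not part of the original API) ---
# A minimal in-memory collection resource showing which hooks a subclass of
# BaseCollectionResource typically fills in. The ITEMS store, field names and
# the '/items' route below are hypothetical examples.
ITEMS = [{'id': 1, 'name': 'first'}, {'id': 2, 'name': 'second'}]


class ItemCollectionResource(BaseCollectionResource):
    def get_queryset(self, req, resp):
        # Any sliceable sequence can serve as the "queryset" for this sketch.
        return ITEMS

    def get_total_objects(self, queryset, totals):
        # Plain lists have no zero-argument count() method, so compute it here.
        return {'total_count': len(queryset)} if totals else {}

    def create(self, req, resp, data):
        ITEMS.append(data)
        return self.serialize(data)


# Wiring the sketch into an application (hypothetical route):
#   app = falcon.API()
#   app.add_route('/items', ItemCollectionResource(dict, max_limit=100))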
|
Opentopic/falcon-api
|
falcon_dbapi/resources/base.py
|
Python
|
mit
| 16,078
|
import os.path
# Don't write any files, just create and discard content?
DRY_RUN = False
# Just get one?
SINGLE_TOPIC = False
# output directory
PATH_PREFIX = "output" + os.path.sep
# MySQL table prefix
TABLE_PREFIX = "forumgr_"
# how to determine internal links
def isInternalLink( url ):
return u"darth-arth.de" in url
|
mrwonko/phpbb2_to_html
|
phpbb2_to_html/config.py
|
Python
|
mit
| 326
|
#!/usr/bin/python
# encoding: utf-8
import sys
import re
from workflow import Workflow, web
import sqlite3
import urllib2
import json
import difflib
__version__ = "0.3"
log = None
def rest(url):
base = "http://api.wormbase.org/rest/field/"
req = urllib2.Request(base + url)
req.add_header("Content-Type","application/json")
resp = urllib2.urlopen(req)
content = json.loads(resp.read())
return content
def main(wf):
args = wf.args[0].strip()
log.debug(args)
conn = sqlite3.connect('wb.db')
conn.row_factory = sqlite3.Row
c = conn.cursor()
q = '''SELECT * FROM idset WHERE idset MATCH "{q}*" ORDER BY sequence ASC LIMIT 50 '''.format(q=args)
c.execute(q)
rows = c.fetchall()
#log.debug(a)
# Exact hit?
row_match = [x for x in rows if x["match"] == args]
    if len(rows) > 1 and len(row_match) != 1:
# Display search results
rows = sorted(rows, key=lambda x: difflib.SequenceMatcher(None, x["match"], args).ratio(), reverse=True)
for row in rows:
wf.add_item(row["match"],row["WBID"], autocomplete=row["match"], valid=False, icon="icon.png")
    elif len(rows) == 1 and len(row_match) != 1:
# Have user input changed to match column
row = rows[0]
wf.add_item(row["match"],row["WBID"], autocomplete=row["match"], valid=False, icon="icon.png")
elif len(row_match) == 1:
row = row_match[0]
if row["live"] == "Dead":
wf.add_item("Dead ID",row["match"], valid=False, icon="death.png")
else:
wormbase_url = "http://www.wormbase.org/species/c_elegans/gene/" + row["WBID"]
wf.add_item(row["sequence"],"Public Name", arg=wormbase_url, copytext=row["sequence"], valid=True, icon="icon.png")
wf.add_item(row["gene"],"Gene Name", arg=wormbase_url, copytext=row["gene"], valid=True, icon="icon.png")
wf.add_item(row["WBID"],"Wormbase ID", arg=wormbase_url, copytext=row["WBID"], valid=True, icon="icon.png")
# Position
pos = rest("gene/{WBID}/location".format(WBID=row[0]))
pos = pos["location"]["genomic_position"]["data"][0]["label"]
wormbrowse = "http://www.wormbase.org/tools/genome/gbrowse/c_elegans_PRJNA13758/?name=" + pos
wf.add_item(pos,"Genomic Position", arg=wormbrowse, valid=True, icon="loc.png")
# Description
desc = rest("gene/{WBID}/concise_description".format(WBID=row[0]))
desc = desc["concise_description"]["data"]["text"]
wf.add_item(desc,"Description", valid=False, icon="icon.png")
# Orthologs
q = '''SELECT * FROM orthodb WHERE WBID == "{WBID}" ORDER BY sequence ASC LIMIT 50 '''.format(WBID=row["WBID"])
c.execute(q)
ortho_set = c.fetchall()
for ortholog in ortho_set:
ortho_title = "{ortho_name} ({species})".format(ortho_name=ortholog["ortholog_name"],
species = ortholog["species"])
ortholog_link = "http://www.wormbase.org/db/get?name={ortholog};class=Gene".format(ortholog=ortholog["ortholog"])
wf.add_item(ortho_title,"Ortholog - " + ortholog["ortholog"], arg=ortholog_link, copytext=ortho_title, valid=True, icon="ortholog.png")
# Publications
pub = rest("gene/{WBID}/references".format(WBID=row[0]))
if pub["references"]["data"] is not None:
for i in pub["references"]["data"]:
first_author = i["author"][0]["label"]
pub_id = i["name"]["id"]
colsep = ""
try:
journal = i["journal"][0]
except:
journal = ""
try:
volume = i["volume"][0]
except:
volume = ""
try:
page = i["page"][0]
colsep = ":"
except:
page = ""
try:
year = i["year"]
except:
year = "-"
try:
title = i["title"][0]
except:
title = ""
URL = "http://www.wormbase.org/resources/paper/" + pub_id
subtitle = "{first_author} et al. {journal} {volume}{colsep} {page} ({year})".format(**locals())
wf.add_item(title, subtitle, arg=URL, valid=True, copytext=title, icon="document.png")
else:
wf.add_item("No Results", valid=False)
wf.send_feedback()
if __name__ == '__main__':
wf = Workflow(update_settings={
'github_slug': 'danielecook/wormbase-alfred',
'version': __version__,
'frequency': 7})
# Assign Workflow logger to a global variable, so all module
# functions can access it without having to pass the Workflow
# instance around
log = wf.logger
sys.exit(wf.run(main))
|
danielecook/wormbase-alfred
|
query_wb.py
|
Python
|
mit
| 5,156
|
#!/usr/bin/env python
import sys
from optparse import OptionParser
from pypipeline import scrape
from pypipeline.scrape import Scraper
if __name__ == "__main__":
usage = "%prog [top_dir...]"
parser = OptionParser(usage=usage)
scrape.add_options(parser)
(options, args) = parser.parse_args(sys.argv)
if len(args) < 2:
parser.print_help()
sys.exit(1)
scraper = Scraper(options)
for top_dir in args[1:]:
scraper.scrape(top_dir)
|
mgormley/pypipeline
|
scripts/scrape_exps.py
|
Python
|
mit
| 486
|
NODE_GROUP_TYPE_CPU = "cpu"
NODE_GROUP_TYPE_GPU = "gpu"
NODE_GROUP_TYPE_SYSTEM = "system"
ALL_NODE_GROUP_TYPES = {
NODE_GROUP_TYPE_CPU,
NODE_GROUP_TYPE_GPU,
NODE_GROUP_TYPE_SYSTEM,
}
|
sigopt/sigopt-python
|
sigopt/orchestrate/node_groups.py
|
Python
|
mit
| 190
|
from __future__ import unicode_literals
__version__ = "5.0.0-beta"
|
indictranstech/internal-frappe
|
frappe/__version__.py
|
Python
|
mit
| 67
|
from django.db import models
from mptt.models import MPTTModel, TreeForeignKey
from django.db.models import ImageField, signals
from django.dispatch import dispatcher
import os
def get_image_path(instance, filename):
return os.path.join(str(instance.name), filename)
class Location(MPTTModel):
name = models.CharField(max_length=50, unique=True)
parent = TreeForeignKey('self', null=True, blank=True, related_name='children')
description = models.TextField()
image = models.ImageField(upload_to=get_image_path , blank=True)
def __unicode__(self):
return self.name
class Category(models.Model):
category = models.CharField(max_length=50, unique=True)
def __unicode__(self):
return self.category
class PlaceOfInterest(models.Model):
location = TreeForeignKey(Location)
category = models.ForeignKey(Category, blank=True)
name = models.CharField(max_length=50)
description = models.TextField()
def __unicode__(self):
return self.name
|
Reinaldowijaya/explorind
|
explorind_project/locations/models.py
|
Python
|
mit
| 960
|
from django.conf.urls import patterns, url
from premises.views import (ContentionDetailView, HomeView,
ArgumentCreationView, PremiseCreationView,
PremiseDeleteView, ContentionJsonView,
PremiseEditView, ArgumentUpdateView,
ArgumentPublishView, ArgumentUnpublishView,
ArgumentDeleteView, AboutView, NewsView,
UpdatedArgumentsView, ReportView, RemoveReportView,
ControversialArgumentsView, TosView, SearchView,
NotificationsView, PremiseSupportView, PremiseUnsupportView,
StatsView)
urlpatterns = patterns('',
url(r'^$', HomeView.as_view(), name='home'),
url(r'^notifications$', NotificationsView.as_view(), name='notifications'),
url(r'^news$', NewsView.as_view(),
name='contentions_latest'),
url(r'^search', SearchView.as_view(),
name='contentions_search'),
url(r'^updated$', UpdatedArgumentsView.as_view(),
name='contentions_updated'),
url(r'^controversial', ControversialArgumentsView.as_view(),
name='contentions_controversial'),
url(r'^stats$', StatsView.as_view(),
name='contentions_stats'),
url(r'^about$',
AboutView.as_view(),
name='about'),
url(r'^tos$',
TosView.as_view(),
name='tos'),
url(r'^new-argument$',
ArgumentCreationView.as_view(),
name='new_argument'),
url(r'^(?P<slug>[\w-]+)/edit$',
ArgumentUpdateView.as_view(),
name='contention_edit'),
url(r'^(?P<slug>[\w-]+)\.json$',
ContentionJsonView.as_view(),
name='contention_detail_json'),
url(r'^(?P<slug>[\w-]+)$',
ContentionDetailView.as_view(),
name='contention_detail'),
url(r'^(?P<slug>[\w-]+)/(?P<premise_id>[\d+]+)$',
ContentionDetailView.as_view(),
name='premise_detail'),
url(r'^(?P<slug>[\w-]+)/premises/(?P<pk>[0-9]+)/unsupport',
PremiseUnsupportView.as_view(),
name='unsupport_premise'),
url(r'^(?P<slug>[\w-]+)/premises/(?P<pk>[0-9]+)/support',
PremiseSupportView.as_view(),
name='support_premise'),
url(r'^(?P<slug>[\w-]+)/premises/(?P<pk>[0-9]+)/delete',
PremiseDeleteView.as_view(),
name='delete_premise'),
url(r'^(?P<slug>[\w-]+)/premises/(?P<pk>[0-9]+)/report',
ReportView.as_view(),
name='report_premise'),
url(r'^(?P<slug>[\w-]+)/premises/(?P<pk>[0-9]+)/unreport',
RemoveReportView.as_view(),
name='unreport_premise'),
url(r'^(?P<slug>[\w-]+)/premises/(?P<pk>[0-9]+)/new',
PremiseCreationView.as_view(),
name='insert_premise'),
url(r'^(?P<slug>[\w-]+)/premises/(?P<pk>[0-9]+)',
PremiseEditView.as_view(),
name='edit_premise'),
url(r'^(?P<slug>[\w-]+)/premises/new',
PremiseCreationView.as_view(),
name='new_premise'),
url(r'^(?P<slug>[\w-]+)/publish',
ArgumentPublishView.as_view(),
name='contention_publish'),
url(r'^(?P<slug>[\w-]+)/unpublish',
ArgumentUnpublishView.as_view(),
name='contention_unpublish'),
url(r'^(?P<slug>[\w-]+)/delete',
ArgumentDeleteView.as_view(),
name='contention_delete'),
)
|
Arthur2e5/arguman.org
|
web/premises/urls.py
|
Python
|
mit
| 3,336
|
HTTP_CODE_OK = 200
HTTP_FORBIDDEN = 403
HTTP_TOO_MANY_REQUESTS = 429
BIZ_CODE_OK = 0
URL_PREFIX = "https://api.housecanary.com"
DEFAULT_VERSION = "v2"
|
housecanary/hc-api-python
|
housecanary/constants.py
|
Python
|
mit
| 153
|
# -*- coding: utf-8 -*-
"""
Datebook year views
"""
import datetime
import calendar
from django.views import generic
from braces.views import LoginRequiredMixin
from datebook.mixins import DateKwargsMixin
class DatebookYearView(LoginRequiredMixin, DateKwargsMixin, generic.TemplateView):
"""
Datebook year view
Display the twelve months of the given year with link and infos for the
existing datebooks
"""
template_name = "datebook/year.html"
def get_context_data(self, **kwargs):
context = super(DatebookYearView, self).get_context_data(**kwargs)
_curr = datetime.date.today()
# Get all datebooks for the given year
queryset = self.object.datebook_set.filter(period__year=self.year).order_by('period')[0:13]
_datebook_map = dict(map(lambda x: (x.period.month, x), queryset))
        # Fill the found datebooks into the month map; months without a datebook will have
# None instead of a Datebook instance
datebooks_map = [(datetime.datetime(self.year, i, 1), _datebook_map.get(i)) for i in range(1,13)]
context.update({
'year_current': _curr.year,
'is_current_year': (self.year == _curr.year),
'datebooks_map': datebooks_map,
})
return context
def get(self, request, *args, **kwargs):
self.object = self.author
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
|
sveetch/django-datebook
|
datebook/views/year.py
|
Python
|
mit
| 1,523
|
"""Module responsible for accessing the Run data in the FileStore backend."""
import datetime
import json
import os
from sacredboard.app.data.filestorage.filestorecursor import FileStoreCursor
from sacredboard.app.data.rundao import RunDAO
class FileStoreRunDAO(RunDAO):
"""Implements the Data Access Object for File Storage."""
def __init__(self, directory: str):
self.directory = directory
def delete(self, run_id):
"""Delete run."""
raise NotImplementedError("Deleting runs is not supported with the FileStore backend yet.")
def get_runs(self, sort_by=None, sort_direction=None, start=0, limit=None, query={"type": "and", "filters": []}):
"""
Return all runs in the file store.
If a run is corrupt, e.g. missing files, it is skipped.
:param sort_by: NotImplemented
:param sort_direction: NotImplemented
:param start: NotImplemented
:param limit: NotImplemented
:param query: NotImplemented
:return: FileStoreCursor
"""
all_run_ids = os.listdir(self.directory)
def run_iterator():
blacklist = set(["_sources"])
for id in all_run_ids:
if id in blacklist:
continue
try:
yield self.get(id)
except FileNotFoundError:
# An incomplete experiment is a corrupt experiment.
# Skip it for now.
# TODO
pass
count = len(all_run_ids)
return FileStoreCursor(count, run_iterator())
def get(self, run_id):
"""
Return the run associated with a particular `run_id`.
:param run_id:
:return: dict
:raises FileNotFoundError
"""
config = _read_json(_path_to_config(self.directory, run_id))
run = _read_json(_path_to_run(self.directory, run_id))
try:
info = _read_json(_path_to_info(self.directory, run_id))
except IOError:
info = {}
return _create_run(run_id, run, config, info)
def _create_run(run_id, runjson, configjson, infojson):
runjson["_id"] = run_id
runjson["config"] = configjson
runjson["info"] = infojson
# TODO probably want a smarter way of detecting
# which values have type "time."
for k in ["start_time", "stop_time", "heartbeat"]:
if k in runjson:
runjson[k] = datetime.datetime.strptime(runjson[k],
'%Y-%m-%dT%H:%M:%S.%f')
return runjson
CONFIG_JSON = "config.json"
RUN_JSON = "run.json"
INFO_JSON = "info.json"
def _path_to_file(basepath, run_id, file_name):
return os.path.join(basepath, str(run_id), file_name)
def _path_to_config(basepath, run_id):
return _path_to_file(basepath, str(run_id), CONFIG_JSON)
def _path_to_info(basepath, run_id):
return _path_to_file(basepath, str(run_id), INFO_JSON)
def _path_to_run(basepath, run_id):
return os.path.join(basepath, str(run_id), RUN_JSON)
def _read_json(path_to_json):
with open(path_to_json) as f:
return json.load(f)
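# --- Illustrative usage sketch (assumption, not part of the original module) ---
# The directory path is hypothetical; it should be a Sacred FileStorageObserver
# output directory with one sub-directory per run, each holding run.json,
# config.json and (optionally) info.json.
#
#   dao = FileStoreRunDAO("/path/to/sacred/runs")
#   cursor = dao.get_runs()   # FileStoreCursor wrapping an iterator of run dicts
#   run = dao.get("1")        # dict with "_id", "config", "info" and parsed timestamps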
|
chovanecm/sacredboard
|
sacredboard/app/data/filestorage/rundao.py
|
Python
|
mit
| 3,187
|
#pvprograms.weebly.com
#Say 100
import http.client
data = "token: Q37PK3OVsjXJGSEWjQ8ANjc5\n"
conn = http.client.HTTPConnection("codeabbey.sourceforge.net")
conn.request("POST", "/say-100.php", data)
response = conn.getresponse()
#print(str(response.status) + " " + response.reason)
test = response.read()
print(test)
test = str(test)
test = test.split(' ')
a = ''
i = 0
while True:
if test[1][i].isdigit():
a += test[1][i]
else:
break
i += 1
answer = 100 - int(a)
answer = "answer: " + str(answer) + "\r\n"
conn.request("POST", "/say-100.php", data + answer)
test = conn.getresponse()
print(test.read())
|
paolo215/problems
|
PY/Say 100.py
|
Python
|
mit
| 637
|
"""
Copyright (c) 2013 Wei-Cheng Pan <legnaleurc@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import logging
class Logger(object):
def __init__(self):
self._logger = logging.getLogger(__name__)
self._fh = logging.FileHandler(u'/tmp/kczzz.log')
fmt = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
self._fh.setFormatter(fmt)
self._logger.setLevel(logging.DEBUG)
self._logger.addHandler(self._fh)
def debug(self, msg):
self._logger.debug(msg)
self._flush()
def info(self, msg):
self._logger.info(msg)
self._flush()
def warn(self, msg):
        self._logger.warning(msg)
self._flush()
def error(self, msg):
self._logger.error(msg)
self._flush()
def _flush(self):
self._fh.flush()
|
legnaleurc/kczzz
|
util.sikuli/util.py
|
Python
|
mit
| 1,841
|
import struct
import socket
from time import sleep
class Client:
def __init__(self, host, port):
self.host = host
self.port = port
self.connection = None
self.response = None
def connect(self):
data = 'hello'
message = struct.pack('>i', len(data)) + str.encode(data)
self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.connection.connect((self.host, self.port))
self.connection.send(message)
res = self.connection.recv(512)
#print('1: '+ res[4:].decode('utf-8'))
self.response = res[4:].decode('utf-8')
def close(self):
self.connection.close()
|
amol9/mayloop
|
mayloop/test/mock_client.py
|
Python
|
mit
| 604
|
"""tests for lineage.py"""
# pylint: disable=missing-function-docstring, protected-access
from unittest.mock import MagicMock, patch
import pytest
from ncbitax2lin import lineage
@patch("multiprocessing.cpu_count", return_value=999, autospec=True)
def test__calc_num_procs(mock_cpu_count: MagicMock) -> None:
actual = lineage._calc_num_procs()
expected = 6
assert actual == expected
mock_cpu_count.assert_called_once_with()
@pytest.mark.parametrize(
"num_vals, num_chunks, chunk_size",
[
(10, 3, 4),
(11, 3, 4),
(12, 3, 4),
(13, 3, 5),
(14, 3, 5),
(15, 3, 5),
(16, 3, 6),
],
)
def test__calc_chunk_size_procs(
num_vals: int, num_chunks: int, chunk_size: int
) -> None:
actual = lineage._calc_chunk_size(num_vals, num_chunks)
expected = chunk_size
assert actual == expected
assert isinstance(chunk_size, int)
|
zyxue/ncbitax2lin
|
tests/test_lineage.py
|
Python
|
mit
| 919
|
"""
Revision ID: 0193_add_ft_billing_timestamps
Revises: 0192_drop_provider_statistics
Create Date: 2018-05-22 10:23:21.937262
"""
from alembic import op
import sqlalchemy as sa
revision = '0193_add_ft_billing_timestamps'
down_revision = '0192_drop_provider_statistics'
def upgrade():
op.add_column('ft_billing', sa.Column('updated_at', sa.DateTime(), nullable=True))
op.add_column('ft_billing', sa.Column('created_at', sa.DateTime(), nullable=True))
def downgrade():
op.drop_column('ft_billing', 'created_at')
op.drop_column('ft_billing', 'updated_at')
|
alphagov/notifications-api
|
migrations/versions/0193_add_ft_billing_timestamps.py
|
Python
|
mit
| 578
|
import logging
from protorpc import remote
from protorpc.wsgi import service
from tbans.models.service.messages import TBANSResponse, PingRequest, VerificationRequest, VerificationResponse
package = 'tbans'
class TBANSService(remote.Service):
""" The Blue Alliance Notification Service.... Service """
def __init__(self):
import firebase_admin
try:
self._firebase_app = firebase_admin.get_app('tbans')
except ValueError:
self._firebase_app = firebase_admin.initialize_app(name='tbans')
def _validate_authentication(self):
import tba_config
# Allow all requests in debug mode
if tba_config.DEBUG:
return
incoming_app_id = self.request_state.headers.get('X-Appengine-Inbound-Appid', None)
if incoming_app_id is None:
raise remote.ApplicationError('Unauthenticated')
from google.appengine.api.app_identity import app_identity
if not app_identity.get_application_id() == incoming_app_id:
raise remote.ApplicationError('Unauthenticated')
def _application_error(self, message):
""" Helper method to log and return a 400 TBANSResponse """
# TODO: Monitor these
logging.error(message)
return TBANSResponse(code=400, message=message)
@remote.method(PingRequest, TBANSResponse)
def ping(self, request):
""" Immediately dispatch a Ping to either FCM or a webhook """
self._validate_authentication()
if request.fcm and request.webhook:
return self._application_error('Cannot ping both FCM and webhook')
from tbans.models.notifications.ping import PingNotification
notification = PingNotification()
if request.fcm:
# An FCM request can still exist, I believe. It can take some notification and delivery options
from tbans.requests.fcm_request import FCMRequest
fcm_request = FCMRequest(self._firebase_app, notification, token=request.fcm.token, topic=request.fcm.topic, condition=request.fcm.condition)
logging.info('Ping - {}'.format(str(fcm_request)))
message_id = fcm_request.send()
logging.info('Ping Sent - {}'.format(str(message_id)))
return TBANSResponse(code=200, message=message_id)
elif request.webhook:
from tbans.requests.webhook_request import WebhookRequest
webhook_request = WebhookRequest(notification, request.webhook.url, request.webhook.secret)
logging.info('Ping - {}'.format(str(webhook_request)))
webhook_request.send()
logging.info('Ping Sent')
return TBANSResponse(code=200)
else:
return self._application_error('Did not specify FCM or webhook to ping')
@remote.method(VerificationRequest, VerificationResponse)
def verification(self, request):
""" Immediately dispatch a Verification to a webhook """
self._validate_authentication()
from tbans.models.notifications.verification import VerificationNotification
notification = VerificationNotification(request.webhook.url, request.webhook.secret)
from tbans.requests.webhook_request import WebhookRequest
webhook_request = WebhookRequest(notification, request.webhook.url, request.webhook.secret)
logging.info('Verification - {}'.format(str(webhook_request)))
webhook_request.send()
logging.info('Verification Key - {}'.format(notification.verification_key))
return VerificationResponse(code=200, verification_key=notification.verification_key)
app = service.service_mappings([('/tbans.*', TBANSService)])
|
jaredhasenklein/the-blue-alliance
|
tbans/tbans_service.py
|
Python
|
mit
| 3,719
|
# -*- coding: utf-8 -*-
""" Utilities """
super_entity = s3mgr.model.super_entity
super_link = s3mgr.model.super_link
super_key = s3mgr.model.super_key
s3_action_buttons = s3base.S3CRUD.action_buttons
# -----------------------------------------------------------------------------
def s3_register_validation():
""" JavaScript client-side validation """
# Client-side validation (needed to check for passwords being same)
if request.cookies.has_key("registered"):
password_position = "last"
else:
password_position = "first"
if deployment_settings.get_auth_registration_mobile_phone_mandatory():
mobile = """
mobile: {
required: true
},
"""
else:
mobile = ""
if deployment_settings.get_auth_registration_organisation_mandatory():
org1 = """
organisation_id: {
required: true
},
"""
org2 = "".join(( """,
organisation_id: '""", str(T("Enter your organization")), """',
""" ))
else:
org1 = ""
org2 = ""
domains = ""
if deployment_settings.get_auth_registration_organisation_hidden() and \
request.controller != "admin":
table = auth.settings.table_user
table.organisation_id
table = db.auth_organisation
query = (table.organisation_id != None) & \
(table.domain != None)
whitelists = db(query).select(table.organisation_id,
table.domain)
if whitelists:
domains = """$( '#auth_user_organisation_id__row' ).hide();
S3.whitelists = {
"""
count = 0
for whitelist in whitelists:
count += 1
domains += "'%s': %s" % (whitelist.domain,
whitelist.organisation_id)
if count < len(whitelists):
domains += ",\n"
else:
domains += "\n"
domains += """};
$( '#regform #auth_user_email' ).blur( function() {
var email = $( '#regform #auth_user_email' ).val();
var domain = email.split('@')[1];
if (undefined != S3.whitelists[domain]) {
$( '#auth_user_organisation_id' ).val(S3.whitelists[domain]);
} else {
$( '#auth_user_organisation_id__row' ).show();
}
});
"""
# validate signup form on keyup and submit
# @ToDo: //remote: 'emailsurl'
script = "".join(( domains, """
$('#regform').validate({
errorClass: 'req',
rules: {
first_name: {
required: true
},""", mobile, """
email: {
required: true,
email: true
},""", org1, """
password: {
required: true
},
password_two: {
required: true,
equalTo: '.password:""", password_position, """'
}
},
messages: {
firstname: '""", str(T("Enter your firstname")), """',
password: {
required: '""", str(T("Provide a password")), """'
},
password_two: {
required: '""", str(T("Repeat your password")), """',
equalTo: '""", str(T("Enter the same password as above")), """'
},
email: {
required: '""", str(T("Please enter a valid email address")), """',
minlength: '""", str(T("Please enter a valid email address")), """'
}""", org2, """
},
errorPlacement: function(error, element) {
error.appendTo( element.parent().next() );
},
submitHandler: function(form) {
form.submit();
}
});""" ))
response.s3.jquery_ready.append( script )
# -----------------------------------------------------------------------------
def s3_get_utc_offset():
""" Get the current UTC offset for the client """
offset = None
if auth.is_logged_in():
# 1st choice is the personal preference (useful for GETs if user wishes to see times in their local timezone)
offset = session.auth.user.utc_offset
if offset:
offset = offset.strip()
if not offset:
# 2nd choice is what the client provides in the hidden field (for form POSTs)
offset = request.post_vars.get("_utc_offset", None)
if offset:
offset = int(offset)
utcstr = offset < 0 and "UTC +" or "UTC -"
hours = abs(int(offset/60))
minutes = abs(int(offset % 60))
offset = "%s%02d%02d" % (utcstr, hours, minutes)
# Make this the preferred value during this session
if auth.is_logged_in():
session.auth.user.utc_offset = offset
if not offset:
# 3rd choice is the server default (what most clients should see the timezone as)
offset = deployment_settings.L10n.utc_offset
return offset
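# Worked example for the offset formatting above (illustrative values): a
# browser east of UTC reports a negative JavaScript getTimezoneOffset(), so
# offset=-120 gives utcstr="UTC +", hours=2, minutes=0 -> "UTC +0200", while
# offset=300 (five hours west of UTC) gives "UTC -0500".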
# Store last value in session
session.s3.utc_offset = s3_get_utc_offset()
# -----------------------------------------------------------------------------
# Phone number requires
# (defined in s3validators.py)
s3_single_phone_requires = IS_MATCH(single_phone_number_pattern)
s3_phone_requires = IS_MATCH(multi_phone_number_pattern,
error_message=T("Invalid phone number!"))
# -----------------------------------------------------------------------------
# Shorteners
# Names - e.g. when used in Dropdowns
# - unused currently?
repr_select = lambda l: len(l.name) > 48 and "%s..." % l.name[:44] or l.name
# Comments Fields
def comments_represent(text, showlink=True):
if len(text) < 80:
return text
elif not showlink:
return "%s..." % text[:36]
else:
import uuid
unique = uuid.uuid4()
represent = DIV(
DIV(text,
_id=unique,
_class="hidden popup",
_onmouseout="$('#%s').hide();" % unique
),
A("%s..." % text[:36],
_onmouseover="$('#%s').removeClass('hidden').show();" % unique,
),
)
return represent
# -----------------------------------------------------------------------------
# Make URLs clickable
s3_url_represent = lambda url: (url and [A(url, _href=url, _target="blank")] or [""])[0]
# -----------------------------------------------------------------------------
# Date/Time representation functions
s3_date_represent = S3DateTime.date_represent
s3_time_represent = S3DateTime.time_represent
s3_datetime_represent = S3DateTime.datetime_represent
s3_utc_represent = lambda dt: s3_datetime_represent(dt, utc=True)
s3_date_represent_utc = lambda date: s3_date_represent(date, utc=True)
# -----------------------------------------------------------------------------
def s3_filename(filename):
"""
Convert a string into a valid filename on all OS
http://stackoverflow.com/questions/295135/turn-a-string-into-a-valid-filename-in-python/698714#698714
"""
import string
import unicodedata
validFilenameChars = "-_.() %s%s" % (string.ascii_letters, string.digits)
filename = unicode(filename)
cleanedFilename = unicodedata.normalize("NFKD",
filename).encode("ASCII", "ignore")
return "".join(c for c in cleanedFilename if c in validFilenameChars)
# -----------------------------------------------------------------------------
def s3_component_form(r, **attr):
""" Custom Method to create a PDF for a component form """
exporter = s3base.S3PDF()
return exporter(r, **attr)
# -----------------------------------------------------------------------------
def s3_include_debug():
"""
Generates html to include:
the js scripts listed in ../static/scripts/tools/sahana.js.cfg
the css listed in ../static/scripts/tools/sahana.css.cfg
"""
# Disable printing
class dummyStream:
""" dummyStream behaves like a stream but does nothing. """
def __init__(self): pass
def write(self,data): pass
def read(self,data): pass
def flush(self): pass
def close(self): pass
save_stdout = sys.stdout
    # Redirect all print output to the dummy stream
sys.stdout = dummyStream()
scripts_dir_path = "applications/%s/static/scripts" % request.application
# Get list of script files
sys.path.append( "%s/tools" % scripts_dir_path)
import mergejsmf
configDictCore = {
"web2py": scripts_dir_path,
"T2": scripts_dir_path,
"S3": scripts_dir_path
}
configFilename = "%s/tools/sahana.js.cfg" % scripts_dir_path
(fs, files) = mergejsmf.getFiles(configDictCore, configFilename)
# Enable print
sys.stdout = save_stdout
include = ""
for file in files:
include = '%s\n<script src="/%s/static/scripts/%s" type="text/javascript"></script>' \
% ( include,
request.application,
file)
include = "%s\n <!-- CSS Syles -->" % include
f = open("%s/tools/sahana.css.cfg" % scripts_dir_path, "r")
files = f.readlines()
for file in files[:-1]:
include = '%s\n<link href="/%s/static/styles/%s" rel="stylesheet" type="text/css" />' \
% ( include,
request.application,
file[:-1]
)
f.close()
return XML(include)
# -----------------------------------------------------------------------------
def s3_represent_multiref(table, opt, represent=None, separator=", "):
""" Produce a representation for a list:reference field. """
if represent is None:
if "name" in table.fields:
represent = lambda r: r and r.name or UNKNOWN_OPT
if isinstance(opt, (int, long, str)):
query = (table.id == opt)
else:
query = (table.id.belongs(opt))
if "deleted" in table.fields:
query = query & (table.deleted == False)
records = db(query).select()
if records:
try:
first = represent(records[0])
rep_function = represent
except TypeError:
first = represent % records[0]
rep_function = lambda r: represent % r
# NB join only operates on strings, and some callers provide A().
results = [first]
for record in records[1:]:
results.append(separator)
results.append(rep_function(record))
# Wrap in XML to allow showing anchors on read-only pages, else
# Web2py will escape the angle brackets, etc. The single-record
# location represent produces A() (unless told not to), and we
# want to show links if we can.
return XML(DIV(*results))
else:
return UNKNOWN_OPT
# -----------------------------------------------------------------------------
def s3_table_links(reference):
"""
Return a dict of tables & their fields which have references to the
specified table
@deprecated: to be replaced by db[tablename]._referenced_by
- used by controllers/gis.py & pr.py
"""
tables = {}
for table in db.tables:
count = 0
for field in db[table].fields:
if str(db[table][field].type) == "reference %s" % reference:
if count == 0:
tables[table] = {}
tables[table][count] = field
count += 1
return tables
# -----------------------------------------------------------------------------
def s3_rheader_tabs(r, tabs=[], paging=False):
"""
Constructs a DIV of component links for a S3RESTRequest
@param tabs: the tabs as list of tuples (title, component_name, vars),
where vars is optional
@param paging: add paging buttons previous/next to the tabs
@todo: move into S3CRUD
"""
rheader_tabs = []
tablist = []
previous = next = None
# Check for r.method tab
mtab = r.component is None and \
[t[1] for t in tabs if t[1] == r.method] and True or False
for i in xrange(len(tabs)):
record_id = r.id
title, component = tabs[i][:2]
vars_in_request = True
if len(tabs[i]) > 2:
_vars = Storage(tabs[i][2])
for k,v in _vars.iteritems():
if r.get_vars.get(k) != v:
vars_in_request = False
break
if "viewing" in r.get_vars:
_vars.viewing = r.get_vars.viewing
else:
_vars = r.get_vars
here = False
if component and component.find("/") > 0:
function, component = component.split("/", 1)
if not component:
component = None
else:
if "viewing" in _vars:
tablename, record_id = _vars.viewing.split(".", 1)
function = tablename.split("_", 1)[1]
else:
function = r.function
record_id = r.id
if function == r.name or \
(function == r.function and "viewing" in _vars):
here = r.method == component or not mtab
if i == len(tabs)-1:
tab = Storage(title=title, _class = "tab_last")
else:
tab = Storage(title=title, _class = "tab_other")
if i > 0 and tablist[i-1]._class == "tab_here":
next = tab
if component:
if r.component and r.component.alias == component and vars_in_request or \
r.custom_action and r.method == component:
tab.update(_class = "tab_here")
previous = i and tablist[i-1] or None
if record_id:
args = [record_id, component]
else:
args = [component]
vars = Storage(_vars)
if "viewing" in vars:
del vars["viewing"]
tab.update(_href=URL(function, args=args, vars=vars))
else:
if not r.component and len(tabs[i]) <= 2 and here:
tab.update(_class = "tab_here")
previous = i and tablist[i-1] or None
vars = Storage(_vars)
args = []
if function != r.name:
if "viewing" not in vars and r.id:
vars.update(viewing="%s.%s" % (r.tablename, r.id))
#elif "viewing" in vars:
elif not tabs[i][1]:
if "viewing" in vars:
del vars["viewing"]
args = [record_id]
else:
if "viewing" not in vars and record_id:
args = [record_id]
tab.update(_href=URL(function, args=args, vars=vars))
tablist.append(tab)
rheader_tabs.append(SPAN(A(tab.title, _href=tab._href), _class=tab._class))
if rheader_tabs:
if paging:
if next:
rheader_tabs.insert(0, SPAN(A(">", _href=next._href), _class="tab_next_active"))
else:
rheader_tabs.insert(0, SPAN(">", _class="tab_next_inactive"))
if previous:
rheader_tabs.insert(0, SPAN(A("<", _href=previous._href), _class="tab_prev_active"))
else:
rheader_tabs.insert(0, SPAN("<", _class="tab_prev_inactive"))
rheader_tabs = DIV(rheader_tabs, _class="tabs")
else:
rheader_tabs = ""
return rheader_tabs
# -----------------------------------------------------------------------------
def s3_rheader_resource(r):
"""
Identify the tablename and record ID for the rheader
@param r: the current S3Request
"""
_vars = r.get_vars
if "viewing" in _vars:
tablename, record_id = _vars.viewing.rsplit(".", 1)
record = db[tablename][record_id]
else:
tablename = r.tablename
record = r.record
return (tablename, record)
# -----------------------------------------------------------------------------
def sort_dict_by_values(adict):
"""
Sort a dict by value and return an OrderedDict.
- used by models/05_irs.py
"""
return OrderedDict(sorted(adict.items(), key = lambda item: item[1]))
# -----------------------------------------------------------------------------
# CRUD functions
# -----------------------------------------------------------------------------
def s3_barchart(r, **attr):
"""
Provide simple barcharts for resource attributes
SVG representation uses the SaVaGe library
Need to request a specific value to graph in request.vars
used as REST method handler for S3Resources
@todo: replace by a S3MethodHandler
"""
# Get all the variables and format them if needed
valKey = r.vars.get("value")
nameKey = r.vars.get("name")
if not nameKey and r.table.get("name"):
# Try defaulting to the most-commonly used:
nameKey = "name"
# The parameter value is required; it must be provided
# The parameter name is optional; it is useful, but we don't need it
# Here we check to make sure we can find value in the table,
# and name (if it was provided)
if not r.table.get(valKey):
raise HTTP (400, s3mgr.xml.json_message(success=False, status_code="400", message="Need a Value for the Y axis"))
elif nameKey and not r.table.get(nameKey):
raise HTTP (400, s3mgr.xml.json_message(success=False, status_code="400", message=nameKey + " attribute not found in this resource."))
start = request.vars.get("start")
if start:
start = int(start)
limit = r.vars.get("limit")
if limit:
limit = int(limit)
settings = r.vars.get("settings")
if settings:
settings = json.loads(settings)
else:
settings = {}
if r.representation.lower() == "svg":
r.response.headers["Content-Type"] = "image/svg+xml"
from savage import graph
bar = graph.BarGraph(settings=settings)
title = deployment_settings.modules.get(module).name_nice
bar.setTitle(title)
if nameKey:
xlabel = r.table.get(nameKey).label
if xlabel:
bar.setXLabel(str(xlabel))
else:
bar.setXLabel(nameKey)
ylabel = r.table.get(valKey).label
if ylabel:
bar.setYLabel(str(ylabel))
else:
bar.setYLabel(valKey)
try:
records = r.resource.load(start, limit)
for entry in r.resource:
val = entry[valKey]
# Can't graph None type
if not val is None:
if nameKey:
name = entry[nameKey]
else:
name = None
bar.addBar(name, val)
return bar.save()
# If the field that was provided was not numeric, we have problems
except ValueError:
raise HTTP(400, "Bad Request")
else:
raise HTTP(501, body=BADFORMAT)
# -----------------------------------------------------------------------------
def s3_copy(r, **attr):
"""
Copy a record
used as REST method handler for S3Resources
@todo: move into S3CRUDHandler
"""
redirect(URL(args="create", vars={"from_record":r.id}))
# -----------------------------------------------------------------------------
def s3_import_prep(import_data):
"""
Example for an import pre-processor
@param import_data: a tuple of (resource, tree)
"""
resource, tree = import_data
#print "Import to %s" % resource.tablename
#print s3mgr.xml.tostring(tree, pretty_print=True)
# Use this to skip the import:
#resource.skip_import = True
# Import pre-process
# This can also be a Storage of {tablename = function}*
s3mgr.import_prep = s3_import_prep
# -----------------------------------------------------------------------------
def s3_rest_controller(prefix=None, resourcename=None, **attr):
"""
Helper function to apply the S3Resource REST interface
@param prefix: the application prefix
@param resourcename: the resource name (without prefix)
@param attr: additional keyword parameters
Any keyword parameters will be copied into the output dict (provided
that the output is a dict). If a keyword parameter is callable, then
it will be invoked, and its return value will be added to the output
dict instead. The callable receives the S3Request as its first and
only parameter.
CRUD can be configured per table using:
s3mgr.configure(tablename, **attr)
*** Redirection:
create_next URL to redirect to after a record has been created
update_next URL to redirect to after a record has been updated
delete_next URL to redirect to after a record has been deleted
*** Form configuration:
list_fields list of names of fields to include into list views
subheadings Sub-headings (see separate documentation)
listadd Enable/Disable add-form in list views
*** CRUD configuration:
editable Allow/Deny record updates in this table
deletable Allow/Deny record deletions in this table
insertable Allow/Deny record insertions into this table
copyable Allow/Deny record copying within this table
*** Callbacks:
create_onvalidation Function/Lambda for additional record validation on create
create_onaccept Function/Lambda after successful record insertion
update_onvalidation Function/Lambda for additional record validation on update
update_onaccept Function/Lambda after successful record update
onvalidation Fallback for both create_onvalidation and update_onvalidation
onaccept Fallback for both create_onaccept and update_onaccept
ondelete Function/Lambda after record deletion
"""
# Parse the request
r = s3mgr.parse_request(prefix, resourcename)
# Set method handlers
r.set_handler("copy", s3_copy)
r.set_handler("barchart", s3_barchart)
r.set_handler("analyze", s3base.S3Cube())
r.set_handler("import", s3base.S3PDF(),
http = ["GET", "POST"],
representation="pdf")
r.set_handler("import", s3base.S3Importer(), transform=True)
# Execute the request
output = r(**attr)
if isinstance(output, dict) and (not r.method or r.method in ("analyze", "search")):
if response.s3.actions is None:
# Add default action buttons
prefix, name, table, tablename = r.target()
authorised = s3_has_permission("update", tablename)
# If the component has components itself, then use the
# component's native controller for CRU(D) => make sure
# you have one, or override by native=False
if r.component and s3mgr.model.has_components(table):
native = output.get("native", True)
else:
native = False
# Get table config
model = s3mgr.model
listadd = model.get_config(tablename, "listadd", True)
editable = model.get_config(tablename, "editable", True) and \
not auth.permission.ownership_required(table, "update")
deletable = model.get_config(tablename, "deletable", True)
copyable = model.get_config(tablename, "copyable", False)
# URL to open the resource
open_url = r.resource.crud._linkto(r,
authorised=authorised,
update=editable,
native=native)("[id]")
# Add action buttons for Open/Delete/Copy as appropriate
s3_action_buttons(r,
deletable=deletable,
copyable=copyable,
editable=editable,
read_url=open_url,
update_url=open_url)
# Override Add-button, link to native controller and put
# the primary key into vars for automatic linking
if native and not listadd and \
s3_has_permission("create", tablename):
label = s3base.S3CRUD.crud_string(tablename,
"label_create_button")
hook = r.resource.components[name]
fkey = "%s.%s" % (name, hook.fkey)
vars = request.vars.copy()
vars.update({fkey: r.id})
url = URL(prefix, name, args=["create"], vars=vars)
add_btn = A(label, _href=url, _class="action-btn")
output.update(add_btn=add_btn)
elif r.method != "import":
response.s3.actions = None
return output
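# --- Illustrative usage sketch (assumption, not part of this file) ---
# A typical web2py controller delegating to the REST interface, using a few of
# the per-table settings documented in the docstring above. The "project_task"
# table and its field names are hypothetical.
#
#   def task():
#       s3mgr.configure("project_task",
#                       list_fields=["id", "name", "status"],
#                       editable=True,
#                       deletable=False,
#                       create_next=URL(c="project", f="task"))
#       return s3_rest_controller("project", "task")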
# END =========================================================================
|
flavour/helios
|
models/00_utils.py
|
Python
|
mit
| 25,333
|
import math
import fvh2, fvh
import supercircle
masterCircleSet=set()
circlecalled = 0
checkcirclescalled = 0
MINOFFSET=5
class Circle():
def __init__(self,x,y,r,lm=None, keep=True):
global circlecalled
circlecalled+=1
self.keep = keep
self.center=(x,y)
self.radius=r
self.checkString=(int(x)/MINOFFSET*MINOFFSET,int(y)/MINOFFSET*MINOFFSET,r)
masterCircleSet.add(self.checkString)
self.color="black"
if not lm:
self.lm=fvh2.fvh.MyTurtle()
self.lm.tracer(False)
else:
self.lm=lm
#self.draw()
def draw(self):
#self.lm=fvh2.fvh.MyTurtle()
self.lm.pencolor(self.color)
self.lm.setup()
self.lm.penup()
fvh2.circlearound(self.center, self.radius,self.lm)
if not self.keep:
self.lm.undo()
self.lm.undo()
def drawred(self):
self.lm.pencolor('red')
self.lm.penup()
fvh2.circlearound(self.center, self.radius,self.lm)
def drawwhite(self):
self.lm.pencolor('white')
self.lm.penup()
fvh2.circlearound(self.center, self.radius,self.lm)
def setcolor(self, color):
self.color=color
def realCards(self):
self.realcards=[]
self.lm.pu()
for x in range(4):
self.lm.goto(self.center)
self.lm.seth(self.lm.towards(0,0)+90*x)
self.lm.fd(self.radius)
self.realcards.append(Circle(self.lm.xcor(), self.lm.ycor(), self.radius/2))
def extendedCards(self, numberOfexteriorCircles):
self.cardinals=[]
angle=360.0/numberOfexteriorCircles
for x in range(numberOfexteriorCircles):
self.lm.pu()
self.lm.goto(self.center)
self.lm.seth(self.lm.towards(0,0)+180+x*angle)
self.lm.fd(self.radius)
a=Circle(self.lm.xcor(), self.lm.ycor(), self.radius/2, self.lm, self.keep)
self.cardinals.append(a)
if (self.radius/2>=4):
a.extendedCards(numberOfexteriorCircles)
for card in a.cardinals:
self.cardinals.append(card)
def innerextendedCards(self, numberOfexteriorCircles):
self.cardinals=[]
angle=360.0/numberOfexteriorCircles
for x in range(numberOfexteriorCircles):
self.lm.pu()
self.lm.goto(self.center)
self.lm.seth(self.lm.towards(0,0)+x*angle)
self.lm.fd(self.radius)
a=Circle(self.lm.xcor(), self.lm.ycor(), self.radius/2, self.lm, self.keep)
self.cardinals.append(a)
if (self.radius/2>=4):
a.innerextendedCards(numberOfexteriorCircles)
for card in a.cardinals:
self.cardinals.append(card)
def differentcards(self, numberOfexteriorCircles):
self.cardinals=[]
angle=360.0/numberOfexteriorCircles
for x in range(numberOfexteriorCircles):
self.lm.pu()
self.lm.goto(self.center)
self.lm.seth(self.lm.towards(0,0)+180+x*angle)
self.lm.fd(self.radius)
self.cardinals.append(Circle(self.lm.xcor(), self.lm.ycor(), self.radius/2, self.lm, self.keep))
def addCardinals(self):
self.cardinals=[]
self.cardinals.append(Circle(self.center[0]+self.radius, self.center[1], self.radius/2))
self.cardinals.append(Circle(self.center[0]-self.radius, self.center[1], self.radius/2))
self.cardinals.append(Circle(self.center[0], self.center[1]+self.radius, self.radius/2))
self.cardinals.append(Circle(self.center[0], self.center[1]-self.radius, self.radius/2))
#for eachcircle in self.cardinals:
# eachcircle.draw()
def comparetoCardinals(self):
self.primarytocardinals=[]
for eachcircle in self.cardinals:
intersectionpoints=circleinter(self.center, self.radius, eachcircle.center, eachcircle.radius)
self.primarytocardinals.append(Circle(intersectionpoints[0][0], intersectionpoints[0][1], self.radius))
self.primarytocardinals.append(Circle(intersectionpoints[1][0], intersectionpoints[1][1], self.radius))
def checkCircles(circle1, circle2):
global checkcirclescalled
checkcirclescalled+=1
points=circleinter(circle1.center, circle1.radius, circle2.center, circle2.radius)
if points:
points=((float("%.2f" % points[0][0]),float("%.2f" % points[0][1])),(float("%.2f" % points[1][0]),float("%.2f" % points[1][1])))
return points
def circleinter((x0, y0), r0, (x1, y1), r1):
"""
    This function accepts two circles and determines where they intersect.
    Each circle is submitted as (x, y), r where (x, y) is the center of the
    circle and r is the radius.
"""
dx=float(x1-x0)
dy=float(y1-y0)
d=(dx**2+dy**2)**0.5
if (d>(r0+r1)):
return None
if (d< math.fabs(r0-r1)):
return None
if (d==0):
return None
a = ((r0*r0) - (r1*r1) + (d*d)) / (2.0 * d)
x2 = x0 + (dx * a/d)
y2 = y0 + (dy * a/d)
h = ((r0*r0) - (a*a))**0.5
rx = -dy * (h/d)
ry = dx * (h/d)
xi = x2 + rx
xi_prime = x2 - rx
yi = y2 + ry
yi_prime = y2 - ry
return (xi,yi),(xi_prime,yi_prime)
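# Worked example for circleinter (illustrative values): circles centered at
# (0, 0) and (6, 0), both with radius 5, give d=6, a=3, h=4, so the chord
# midpoint is (3, 0) and the two intersection points are (3.0, 4.0) and
# (3.0, -4.0):
#   circleinter((0, 0), 5, (6, 0), 5)  ->  ((3.0, 4.0), (3.0, -4.0))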
def differentCircles(primaryCircleRadius, secondaryCircleRadius, numberOfSecondaryCircles, secondaryCircleTheta,lm=None):
filenameStrings=['primaryCircleRadius','secondaryCircleRadius','numberOfSecondaryCircles','secondaryCircleTheta']
filenameValues=[primaryCircleRadius, secondaryCircleRadius, numberOfSecondaryCircles, secondaryCircleTheta]
filenameZip=zip(filenameStrings,filenameValues)
filename=''
for values in filenameZip:
filename=filename+values[0]+str(values[1])
filename='circles/'+filename+'.eps'
if not lm:
lm=fvh2.fvh.MyTurtle()
lm.setup()
lm.tracer(False)
ts=lm.getscreen()
circlelist=[]
newlist=[]
primaryCircle=Circle(0,0,primaryCircleRadius,lm)
primaryCircle.draw()
circlelist.append(primaryCircle)
for circle in range(numberOfSecondaryCircles):
lm.pu()
lm.goto(primaryCircle.center)
lm.seth(circle*secondaryCircleTheta)
lm.fd(primaryCircleRadius)
temp=Circle(lm.xcor(), lm.ycor(), secondaryCircleRadius, lm)
temp.draw()
circlelist.append(temp)
totalbefore=len(circlelist)
totalafter=0
counter=0
while(totalbefore!=totalafter):
totalbefore=len(circlelist)
for firstCircleplace in range(len(circlelist)):
firstCircle=circlelist[firstCircleplace]
for secondCircleplace in range(firstCircleplace,len(circlelist)):
secondCircle=circlelist[secondCircleplace]
thisRadius=min(firstCircle.radius, secondCircle.radius)/2
if (thisRadius<10):
continue
newCircles=checkCircles(firstCircle, secondCircle)
if newCircles:
if ((int(newCircles[0][0])/MINOFFSET*MINOFFSET,int(newCircles[0][1])/MINOFFSET*MINOFFSET,thisRadius) not in masterCircleSet):
temp=Circle(newCircles[0][0], newCircles[0][1], thisRadius,lm)
temp.draw()
newlist.append(temp)
if ((int(newCircles[1][0])/MINOFFSET*MINOFFSET,int(newCircles[1][1])/MINOFFSET*MINOFFSET,thisRadius) not in masterCircleSet):
temp=Circle(newCircles[1][0], newCircles[1][1], thisRadius,lm)
temp.draw()
newlist.append(temp)
ts.update()
counter=len(circlelist)
for item in newlist:
item.draw()
circlelist.append(item)
ts.update()
newlist=[]
totalafter=len(circlelist)
fvh2.savetocircles(lm,filename)
def differentCirclesforViewing(primaryCircleRadius, secondaryCircleRadius, numberOfSecondaryCircles, secondaryCircleTheta,lm=None):
"""
This is designed with something like the following in mind:
lm=circleint.fvh2.fvh.MyTurtle()
for a in range(2,100):
for b in range(3600):
            circleint.differentCirclesforViewing(200,15,a,b/10.0,lm)
lm.clear()
and then make a gif of the results
"""
global masterCircleSet
masterCircleSet=set()
filenameStrings=['primaryCircleRadius','secondaryCircleRadius','numberOfSecondaryCircles','secondaryCircleTheta']
filenameValues=[primaryCircleRadius, secondaryCircleRadius, numberOfSecondaryCircles, secondaryCircleTheta]
filenameZip=zip(filenameStrings,filenameValues)
filename=''
for values in filenameZip:
filename=filename+values[0]+'%03d' % values[1]
filename='circles/testa/'+filename+'.eps'
if not lm:
lm=fvh2.fvh.MyTurtle()
lm.setup()
lm.tracer(False)
ts=lm.getscreen()
circlelist=[]
newlist=[]
primaryCircle=Circle(0,0,primaryCircleRadius,lm)
primaryCircle.draw()
circlelist.append(primaryCircle)
colorcounter=0
for circle in range(numberOfSecondaryCircles):
lm.pu()
lm.goto(primaryCircle.center)
lm.seth((secondaryCircleTheta+(circle*secondaryCircleTheta))%360)
lm.fd(primaryCircleRadius)
temp=Circle(lm.xcor(), lm.ycor(), secondaryCircleRadius, lm)
temp.setcolor(fvh.allcolors[colorcounter%len(fvh.allcolors)])
colorcounter+=1
temp.draw()
circlelist.append(temp)
totalbefore=len(circlelist)
totalafter=0
counter=0
while(totalbefore!=totalafter):
totalbefore=len(circlelist)
for firstCircleplace in range(len(circlelist)):
firstCircle=circlelist[firstCircleplace]
for secondCircleplace in range(len(circlelist)):
secondCircle=circlelist[secondCircleplace]
thisRadius=min(firstCircle.radius, secondCircle.radius)/2
if (thisRadius<10):
continue
newCircles=checkCircles(firstCircle, secondCircle)
if newCircles:
if ((int(newCircles[0][0])/MINOFFSET*MINOFFSET,int(newCircles[0][1])/MINOFFSET*MINOFFSET,thisRadius) not in masterCircleSet):
temp=Circle(newCircles[0][0], newCircles[0][1], thisRadius,lm)
temp.setcolor(fvh.allcolors[colorcounter%len(fvh.allcolors)])
colorcounter+=1
temp.draw()
newlist.append(temp)
if ((int(newCircles[1][0])/MINOFFSET*MINOFFSET,int(newCircles[1][1])/MINOFFSET*MINOFFSET,thisRadius) not in masterCircleSet):
temp=Circle(newCircles[1][0], newCircles[1][1], thisRadius,lm)
temp.setcolor(fvh.allcolors[colorcounter%len(fvh.allcolors)])
colorcounter+=1
temp.draw()
newlist.append(temp)
ts.update()
#masterCircleSet=set()
counter=len(circlelist)
for item in newlist:
#item.draw()
circlelist.append(item)
ts.update()
newlist=[]
totalafter=len(circlelist)
#fvh2.savetocircles(lm,filename,aheight=(primaryCircleRadius+secondaryCircleRadius),awidth=(primaryCircleRadius+secondaryCircleRadius),ax=-(primaryCircleRadius+secondaryCircleRadius)/2.0, ay=-(primaryCircleRadius+secondaryCircleRadius)/2.0 )
fvh2.savetocircles(lm,filename,togif=True)#,aheight=(primaryCircleRadius+secondaryCircleRadius),awidth=(primaryCircleRadius+secondaryCircleRadius))#,ax=-(primaryCircleRadius+secondaryCircleRadius)/2.0, ay=-(primaryCircleRadius+secondaryCircleRadius)/2.0 )
def differentCirclesforAnimation(primaryCircleRadius, secondaryCircleRadius, numberOfSecondaryCircles, secondaryCircleTheta,lm=None):
"""
This is designed with something like the following in mind:
lm=circleint.fvh2.fvh.MyTurtle()
for a in range(2,100):
for b in range(3600):
circleint.differentCirclesforAnimation(200,15,a,b/10.0,lm)
lm.clear()
and then make a gif of the results
"""
filenameStrings=['primaryCircleRadius','secondaryCircleRadius','numberOfSecondaryCircles','secondaryCircleTheta']
filenameValues=[primaryCircleRadius, secondaryCircleRadius, numberOfSecondaryCircles, secondaryCircleTheta]
filenameZip=zip(filenameStrings,filenameValues)
filename=''
for values in filenameZip:
filename=filename+values[0]+str(values[1])
filename='circles/neatani/'+filename+'.eps'
if not lm:
lm=fvh2.fvh.MyTurtle()
lm.setup()
lm.tracer(False)
ts=lm.getscreen()
circlelist=[]
newlist=[]
primaryCircle=Circle(0,0,primaryCircleRadius,lm)
#primaryCircle.draw()
circlelist.append(primaryCircle)
colorcounter=0
for circle in range(numberOfSecondaryCircles):
lm.pu()
lm.goto(primaryCircle.center)
lm.seth((secondaryCircleTheta+(circle*secondaryCircleTheta))%360)
lm.fd(primaryCircleRadius)
temp=Circle(lm.xcor(), lm.ycor(), secondaryCircleRadius, lm)
temp.setcolor(fvh.allcolors[colorcounter%len(fvh.allcolors)])
colorcounter+=1
temp.draw()
circlelist.append(temp)
totalbefore=len(circlelist)
totalafter=0
counter=0
while(totalbefore!=totalafter):
totalbefore=len(circlelist)
for firstCircleplace in range(len(circlelist)):
firstCircle=circlelist[firstCircleplace]
for secondCircleplace in range(firstCircleplace,len(circlelist)):
secondCircle=circlelist[secondCircleplace]
thisRadius=min(firstCircle.radius, secondCircle.radius)/2
if (thisRadius<10):
continue
newCircles=checkCircles(firstCircle, secondCircle)
if newCircles:
if ((int(newCircles[0][0])/MINOFFSET*MINOFFSET,int(newCircles[0][1])/MINOFFSET*MINOFFSET,thisRadius) not in masterCircleSet):
temp=Circle(newCircles[0][0], newCircles[0][1], thisRadius,lm)
temp.setcolor(fvh.allcolors[colorcounter%len(fvh.allcolors)])
colorcounter+=1
temp.draw()
newlist.append(temp)
if ((int(newCircles[1][0])/MINOFFSET*MINOFFSET,int(newCircles[1][1])/MINOFFSET*MINOFFSET,thisRadius) not in masterCircleSet):
temp=Circle(newCircles[1][0], newCircles[1][1], thisRadius,lm)
temp.setcolor(fvh.allcolors[colorcounter%len(fvh.allcolors)])
colorcounter+=1
temp.draw()
newlist.append(temp)
ts.update()
counter=len(circlelist)
for item in newlist:
#item.draw()
circlelist.append(item)
ts.update()
newlist=[]
totalafter=len(circlelist)
#fvh2.savetocircles(lm,filename)
def createDrawing(bigdiameter,diameter):
lm=fvh2.fvh.MyTurtle()
lm.setup()
lm.tracer(False)
a=Circle(0,0,bigdiameter,lm)
b=Circle(bigdiameter,0,diameter,lm)
circlelist=[a,b]
totalbefore=len(masterCircleSet)
totalafter=0
newlist=[]
counter=0
#print totalbefore
while((totalbefore!=totalafter) and (len(masterCircleSet)<750)):
#print (circlecalled, checkcirclescalled)
#print totalbefore, totalafter
#raw_input()
print len(masterCircleSet)
totalbefore=len(masterCircleSet)
for firstCircleplace in range(counter,len(circlelist)):
firstCircle=circlelist[firstCircleplace]
for secondCircleplace in range(len(circlelist)):
secondCircle=circlelist[secondCircleplace]
newCircles=checkCircles(firstCircle, secondCircle)
#print newCircles, len(newlist)
#raw_input((totalbefore,totalafter))
if newCircles:
if ((int(newCircles[0][0])/MINOFFSET*MINOFFSET,int(newCircles[0][1])/MINOFFSET*MINOFFSET,diameter) not in masterCircleSet):
newlist.append(Circle(newCircles[0][0], newCircles[0][1], diameter,lm))
else:
print newCircles[0]
if ((int(newCircles[1][0])/MINOFFSET*MINOFFSET,int(newCircles[1][1])/MINOFFSET*MINOFFSET,diameter) not in masterCircleSet):
newlist.append(Circle(newCircles[1][0], newCircles[1][1], diameter,lm))
else:
print newCircles[1]
counter=len(circlelist)
for item in newlist:
item.draw()
circlelist.append(item)
newlist=[]
totalafter=len(masterCircleSet)
lm.tracer(True)
a.lm.tracer(True)
fvh2.savetocircles(a.lm)
def createanotherdrawing(startSize):
a=Circle(0,0,startSize)
smallestsize=startSize
a.addCardinals()
a.lm.undo()
a.lm.undo()
circlelist=[]
circlelist.append(a)
for eachitem in a.cardinals:
circlelist.append(eachitem)
eachitem.lm.undo()
eachitem.lm.undo()
totalbefore=len(masterCircleSet)
totalafter=0
while ((totalbefore!=totalafter)):
print "Just started new while loop. number of circles in circlelist: "+str(len(circlelist))
totalbefore=len(masterCircleSet)
newlist=[]
for firstCircle in circlelist:
for secondCircle in circlelist:
thisDiameter=min(firstCircle.radius, secondCircle.radius)/2
if (thisDiameter<=1):
#print "first break"
break
if thisDiameter<smallestsize:
smallestsize=thisDiameter
print "New Smallest Size: "+ str(smallestsize)
newCircles=checkCircles(firstCircle, secondCircle)
if newCircles:
for x in newCircles:
if ((int(x[0])/MINOFFSET*MINOFFSET, int(x[1])/MINOFFSET*MINOFFSET, thisDiameter) not in masterCircleSet):
newCircle=Circle(x[0], x[1],thisDiameter)
newCircle.draw()
circlelist.append(newCircle)
#for eachCard in newCircle.cardinals:
#circlelist.append(eachCard)
#if (thisDiameter<=1):
#print "second break"
for item in newlist:
circlelist.append(item)
totalafter=len(masterCircleSet)
if (totalafter==totalbefore):
print "no more moves"
fvh2.savetocircles(a.lm)
def yetanotherdrawing(startdiameter,numberofoutsidecircles):
lm=fvh2.fvh.MyTurtle()
lm.setup()
lm.tracer(False)
smallestsize=startdiameter
a=Circle(0,0,startdiameter,lm)
a.lm.undo()
a.lm.undo()
a.differentcards(numberofoutsidecircles)
circlelist=[]
circlelist.append(a)
for eachitem in a.cardinals:
eachitem.lm.undo()
eachitem.lm.undo()
circlelist.append(eachitem)
totalbefore=len(masterCircleSet)
totalafter=0
while ((totalbefore!=totalafter)):
print "Just started new while loop. number of circles in circlelist: "+str(len(circlelist))
totalbefore=len(masterCircleSet)
newlist=[]
for firstCircle in circlelist:
print "new firstCircle : " + str(firstCircle.checkString)
print "Current number of circles in circlelist: "+str(len(circlelist))
#firstCircle.drawred()
for secondCircle in circlelist:
#secondCircle.drawred()
thisDiameter=min(firstCircle.radius, secondCircle.radius)/2.0
if (thisDiameter<=1):
#print "first break"
#secondCircle.draw()
break
if thisDiameter<smallestsize:
smallestsize=thisDiameter
print "New Smallest Size: "+ str(smallestsize)
newCircles=checkCircles(firstCircle, secondCircle)
if newCircles:
for x in newCircles:
if ((int(x[0])/MINOFFSET*MINOFFSET, int(x[1])/MINOFFSET*MINOFFSET, thisDiameter) not in masterCircleSet):
newCircle=Circle(x[0], x[1],thisDiameter,lm)
#newCircle.realCards()
circlelist.append(newCircle)
#for eachCard in newCircle.realcards:
# circlelist.append(eachCard)
#secondCircle.draw()
#if (thisDiameter<=1):
#print "second break"
#firstCircle.draw()
for item in newlist:
circlelist.append(item)
newlist=[]
totalafter=len(masterCircleSet)
if (totalafter==totalbefore):
print "no more moves"
for acircle in circlelist:
acircle.draw()
lm.tracer(True)
fvh2.savetocircles(a.lm)
def yetanotherdrawingagain(startdiameter,numberofoutsidecircles, recursive=False, lm=None):
global masterCircleSet
masterCircleSet=set()
if not lm:
lm=fvh2.fvh.MyTurtle()
lm.setup()
lm.tracer(False)
smallestsize=startdiameter
a=Circle(0,0,startdiameter,lm)
# a.lm.undo()
# a.lm.undo()
a.differentcards(numberofoutsidecircles)
circlelist=[]
circlelist.append(a)
for eachitem in a.cardinals:
#eachitem.lm.undo()
#eachitem.lm.undo()
eachitem.differentcards(numberofoutsidecircles)
for subitem in eachitem.cardinals:
#subitem.lm.undo()
#subitem.lm.undo()
circlelist.append(subitem)
circlelist.append(eachitem)
totalbefore=len(masterCircleSet)
totalafter=0
while ((totalbefore!=totalafter)):
#print "Just started new while loop. number of circles in circlelist: "+str(len(circlelist))
totalbefore=len(masterCircleSet)
newlist=[]
for firstCircle in circlelist:
#print "new firstCircle : " + str(firstCircle.checkString)
#print "Current number of circles in circlelist: "+str(len(circlelist))
#firstCircle.drawred()
for secondCircle in circlelist:
#secondCircle.drawred()
thisDiameter=min(firstCircle.radius, secondCircle.radius)/2.0
if (min(firstCircle.radius, secondCircle.radius)<=1):
#print "first break"
#secondCircle.draw()
break
if thisDiameter<smallestsize:
smallestsize=thisDiameter
#print "New Smallest Size: "+ str(smallestsize)
newCircles=checkCircles(firstCircle, secondCircle)
if newCircles:
for x in newCircles:
if ((int(x[0])/MINOFFSET*MINOFFSET, int(x[1])/MINOFFSET*MINOFFSET, thisDiameter) not in masterCircleSet):
newCircle=Circle(x[0], x[1],thisDiameter,lm)
newlist.append(newCircle)
if recursive:
newCircle.differentcards(numberofoutsidecircles)
for eachCard in newCircle.cardinals:
circlelist.append(eachCard)
#secondCircle.draw()
#if (thisDiameter<=1):
#print "second break"
#firstCircle.draw()
for item in newlist:
item.draw()
circlelist.append(item)
newlist=[]
totalafter=len(masterCircleSet)
if (totalafter==totalbefore):
print "no more moves"
lm.tracer(True)
fvh2.savetocircles(a.lm)
def yetanotherdrawingagainwithmax(startdiameter,numberofoutsidecircles, recursive=False, lm=None,stepsize=2):
global masterCircleSet
masterCircleSet=set()
if not lm:
lm=fvh2.fvh.MyTurtle()
lm.setup()
lm.tracer(False)
smallestsize=startdiameter
a=Circle(0,0,startdiameter,lm,False)
# a.lm.undo()
# a.lm.undo()
a.differentcards(numberofoutsidecircles)
circlelist=[]
circlelist.append(a)
for eachitem in a.cardinals:
#eachitem.lm.undo()
#eachitem.lm.undo()
eachitem.differentcards(numberofoutsidecircles)
for subitem in eachitem.cardinals:
#subitem.lm.undo()
#subitem.lm.undo()
circlelist.append(subitem)
circlelist.append(eachitem)
totalbefore=len(masterCircleSet)
totalafter=0
while ((totalbefore!=totalafter)):
# print "Just started new while loop. number of circles in circlelist: "+str(len(circlelist))
totalbefore=len(masterCircleSet)
newlist=[]
for firstCircle in circlelist:
#print "new firstCircle : " + str(firstCircle.checkString)
#print "Current number of circles in circlelist: "+str(len(circlelist))
#firstCircle.drawred()
for secondCircle in circlelist:
#firstCircle.drawred()
#secondCircle.drawred()
thisDiameter=min(firstCircle.radius, secondCircle.radius)/float(stepsize)
if (min(firstCircle.radius, secondCircle.radius)<=1):
#print "first break"
#secondCircle.draw()
break
if thisDiameter<smallestsize:
smallestsize=thisDiameter
#print "New Smallest Size: "+ str(smallestsize)
newCircles=checkCircles(firstCircle, secondCircle)
if newCircles:
for x in newCircles:
if ((int(x[0])/MINOFFSET*MINOFFSET, int(x[1])/MINOFFSET*MINOFFSET, thisDiameter) not in masterCircleSet):
newCircle=Circle(x[0], x[1],thisDiameter,lm)
newCircle.draw()
circlelist.append(newCircle)
if recursive:
newCircle.differentcards(numberofoutsidecircles)
for eachCard in newCircle.cardinals:
eachCard.draw()
circlelist.append(eachCard)
#secondCircle.draw()
#firstCircle.draw()
#if (thisDiameter<=1):
#print "second break"
#firstCircle.draw()
for item in newlist:
circlelist.append(item)
newlist=[]
totalafter=len(masterCircleSet)
if (totalafter==totalbefore):
print "no more moves"
lm.tracer(True)
fvh2.savetocircles(a.lm)
def yadwm(startdiameter):
smallestsize=startdiameter
a=Circle(0,0,startdiameter)
a.addCardinals()
a.lm.undo()
a.lm.undo()
circlelist=[]
circlelist.append(a)
for eachitem in a.cardinals:
eachitem.lm.undo()
eachitem.lm.undo()
circlelist.append(eachitem)
totalbefore=len(masterCircleSet)
totalafter=0
while ((totalbefore!=totalafter)):
print "Just started new while loop. number of circles in circlelist: "+str(len(circlelist))
totalbefore=len(masterCircleSet)
newlist=[]
for firstCircle in circlelist:
for secondCircle in circlelist:
thisDiameter=max(firstCircle.radius, secondCircle.radius)/2.0
if (thisDiameter<=32):
#print "first break"
break
if thisDiameter<smallestsize:
smallestsize=thisDiameter
print "New Smallest Size: "+ str(smallestsize)
newCircles=checkCircles(firstCircle, secondCircle)
if newCircles:
#lm.tracer(False)
for x in newCircles:
if ((int(x[0])/MINOFFSET*MINOFFSET, int(x[1])/MINOFFSET*MINOFFSET, thisDiameter) not in masterCircleSet):
newCircle=Circle(x[0], x[1],thisDiameter)
newCircle.addCardinals()
newCircle.draw()
circlelist.append(newCircle)
for eachCard in newCircle.cardinals:
eachCard.draw()
circlelist.append(eachCard)
#lm.tracer(True)
#if (thisDiameter<=1):
#print "second break"
for item in newlist:
circlelist.append(item)
totalafter=len(masterCircleSet)
if (totalafter==totalbefore):
print "no more moves"
fvh2.savetocircles(a.lm)
def makeart1():
for size in range(7,11):
for numberofsides in range(1,10):
for recursive in (False, True):
print 2**size,numberofsides,recursive
lm=fvh2.fvh.MyTurtle()
ts=lm.getscreen()
ts.screensize(2**(size+2),2**(size+2),'grey50')
ts.setup(2**(size+3),2**(size+3),0,0)
yetanotherdrawingagain(2**size,numberofsides,recursive,lm)
tc=ts.getcanvas()
filename="circles/startSize"+str(size)+"numberofsides"+str(numberofsides)+str(recursive)+'.eps'
ts.update()
tc.postscript(file=filename, height=2**(size+2), width=2**(size+2),x=-2**(size+1),y=-2**(size+1))
ts.bye()
def makeart2():
for size in range(8,11):
for numberofsides in range(6,10):
for recursive in (False, True):
for stepsize in range(2,4):
print stepsize**size,numberofsides,recursive
lm=fvh2.fvh.MyTurtle()
ts=lm.getscreen()
ts.screensize(stepsize**(size+2),stepsize**(size+2),'grey50')
ts.setup(stepsize**(size+3),stepsize**(size+3),0,0)
yetanotherdrawingagainwithmax(stepsize**size,numberofsides,recursive,lm,stepsize)
tc=ts.getcanvas()
filename="circles/max"+str(size)+str(numberofsides)+str(recursive)+'.eps'
tc.postscript(file=filename, height=stepsize**(size+2), width=stepsize**(size+2),x=-stepsize**(size+1),y=-stepsize**(size+1))
ts.bye()
def yetanotherdrawingagainwithcontinue(startdiameter,numberofoutsidecircles, recursive=False, lm=None):
global masterCircleSet
masterCircleSet=set()
if not lm:
lm=fvh2.fvh.MyTurtle()
lm.setup()
lm.tracer(False)
smallestsize=startdiameter
a=Circle(0,0,startdiameter,lm)
a.draw()
a.lm.undo()
a.lm.undo()
a.differentcards(numberofoutsidecircles)
circlelist=[]
circlelist.append(a)
for eachitem in a.cardinals:
eachitem.draw()
eachitem.lm.undo()
eachitem.lm.undo()
#eachitem.draw()
eachitem.differentcards(numberofoutsidecircles)
for subitem in eachitem.cardinals:
subitem.draw()
subitem.lm.undo()
subitem.lm.undo()
circlelist.append(subitem)
circlelist.append(eachitem)
totalbefore=len(masterCircleSet)
totalafter=0
while ((totalbefore!=totalafter)):
#print "Just started new while loop. number of circles in circlelist: "+str(len(circlelist))
totalbefore=len(masterCircleSet)
newlist=[]
for firstCircle in circlelist:
#print "new firstCircle : " + str(firstCircle.checkString)
#print "Current number of circles in circlelist: "+str(len(circlelist))
#firstCircle.drawred()
for secondCircle in circlelist:
#secondCircle.drawred()
thisDiameter=min(firstCircle.radius, secondCircle.radius)/2.0
if (min(firstCircle.radius, secondCircle.radius)<=4):
#print "first break"
#secondCircle.draw()
continue
if thisDiameter<smallestsize:
smallestsize=thisDiameter
#print "New Smallest Size: "+ str(smallestsize)
newCircles=checkCircles(firstCircle, secondCircle)
if newCircles:
for x in newCircles:
if ((int(x[0])/MINOFFSET*MINOFFSET, int(x[1])/MINOFFSET*MINOFFSET, thisDiameter) not in masterCircleSet):
newCircle=Circle(x[0], x[1],thisDiameter,lm)
newCircle.draw()
newlist.append(newCircle)
if recursive:
newCircle.differentcards(numberofoutsidecircles)
for eachCard in newCircle.cardinals:
eachCard.draw()
circlelist.append(eachCard)
#secondCircle.draw()
#if (thisDiameter<=1):
#print "second break"
#firstCircle.draw()
for item in newlist:
circlelist.append(item)
newlist=[]
totalafter=len(masterCircleSet)
if (totalafter==totalbefore):
print "no more moves"
lm.tracer(True)
fvh2.savetocircles(a.lm)
def yetanotherdrawingagainwithcontinueandextended(startdiameter,numberofoutsidecircles, recursive=False, lm=None):
global masterCircleSet
masterCircleSet=set()
if not lm:
lm=fvh2.fvh.MyTurtle()
lm.setup()
smallestsize=startdiameter
a=Circle(0,0,startdiameter,lm)
# a.lm.undo()
# a.lm.undo()
a.extendedCards(numberofoutsidecircles)
circlelist=[]
circlelist.append(a)
for eachitem in a.cardinals:
#eachitem.lm.undo()
#eachitem.lm.undo()
#eachitem.differentcards(numberofoutsidecircles)
#for subitem in eachitem.cardinals:
#subitem.lm.undo()
#subitem.lm.undo()
#circlelist.append(subitem)
circlelist.append(eachitem)
totalbefore=len(masterCircleSet)
totalafter=0
while ((totalbefore!=totalafter)):
print "Just started new while loop. number of circles in circlelist: "+str(len(circlelist))
totalbefore=len(masterCircleSet)
newlist=[]
for firstCircle in circlelist:
#print "new firstCircle : " + str(firstCircle.checkString)
#print "Current number of circles in circlelist: "+str(len(circlelist))
#firstCircle.drawred()
for secondCircle in circlelist:
#secondCircle.drawred()
thisDiameter=min(firstCircle.radius, secondCircle.radius)/2.0
if (min(firstCircle.radius, secondCircle.radius)<=4):
#print "first break"
#secondCircle.draw()
continue
if thisDiameter<smallestsize:
smallestsize=thisDiameter
#print "New Smallest Size: "+ str(smallestsize)
newCircles=checkCircles(firstCircle, secondCircle)
if newCircles:
for x in newCircles:
if ((int(x[0])/MINOFFSET*MINOFFSET, int(x[1])/MINOFFSET*MINOFFSET, thisDiameter) not in masterCircleSet):
newCircle=Circle(x[0], x[1],thisDiameter,lm)
newlist.append(newCircle)
if recursive:
newCircle.extendedCards(numberofoutsidecircles)
for eachCard in newCircle.cardinals:
circlelist.append(eachCard)
#secondCircle.draw()
#if (thisDiameter<=1):
#print "second break"
#firstCircle.draw()
for item in newlist:
circlelist.append(item)
newlist=[]
totalafter=len(masterCircleSet)
if (totalafter==totalbefore):
print "no more moves"
fvh2.savetocircles(a.lm)
return circlelist
def yadei(startdiameter,numberofoutsidecircles, recursive=False, lm=None):
global masterCircleSet
masterCircleSet=set()
if not lm:
lm=fvh2.fvh.MyTurtle()
lm.setup()
smallestsize=startdiameter
a=Circle(0,0,startdiameter,lm)
# a.lm.undo()
# a.lm.undo()
a.innerextendedCards(numberofoutsidecircles)
circlelist=[]
circlelist.append(a)
#for eachitem in a.cardinals:
#eachitem.lm.undo()
#eachitem.lm.undo()
#eachitem.differentcards(numberofoutsidecircles)
#for subitem in eachitem.cardinals:
#subitem.lm.undo()
#subitem.lm.undo()
#circlelist.append(subitem)
#circlelist.append(eachitem)
totalbefore=len(masterCircleSet)
totalafter=0
while ((totalbefore!=totalafter)):
print "Just started new while loop. number of circles in circlelist: "+str(len(circlelist))
totalbefore=len(masterCircleSet)
newlist=[]
for firstCircle in circlelist:
#print "new firstCircle : " + str(firstCircle.checkString)
#print "Current number of circles in circlelist: "+str(len(circlelist))
#firstCircle.drawred()
for secondCircle in circlelist:
#secondCircle.drawred()
thisDiameter=min(firstCircle.radius, secondCircle.radius)/2.0
if (min(firstCircle.radius, secondCircle.radius)<=4):
#print "first break"
#secondCircle.draw()
continue
if thisDiameter<smallestsize:
smallestsize=thisDiameter
#print "New Smallest Size: "+ str(smallestsize)
newCircles=checkCircles(firstCircle, secondCircle)
if newCircles:
for x in newCircles:
if ((int(x[0])/MINOFFSET*MINOFFSET, int(x[1])/MINOFFSET*MINOFFSET, thisDiameter) not in masterCircleSet):
newCircle=Circle(x[0], x[1],thisDiameter,lm)
newlist.append(newCircle)
if recursive:
newCircle.innerextendedCards(numberofoutsidecircles)
for eachCard in newCircle.cardinals:
circlelist.append(eachCard)
#secondCircle.draw()
#if (thisDiameter<=1):
#print "second break"
#firstCircle.draw()
for item in newlist:
circlelist.append(item)
newlist=[]
totalafter=len(masterCircleSet)
if (totalafter==totalbefore):
print "no more moves"
fvh2.savetocircles(a.lm)
return circlelist
def itsOct():
pass
|
jeremiahmarks/dangerzone
|
scripts/python/turtleRelated/circleint.py
|
Python
|
mit
| 34,847
|
#!/usr/bin/env python
import os
import json
import requests
import time
import sys
from jwcrypto import jwk, jws as cryptoJWS, jwe
from jwcrypto.common import json_encode, json_decode
from jwcrypto.common import base64url_decode, base64url_encode
from jose import jws
from hyperwallet.exceptions import HyperwalletException
from six.moves.urllib.parse import urlparse
class Encryption(object):
'''
    Payload encryption/decryption service for the Hyperwallet API client.
:param clientPrivateKeySetLocation:
The location(url or path to file) of client's private JWK key set. **REQUIRED**
:param hyperwalletKeySetLocation:
The location(url or path to file) of hyperwallet public JWK key set. **REQUIRED**
:param encryptionAlgorithm:
JWE encryption algorithm.
:param signAlgorithm:
JWS signature algorithm.
:param encryptionMethod:
JWE body encryption method.
:param jwsExpirationMinutes:
Time in minutes when JWS signature is valid after creation.
'''
def __init__(self,
clientPrivateKeySetLocation,
hyperwalletKeySetLocation,
encryptionAlgorithm='RSA-OAEP-256',
signAlgorithm='RS256',
encryptionMethod='A256CBC-HS512',
jwsExpirationMinutes=5):
'''
Encryption service for hyperwallet client
'''
self.clientPrivateKeySetLocation = clientPrivateKeySetLocation
self.hyperwalletKeySetLocation = hyperwalletKeySetLocation
self.encryptionAlgorithm = encryptionAlgorithm
self.signAlgorithm = signAlgorithm
self.encryptionMethod = encryptionMethod
self.jwsExpirationMinutes = jwsExpirationMinutes
self.integer_types = (int, long,) if sys.version_info < (3,) else (int,)
def encrypt(self, body):
'''
:param body:
Body message to be 1) signed and 2) encrypted. **REQUIRED**
:returns:
String as a result of signature and encryption of input message body
'''
jwsKeySet = self.__getJwkKeySet(location=self.clientPrivateKeySetLocation)
jwkSignKey = self.__findJwkKeyByAlgorithm(jwkKeySet=jwsKeySet, algorithm=self.signAlgorithm)
privateKeyToSign = jwk.JWK(**jwkSignKey)
jwsToken = cryptoJWS.JWS(body.encode('utf-8'))
jwsToken.add_signature(privateKeyToSign, None, json_encode({
"alg": self.signAlgorithm,
"kid": jwkSignKey['kid'],
"exp": self.__getJwsExpirationTime()
}))
signedBody = jwsToken.serialize(True)
jweKeySet = self.__getJwkKeySet(location=self.hyperwalletKeySetLocation)
jwkEncryptKey = self.__findJwkKeyByAlgorithm(jwkKeySet=jweKeySet, algorithm=self.encryptionAlgorithm)
publicKeyToEncrypt = jwk.JWK(**jwkEncryptKey)
protected_header = {
"alg": self.encryptionAlgorithm,
"enc": self.encryptionMethod,
"typ": "JWE",
"kid": jwkEncryptKey['kid'],
}
jweToken = jwe.JWE(signedBody.encode('utf-8'), recipient=publicKeyToEncrypt, protected=protected_header)
return jweToken.serialize(True)
def decrypt(self, body):
'''
:param body:
Body message to be 1) decrypted and 2) check for correct signature. **REQUIRED**
:returns:
Decrypted body message
'''
jweKeySet = self.__getJwkKeySet(location=self.clientPrivateKeySetLocation)
jwkDecryptKey = self.__findJwkKeyByAlgorithm(jwkKeySet=jweKeySet, algorithm=self.encryptionAlgorithm)
privateKeyToDecrypt = jwk.JWK(**jwkDecryptKey)
jweToken = jwe.JWE()
try:
jweToken.deserialize(body, key=privateKeyToDecrypt)
except Exception as e:
raise HyperwalletException(str(e))
payload = jweToken.payload
self.checkJwsExpiration(payload)
jwsKeySet = self.__getJwkKeySet(location=self.hyperwalletKeySetLocation)
jwkCheckSignKey = self.__findJwkKeyByAlgorithm(jwkKeySet=jwsKeySet, algorithm=self.signAlgorithm)
try:
return jws.verify(payload, json.dumps(jwkCheckSignKey), algorithms=self.signAlgorithm)
except Exception as e:
raise HyperwalletException(str(e))
def __getJwkKeySet(self, location):
'''
Retrieves JWK key data from given location.
:param location:
Location(can be a URL or path to file) of JWK key data. **REQUIRED**
:returns:
JWK key set found at given location.
'''
try:
url = urlparse(location)
if url.scheme and url.netloc and url.path:
return requests.get(location).text
raise HyperwalletException('Failed to parse url from string = ' + location)
except Exception as e:
if os.path.isfile(location):
with open(location) as f:
return f.read()
else:
raise HyperwalletException('Wrong JWK key set location path = ' + location)
def __findJwkKeyByAlgorithm(self, jwkKeySet, algorithm):
'''
Finds JWK key by given algorithm.
:param jwkKeySet:
JSON representation of JWK key set. **REQUIRED**
:param algorithm:
Algorithm of the JWK key to be found in key set. **REQUIRED**
:returns:
JWK key with given algorithm.
'''
try:
keySet = json.loads(jwkKeySet)
except ValueError:
raise HyperwalletException('Wrong JWK key set ' + jwkKeySet)
for key in keySet['keys']:
if key['alg'] == algorithm:
return key
raise HyperwalletException('JWK set doesn\'t contain key with algorithm = ' + algorithm)
def __getJwsExpirationTime(self):
'''
Calculates the expiration time (in seconds) of JWS signature.
:returns:
JWS expiration time in seconds since the UNIX epoch (January 1, 1970 00:00:00 UTC).
'''
secondsInMinute = 60
return int(time.time() + self.jwsExpirationMinutes * secondsInMinute)
def checkJwsExpiration(self, payload):
'''
Check if JWS signature has not expired.
'''
header = jws.get_unverified_header(payload)
if 'exp' not in header:
raise HyperwalletException('While trying to verify JWS signature no [exp] header is found')
exp = header['exp']
if not isinstance(exp, self.integer_types):
raise HyperwalletException('Wrong value in [exp] header of JWS signature, must be integer')
if exp < time.time():
raise HyperwalletException('JWS signature has expired, checked by [exp] JWS header')
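# Usage sketch (illustrative only; both key-set locations below are placeholder
# names and may be file paths or URLs, per __getJwkKeySet). Outgoing bodies are
# signed with the client key and encrypted to Hyperwallet's public key, while
# decrypt() reverses that for responses, so it applies to incoming payloads
# rather than to the output of your own encrypt() call.
#
#     enc = Encryption('client-private-jwkset.json', 'hyperwallet-public-jwkset.json')
#     outgoing = enc.encrypt('{"clientUserId": "example-user"}')
#     readable = enc.decrypt(encrypted_response_body)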
|
hyperwallet/python-sdk
|
hyperwallet/utils/encryption.py
|
Python
|
mit
| 6,786
|
from __future__ import division
import numpy as np
__author__ = "Eric Chiang"
__email__ = "eric[at]yhathq.com"
"""
Measurements inspired by Philip Tetlock's "Expert Political Judgment"
Equations taken from Yaniv, Yates, & Smith (1991):
"Measures of Discrimination Skill in Probabilistic Judgment"
"""
def calibration(prob,outcome,n_bins=10):
"""Calibration measurement for a set of predictions.
When predicting events at a given probability, how far is frequency
of positive outcomes from that probability?
NOTE: Lower scores are better
prob: array_like, float
Probability estimates for a set of events
outcome: array_like, bool
      Whether the predicted event occurred
    n_bins: int
      Number of judgement categories to perform the calculation over.
      Predictions are binned based on probability, since "discrete"
      probabilities aren't required.
"""
prob = np.array(prob)
outcome = np.array(outcome)
c = 0.0
# Construct bins
judgement_bins = np.arange(n_bins + 1) / n_bins
# Which bin is each prediction in?
bin_num = np.digitize(prob,judgement_bins)
for j_bin in np.unique(bin_num):
# Is event in bin
in_bin = bin_num == j_bin
# Predicted probability taken as average of preds in bin
predicted_prob = np.mean(prob[in_bin])
# How often did events in this bin actually happen?
true_bin_prob = np.mean(outcome[in_bin])
# Squared distance between predicted and true times num of obs
c += np.sum(in_bin) * ((predicted_prob - true_bin_prob) ** 2)
return c / len(prob)
def discrimination(prob,outcome,n_bins=10):
"""Discrimination measurement for a set of predictions.
For each judgement category, how far from the base probability
is the true frequency of that bin?
    NOTE: Higher scores are better
prob: array_like, float
Probability estimates for a set of events
outcome: array_like, bool
      Whether the predicted event occurred
    n_bins: int
      Number of judgement categories to perform the calculation over.
      Predictions are binned based on probability, since "discrete"
      probabilities aren't required.
"""
prob = np.array(prob)
outcome = np.array(outcome)
d = 0.0
# Base frequency of outcomes
base_prob = np.mean(outcome)
# Construct bins
judgement_bins = np.arange(n_bins + 1) / n_bins
# Which bin is each prediction in?
bin_num = np.digitize(prob,judgement_bins)
for j_bin in np.unique(bin_num):
in_bin = bin_num == j_bin
true_bin_prob = np.mean(outcome[in_bin])
# Squared distance between true and base times num of obs
d += np.sum(in_bin) * ((true_bin_prob - base_prob) ** 2)
return d / len(prob)
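# Toy illustration (numbers follow from the definitions above, not from any real
# data): four predictions, two at 0.1 that did not occur and two at 0.9 that did.
# Each occupied bin is 0.1 away from its observed frequency, and each observed
# frequency is 0.5 away from the 0.5 base rate, so:
#
#     probs    = [0.1, 0.1, 0.9, 0.9]
#     outcomes = [False, False, True, True]
#     calibration(probs, outcomes)     # 0.01
#     discrimination(probs, outcomes)  # 0.25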
|
steinam/teacher
|
jup_notebooks/data-science-ipython-notebooks-master/analyses/churn_measurements.py
|
Python
|
mit
| 2,795
|
import _plotly_utils.basevalidators
class BordercolorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="bordercolorsrc", parent_name="cone.hoverlabel", **kwargs
):
super(BordercolorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/cone/hoverlabel/_bordercolorsrc.py
|
Python
|
mit
| 436
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2009 Benoit Chesneau <benoitc@e-engura.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
""" Wrapper of couchdbkit Document and Properties for django. It also
add possibility to a document to register itself in CouchdbkitHandler
"""
import re
import sys
from django.conf import settings
from django.db.models.options import get_verbose_name
from django.utils.translation import activate, deactivate_all, get_language, \
string_concat
from django.utils.encoding import smart_str, force_unicode
from couchdbreq import schema
from couchdbreq.ext.django.loading import get_schema, register_schema, \
get_db
__all__ = ['Property', 'StringProperty', 'IntegerProperty',
'DecimalProperty', 'BooleanProperty', 'FloatProperty',
'DateTimeProperty', 'DateProperty', 'TimeProperty',
'dict_to_json', 'list_to_json', 'value_to_json',
'value_to_python', 'dict_to_python', 'list_to_python',
'convert_property', 'DocumentSchema', 'Document',
'SchemaProperty', 'SchemaListProperty', 'ListProperty',
'DictProperty', 'StringListProperty', 'SchemaDictProperty',
'SetProperty',]
DEFAULT_NAMES = ('verbose_name', 'db_table', 'ordering',
'app_label')
class Options(object):
""" class based on django.db.models.options. We only keep
useful bits."""
def __init__(self, meta, app_label=None):
self.module_name, self.verbose_name = None, None
self.verbose_name_plural = None
self.object_name, self.app_label = None, app_label
self.meta = meta
self.admin = None
def contribute_to_class(self, cls, name):
cls._meta = self
self.installed = re.sub('\.models$', '', cls.__module__) in settings.INSTALLED_APPS
# First, construct the default values for these options.
self.object_name = cls.__name__
self.module_name = self.object_name.lower()
self.verbose_name = get_verbose_name(self.object_name)
# Next, apply any overridden values from 'class Meta'.
if self.meta:
meta_attrs = self.meta.__dict__.copy()
for name in self.meta.__dict__:
# Ignore any private attributes that Django doesn't care about.
# NOTE: We can't modify a dictionary's contents while looping
# over it, so we loop over the *original* dictionary instead.
if name.startswith('_'):
del meta_attrs[name]
for attr_name in DEFAULT_NAMES:
if attr_name in meta_attrs:
setattr(self, attr_name, meta_attrs.pop(attr_name))
elif hasattr(self.meta, attr_name):
setattr(self, attr_name, getattr(self.meta, attr_name))
# verbose_name_plural is a special case because it uses a 's'
# by default.
setattr(self, 'verbose_name_plural', meta_attrs.pop('verbose_name_plural', string_concat(self.verbose_name, 's')))
# Any leftover attributes must be invalid.
if meta_attrs != {}:
raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs.keys()))
else:
self.verbose_name_plural = string_concat(self.verbose_name, 's')
del self.meta
def __str__(self):
return "%s.%s" % (smart_str(self.app_label), smart_str(self.module_name))
def verbose_name_raw(self):
"""
There are a few places where the untranslated verbose name is needed
(so that we get the same value regardless of currently active
locale).
"""
lang = get_language()
deactivate_all()
raw = force_unicode(self.verbose_name)
activate(lang)
return raw
verbose_name_raw = property(verbose_name_raw)
class DocumentMeta(schema.SchemaProperties):
def __new__(cls, name, bases, attrs):
super_new = super(DocumentMeta, cls).__new__
parents = [b for b in bases if isinstance(b, DocumentMeta)]
if not parents:
return super_new(cls, name, bases, attrs)
new_class = super_new(cls, name, bases, attrs)
attr_meta = attrs.pop('Meta', None)
if not attr_meta:
meta = getattr(new_class, 'Meta', None)
else:
meta = attr_meta
if getattr(meta, 'app_label', None) is None:
document_module = sys.modules[new_class.__module__]
app_label = document_module.__name__.split('.')[-2]
else:
app_label = getattr(meta, 'app_label')
new_class.add_to_class('_meta', Options(meta, app_label=app_label))
register_schema(app_label, new_class)
return get_schema(app_label, name)
def add_to_class(cls, name, value):
if hasattr(value, 'contribute_to_class'):
value.contribute_to_class(cls, name)
else:
setattr(cls, name, value)
class Document(schema.Document):
""" Document object for django extension """
__metaclass__ = DocumentMeta
get_id = property(lambda self: self['_id'])
get_rev = property(lambda self: self['_rev'])
@classmethod
def get_db(cls):
db = getattr(cls, '_db', None)
if db is None:
app_label = getattr(cls._meta, "app_label")
db = get_db(app_label)
cls._db = db
return db
DocumentSchema = schema.DocumentSchema
# properties
Property = schema.Property
StringProperty = schema.StringProperty
IntegerProperty = schema.IntegerProperty
DecimalProperty = schema.DecimalProperty
BooleanProperty = schema.BooleanProperty
FloatProperty = schema.FloatProperty
DateTimeProperty = schema.DateTimeProperty
DateProperty = schema.DateProperty
TimeProperty = schema.TimeProperty
SchemaProperty = schema.SchemaProperty
SchemaListProperty = schema.SchemaListProperty
ListProperty = schema.ListProperty
DictProperty = schema.DictProperty
StringListProperty = schema.StringListProperty
SchemaDictProperty = schema.SchemaDictProperty
SetProperty = schema.SetProperty
# some utilities
dict_to_json = schema.dict_to_json
list_to_json = schema.list_to_json
value_to_json = schema.value_to_json
value_to_python = schema.value_to_python
dict_to_python = schema.dict_to_python
list_to_python = schema.list_to_python
convert_property = schema.convert_property
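# Minimal usage sketch (hypothetical model; assumes a CouchDB database is
# configured for the "greetings" app label so Document.get_db() can resolve it):
#
#     class Greeting(Document):
#         author = StringProperty()
#         content = StringProperty(required=True)
#
#         class Meta:
#             app_label = "greetings"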
|
adamlofts/couchdb-requests
|
couchdbreq/ext/django/schema.py
|
Python
|
mit
| 7,084
|
from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
pre_meta = MetaData()
post_meta = MetaData()
athlete = Table('athlete', pre_meta,
Column('id', INTEGER, primary_key=True, nullable=False),
Column('name_first', VARCHAR(length=64)),
Column('name_last', VARCHAR(length=64)),
Column('nickname', VARCHAR(length=64)),
Column('phone_number', VARCHAR(length=10)),
Column('ice_name', VARCHAR(length=64)),
Column('ice_phone', VARCHAR(length=10)),
Column('address_city', VARCHAR(length=64)),
Column('address_state', VARCHAR(length=2)),
Column('address_street', VARCHAR(length=64)),
Column('address_zip', VARCHAR(length=5)),
Column('date_birth', DATE),
Column('disability', VARCHAR(length=64)),
Column('email', VARCHAR(length=64)),
Column('notes', TEXT),
Column('pace', FLOAT),
)
athlete = Table('athlete', post_meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('name_first', String(length=64)),
Column('name_last', String(length=64)),
Column('nickname', String(length=64)),
Column('phone_number', String(length=10)),
Column('ice_name', String(length=64)),
Column('ice_phone', String(length=10)),
Column('note', Text),
Column('date_birth', Date),
Column('email', String(length=64)),
Column('disability', String(length=64)),
Column('pace', Float),
Column('address_street', String(length=64)),
Column('address_city', String(length=64)),
Column('address_state', String(length=2)),
Column('address_zip', String(length=5)),
)
workout = Table('workout', pre_meta,
Column('id', INTEGER, primary_key=True, nullable=False),
Column('athlete_id', INTEGER),
Column('distance', FLOAT),
Column('speed', FLOAT),
Column('date', DATE),
Column('notes', TEXT),
)
workout = Table('workout', post_meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('athlete_id', Integer),
Column('date', Date),
Column('distance', Float),
Column('speed', Float),
Column('note', Text),
)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
pre_meta.tables['athlete'].columns['notes'].drop()
post_meta.tables['athlete'].columns['note'].create()
pre_meta.tables['workout'].columns['notes'].drop()
post_meta.tables['workout'].columns['note'].create()
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
pre_meta.tables['athlete'].columns['notes'].create()
post_meta.tables['athlete'].columns['note'].drop()
pre_meta.tables['workout'].columns['notes'].create()
post_meta.tables['workout'].columns['note'].drop()
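# Note on the pattern above: dropping 'notes' and creating 'note' discards any
# data already stored in the column. If the installed sqlalchemy-migrate
# changeset supports column renames (an assumption about the version in use),
# something like the sketch below would preserve the data instead:
#
#     pre_meta.tables['athlete'].columns['notes'].alter(name='note')
#     pre_meta.tables['workout'].columns['notes'].alter(name='note')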
|
ericgarig/runner_tracker
|
db_repository/versions/013_migration.py
|
Python
|
mit
| 2,906
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pronto_praise.settings.local")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
prontotools/pronto-praise
|
pronto_praise/manage.py
|
Python
|
mit
| 817
|
from django import forms
from django.forms import ModelForm
from budget.models import Account, AccountType
class AccountForm(ModelForm):
    # The field override must be declared on the form class (not inside Meta)
    # for the custom queryset to take effect.
    account_type = forms.ModelChoiceField(queryset=AccountType.objects.all())

    class Meta:
        model = Account
        fields = [
            'name',
            'account_type',
        ]
|
tom08/BudgetTracker
|
budget/forms.py
|
Python
|
mit
| 349
|
import hmac
import hashlib
import os
from rest_framework import permissions
from rest_framework.exceptions import NotFound
from .api import get_repo_permissions
from builder.models import Site
from core.exceptions import BadRequest
class GithubOnly(permissions.BasePermission):
""" Security Check - Certain API endpoints only called by Github."""
def has_permission(self, request, view):
secret = request.META.get("HTTP_X_HUB_SIGNATURE")
if secret:
# must convert to bytes for python 3.5 bug in hmac library
key = bytes(os.environ['GITHUB_SECRET'].encode('ascii'))
computed_secret = 'sha1=' + hmac.new(
key, request.body, hashlib.sha1).hexdigest()
return hmac.compare_digest(computed_secret, secret)
return False
class UserHasProjectWritePermission(permissions.BasePermission):
""" Security Check - User is an admin for the project; can create/delete"""
def check_perms(self, user, repo, owner):
if user and repo and owner:
# Call github and confirm user is an admin for this project
perms = get_repo_permissions(owner, repo, user)
if perms.get('admin', False):
return True
return False
def has_permission(self, request, view):
# Read-Only ops (GET, OPTIONS, HEAD) pass for logged in users
if request.method in permissions.SAFE_METHODS:
return True
try:
if request.method == 'POST':
obj = request.data['github'].split('/')
return self.check_perms(request.user, obj[1], obj[0])
elif request.method == 'DELETE':
obj = Site.objects.get(github_id=view.kwargs['repo'])
return self.check_perms(request.user, obj.name, obj.owner.name)
except (KeyError, IndexError):
raise BadRequest()
except Site.DoesNotExist as e:
raise NotFound(detail=e)
return False
def has_object_permission(self, request, view, site):
return self.check_perms(request.user, site.name, site.owner.name)
class IsWhitelistedProject(permissions.BasePermission):
""" Security Check - Only allow projects owned by owners on the whitelist
"""
def has_permission(self, request, view):
# Read-Only ops (GET, OPTIONS, HEAD) pass for logged in users
if request.method in permissions.SAFE_METHODS:
return True
try:
if request.method == 'POST':
project = request.data['github']
owner, repo = project.split('/')
whitelist = os.environ.get('OWNER_WHITELIST', None)
if not whitelist or owner in whitelist.split(','):
return True
except:
raise BadRequest()
return False
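# For reference, the X-Hub-Signature header checked by GithubOnly above is the
# hex HMAC-SHA1 of the raw request body keyed with the shared webhook secret
# (GITHUB_SECRET here). A sender-side sketch, with a made-up secret value:
#
#     key = b'example-webhook-secret'
#     signature = 'sha1=' + hmac.new(key, raw_body, hashlib.sha1).hexdigest()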
|
istrategylabs/franklin-api
|
franklin/github/permissions.py
|
Python
|
mit
| 2,864
|