text stringlengths 4 1.02M | meta dict |
|---|---|
from restclients.sws.v5.term import get_term_by_year_and_quarter
from restclients.sws.v5.term import get_current_term
from restclients.sws.v5.term import get_next_term
from restclients.sws.v5.term import get_previous_term
from restclients.sws.v5.term import get_term_before
from restclients.sws.v5.term import get_term_after
from restclients.sws.v5.term import get_term_by_date
| {
"content_hash": "d45dc3851f649300d433a36a3a3c5615",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 64,
"avg_line_length": 54,
"alnum_prop": 0.8227513227513228,
"repo_name": "jeffFranklin/uw-restclients",
"id": "c098bd5174840529282e78a628cb0b64bc5a7bd7",
"size": "378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "restclients/sws/term.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "76993"
},
{
"name": "Python",
"bytes": "778217"
},
{
"name": "Shell",
"bytes": "584"
}
],
"symlink_target": ""
} |
class Country:
    """A country record parsed from a single CSV row.

    Column positions are defined by ``Country.index``.  Attribute reads
    such as ``country.cname`` or ``country.capital`` are resolved through
    ``__getattr__`` against the parsed row, and ``del country.capital``
    blanks the corresponding field via ``__delattr__`` (implements the
    two TODOs).  The population column is stored as an int so ``+`` and
    ``-`` can adjust it in place.
    """
    # Maps attribute name -> column position in the CSV row.  Rows may
    # carry extra trailing columns (additional languages); only the first
    # 'language' column is addressable by name.
    index = {'cname':0,'population':1,'capital':2,'citypop':3,'continent':4,
             'ind_date':5,'currency':6,'religion':7,'language':8}
    def __init__(self, row):
        self.__attr = row.split(',')
        # Added to support + and -
        self.__attr[Country.index['population']] = \
            int(self.__attr[Country.index['population']])
    def __str__(self):
        return "{:<10} {:<10} {:>010}".format(self.cname, self.capital, self.population)
    def __add__(self,amount):
        # Mutates and returns self so `country += n` keeps the same object.
        self.__attr[Country.index['population']] += amount
        return self
    def __sub__(self,amount):
        self.__attr[Country.index['population']] -= amount
        return self
    def __eq__(self, key):
        # Compares against the country name, so `country == "Belgium"` works.
        return (key == self.cname)
    def __getattr__(self, name):
        # Attribute get function (fixes TODO): resolve known column names
        # against the parsed row.  Only called for names not found the
        # normal way, so real attributes like _Country__attr are unaffected.
        try:
            return self.__attr[Country.index[name]]
        except KeyError:
            raise AttributeError(name)
    def __delattr__(self, name):
        # Attribute delete function (fixes TODO): blank out a known column.
        # Population resets to 0 so + and - keep working after deletion;
        # unknown names fall back to normal attribute deletion.
        if name in Country.index:
            pos = Country.index[name]
            self.__attr[pos] = 0 if name == 'population' else ""
        else:
            object.__delattr__(self, name)
######################################################################################
if __name__ == "__main__":
    # Sample rows: name, population, capital, capital population, continent,
    # independence date, currency, religion, language(s).
    belgium = Country("Belgium,10445852,Brussels,737966,Europe,1830,Euro,Catholicism,Dutch,French,German")
    japan = Country("Japan,127920000,Tokyo,31139900,Orient,-660,Yen,Shinto;Buddhism,Japanese")
    myanmar = Country("Myanmar,42909464,Yangon,4344100,Asia,1948,Kyat,Buddhism,Burmese")
    sweden = Country("Sweden,9001774,Stockholm,1622300,Europe,1523,Swedish Krona,Lutheran,Swedish")
    # NOTE(review): the blocks below are Python 2 `print` statements kept
    # inert inside string literals; they only run if the quotes are removed
    # under a Python 2 interpreter.
    # Tests for question 1
    """
    for place in belgium, japan, myanmar, sweden:
        print place,
        print place.population
    print "\nPopulation before:", japan.population
    japan += 10
    print "After adding 10 :", japan.population
    """
    # Test for question 2
    """
    print "\nBefore:", myanmar.capital
    myanmar.capital = "Naypyidaw"
    print "After :", myanmar.capital
    """
    # Tests for question 3
    """
    print "\nBefore:", belgium
    del(belgium.capital)
    del(belgium.population)
    belgium += 100
    print "After :", belgium
    """
| {
"content_hash": "d7fc6e368f887e5d7b110f30758ae716",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 106,
"avg_line_length": 30.818181818181817,
"alnum_prop": 0.5698131760078663,
"repo_name": "rbprogrammer/advanced_python_topics",
"id": "5297ee46acbc7259d98552f377853194f7d8b2f7",
"size": "2070",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "course-material/py2/country.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "429"
},
{
"name": "C",
"bytes": "163248"
},
{
"name": "C++",
"bytes": "25126"
},
{
"name": "Makefile",
"bytes": "1222"
},
{
"name": "Python",
"bytes": "292085"
},
{
"name": "Shell",
"bytes": "2515"
},
{
"name": "VimL",
"bytes": "10757"
}
],
"symlink_target": ""
} |
import gym
from gym.envs.registration import register
import readchar
# Discrete action indices understood by FrozenLakeEnv.
LEFT = 0
DOWN = 1
RIGHT = 2
UP = 3

# ANSI escape sequences emitted by the arrow keys, mapped to actions.
arrow_keys = {
    '\x1b[A': UP,
    '\x1b[B': DOWN,
    '\x1b[C': RIGHT,
    '\x1b[D': LEFT,
}

# Register a deterministic (non-slippery) 4x4 FrozenLake variant.
register(
    id='FrozenLake-v3',
    entry_point='gym.envs.toy_text:FrozenLakeEnv',
    kwargs={'map_name':"4x4",'is_slippery':False},
)

env = gym.make('FrozenLake-v3')
env.render()

# Let the user steer the agent with the arrow keys; any other key aborts.
while True:
    key = readchar.readkey()
    if key not in arrow_keys:
        print("Game aborted!")
        break
    action = arrow_keys[key]
    state, reward, done, info = env.step(action)
    env.render()
    print("Done: ", done, "State: ", state, "Action: ", action, "Reward: ", reward, "Info: ", info)
    if done:
        print("Finished with reward", reward)
        break
| {
"content_hash": "b68f269eae9194a4b0d89304bdd17a1d",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 95,
"avg_line_length": 25.026315789473685,
"alnum_prop": 0.4889589905362776,
"repo_name": "TeamLab/lab_study_group",
"id": "9f7b3bf34145f8691c41fff45d027123c91dd9a3",
"size": "951",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "2017/coursera/code/0403/OpenAI_GYM_example_code.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2744178"
},
{
"name": "Python",
"bytes": "71576"
}
],
"symlink_target": ""
} |
import re
from .completion_base import GenerateCompletionListBase
from ..anf_util import *
class WindowsCompletion(GenerateCompletionListBase):
    """Tab-completion generator for Windows-style paths, handling ``/``,
    ``\\`` and trailing ``:`` (alias/drive) separators.

    Repeated calls with the same input text cycle through the candidate
    list; any other input regenerates it.
    """
    def __init__(self, command):
        super(WindowsCompletion, self).__init__(command)
        self.view = command.view

    def completion(self, path_in):
        """Return ``path_in`` with its last component replaced by the
        current completion candidate."""
        # Group 1: everything up to and including the last separator;
        # group 2: the partial final component being completed.
        pattern = r"(.*[/\\:])(.*)"
        match = re.match(pattern, path_in)
        if hasattr(self, "prev_text") and self.prev_text == path_in:
            # Same text as last call: advance to the next candidate.
            self.offset = (self.offset + 1) % len(self.completion_list)
        else:
            # Generate new completion list
            self.completion_list, self.alias_list, self.dir_list, self.file_list = self.generate_completion_list(path_in)
            self.offset = 0
            if len(self.completion_list) == 0:
                # No candidates: fall back to echoing the partial text.
                self.completion_list = [match.group(2)] if match else [path_in]
        # The two former branches computed this identically; deduplicated.
        completion = self.completion_list[self.offset]
        if self.settings.get(COMPLETE_SINGLE_ENTRY_SETTING):
            # A unique match gets its trailing separator appended.
            if len(self.completion_list) == 1:
                if completion in self.alias_list:
                    completion += ":"
                elif completion in self.dir_list:
                    completion += "/"
        if match:
            # Re-attach the directory prefix before the completed component.
            new_content = re.sub(pattern, r"\1", path_in) + completion
            first_token = False
        else:
            new_content = completion
            first_token = True
        if len(self.completion_list) > 1:
            if first_token and self.view is not None:
                # Tell the user which namespace a first-token match came from.
                if completion in self.alias_list:
                    self.view.set_status("AdvancedNewFile2", "Alias Completion")
                elif completion in self.dir_list:
                    self.view.set_status("AdvancedNewFile2", "Directory Completion")
            self.prev_text = new_content
        else:
            self.prev_text = None
        return new_content
| {
"content_hash": "3b8bde7dc4e593fe635c933e7eaebb0e",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 121,
"avg_line_length": 41.644067796610166,
"alnum_prop": 0.5327635327635327,
"repo_name": "herove/dotfiles",
"id": "952be8e560a470ec22ebee24e7fa33e950bff16a",
"size": "2457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sublime/Packages/AdvancedNewFile/advanced_new_file/completions/windows_completion.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "358958"
},
{
"name": "C++",
"bytes": "601356"
},
{
"name": "CMake",
"bytes": "17100"
},
{
"name": "Java",
"bytes": "77"
},
{
"name": "JavaScript",
"bytes": "1058301"
},
{
"name": "Python",
"bytes": "5847904"
},
{
"name": "Shell",
"bytes": "49159"
},
{
"name": "Vim script",
"bytes": "43682"
}
],
"symlink_target": ""
} |
import sys
# Make the package under test importable when run from the test directory.
sys.path.insert(0,'../')
# Python 2.6 lacks the modern unittest API; unittest2 backports it.
if sys.version_info[:2] == (2,6):
	import unittest2 as unittest
else:
	import unittest
from rivets_test import RivetsTest
import rivets
class TestDirectiveProcessor(RivetsTest):
	"""Tests for rivets.processing.DirectiveProcessor: extraction of
	require directives from an asset's comment header across the
	supported comment syntaxes (//, #, /* */, ###)."""
	def directive_parser(self,fixture_name):
		# Build a DirectiveProcessor over one of the directives/ fixtures.
		return rivets.processing.DirectiveProcessor(self.fixture_path("directives/%s"%fixture_name))
	def directive_fixture(self,name):
		# Raw (unprocessed) contents of a directives/ fixture.
		return self.fixture("directives/%s"%name)
	def directives(self,offset=0):
		# Directive tuples expected from several fixtures; the first element
		# appears to be the source line number, shifted by `offset` --
		# verify against DirectiveProcessor's output format.
		return [
			(3+offset,'require','a'),
			(4+offset,'require','b'),
			(6+offset,'require','c')
		]
	def testParsingDoubleSlashComments(self):
		''' Test parsing double-slash comments '''
		parser = self.directive_parser("double_slash")
		self.assertEqual(
			"// Header\n//\n\n\n//\n\n(function() {\n})();\n",
			parser.processed_source
		)
		self.assertEqual(self.directives(),parser.directives)
	def testParsingHashComments(self):
		''' Test parsing hash comments '''
		parser = self.directive_parser("hash")
		self.assertEqual(
			"# Header\n#\n\n\n#\n\n(->)()\n",
			parser.processed_source
		)
		self.assertEqual(self.directives(),parser.directives)
	def testParsingSlashStarComments(self):
		''' Test parsing slash-star comments '''
		parser = self.directive_parser("slash_star")
		self.assertEqual(
			"/* Header\n *\n\n\n *\n\n */\n\n(function() {\n})();\n",
			parser.processed_source
		)
		self.assertEqual(self.directives(),parser.directives)
	def testParsingSingleLineSlashStarComments(self):
		''' Test parsing single line slash-star comments '''
		parser = self.directive_parser("slash_star_single")
		self.assertEqual(
			"\n(function() {\n})();\n",
			parser.processed_source
		)
		self.assertEqual([(1, "require", "a")],parser.directives)
	def testParsingTripleHashComments(self):
		''' Test parsing triple-hash comments '''
		parser = self.directive_parser("triple_hash")
		self.assertEqual(
			"###\nHeader\n\n\n\n\n\n###\n\n(->)()\n",
			parser.processed_source
		)
		self.assertEqual(self.directives(1),parser.directives)
	def testHeaderCommentWithoutDirectivesIsUnmodified(self):
		''' Test header comment without directives is unmodified '''
		parser = self.directive_parser("comment_without_directives")
		self.assertEqual(
			"/*\n * Comment\n */\n\n(function() {\n})();\n",
			parser.processed_source
		)
		self.assertEqual([],parser.directives)
	def testDirectivesInCommentAfterHeaderAreNotParsed(self):
		''' Test directives in comment after header are not parsed '''
		parser = self.directive_parser("directives_after_header")
		self.assertEqual(
			"/*\n * Header\n\n */\n\n\n\n\n\n\n/* Not a directive */\n\n(function() {\n})();\n\n/*= require e */\n",
			parser.processed_source
		)
		self.assertEqual(
			[
				(3, "require", "a"),
				(6, "require", "b"),
				(7, "require", "c"),
				(9, "require", "d")
			],
			parser.directives
		)
	def testHeadersMustOccurAtTheBeginningOfTheFile(self):
		''' Test headers must occur at the beginning of the file '''
		parser = self.directive_parser("code_before_comment")
		self.assertEqual("",parser.processed_header)
		self.assertEqual([],parser.directives)
	def testNoHeader(self):
		''' Test no header '''
		# With no header comment, the source passes through untouched.
		parser = self.directive_parser("no_header")
		self.assertEqual(
			self.directive_fixture("no_header"),
			parser.processed_source
		)
		self.assertEqual([],parser.directives)
	def testDirectiveWordSplitting(self):
		''' Test directive word splitting '''
		parser = self.directive_parser("directive_word_splitting")
		self.assertEqual(
			[
				(1, "require"),
				(2, "require", "two"),
				(3, "require", "two", "three"),
				(4, "require", "two three"),
				(6, "require", "seven")
			],
			parser.directives
		)
	def testSpaceBetweenEqualsandDirectiveWord(self):
		''' Test space between = and directive word '''
		parser = self.directive_parser("space_between_directive_word")
		self.assertEqual("var foo;\n",parser.processed_source)
		self.assertEqual([(1,"require","foo")],parser.directives)
	def testDocumentationHeaders(self):
		''' Test documentation headers '''
		parser = self.directive_parser("documentation")
		self.assertEqual(
			"\n//\n// = Foo\n//\n// == Examples\n//\n// Foo.bar()\n// => \"baz\"\nvar Foo;\n",
			parser.processed_source
		)
		self.assertEqual([(1,"require","project")],parser.directives)
class CustomDirectiveProcessor(rivets.processing.DirectiveProcessor):
	"""Directive processor extended with a `require_glob` directive that
	requires every asset matching a glob relative to the current file."""
	def process_require_glob_directive(self, glob_path):
		import glob
		import os
		base_dir = os.path.dirname(self._file)
		for asset_path in sorted(glob.glob("%s/%s" % (base_dir, glob_path))):
			self.context.require_asset(asset_path)
class TestCustomDirectiveProcessor(RivetsTest):
	"""Verifies that a user-supplied directive processor can replace the
	built-in DirectiveProcessor for a mimetype."""
	def setUp(self):
		self.env = rivets.Environment()
		self.env.append_path(self.fixture_path('context'))
	def testCustomProcessorUsingContextResolveAndContextDepend(self):
		''' Test custom processor using Context#rivets_resolve and Context#rivets_depend '''
		# Swap the stock processor for our glob-aware subclass.
		self.env.unregister_preprocessor('application/javascript', rivets.processing.DirectiveProcessor)
		self.env.register_preprocessor('application/javascript', CustomDirectiveProcessor)
		self.assertEqual("var Foo = {};\n\n", str(self.env["require_glob.js"]))
if __name__ == '__main__':
unittest.main() | {
"content_hash": "ec53e695462c5343ed178f0e71a8e282",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 108,
"avg_line_length": 28.594594594594593,
"alnum_prop": 0.6805293005671077,
"repo_name": "OiNutter/rivets",
"id": "930a08ed945abb14b6e8d3b8533884264391068b",
"size": "5290",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_directive_processor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "584"
},
{
"name": "CoffeeScript",
"bytes": "7131"
},
{
"name": "JavaScript",
"bytes": "818"
},
{
"name": "Python",
"bytes": "164710"
}
],
"symlink_target": ""
} |
import json
import operator
import os
import re
import sys
from datetime import datetime, timedelta, timezone
from decimal import Decimal
from functools import wraps
from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, TypeVar, cast
from unittest.mock import Mock, patch
import orjson
import responses
import stripe
from django.conf import settings
from django.core import signing
from django.http import HttpResponse
from django.urls.resolvers import get_resolver
from django.utils.timezone import now as timezone_now
from corporate.lib.stripe import (
MAX_INVOICED_LICENSES,
MIN_INVOICED_LICENSES,
BillingError,
StripeCardError,
add_months,
attach_discount_to_realm,
catch_stripe_errors,
compute_plan_parameters,
get_discount_for_realm,
get_latest_seat_count,
invoice_plan,
invoice_plans_as_needed,
make_end_of_cycle_updates_if_needed,
next_month,
process_initial_upgrade,
sign_string,
stripe_get_customer,
unsign_string,
update_license_ledger_for_automanaged_plan,
update_license_ledger_if_needed,
update_or_create_stripe_customer,
)
from corporate.models import (
Customer,
CustomerPlan,
LicenseLedger,
get_current_plan_by_customer,
get_current_plan_by_realm,
get_customer_by_realm,
)
from zerver.lib.actions import (
do_activate_user,
do_create_user,
do_deactivate_realm,
do_deactivate_user,
do_reactivate_realm,
do_reactivate_user,
)
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import reset_emails_in_zulip_realm
from zerver.lib.timestamp import datetime_to_timestamp, timestamp_to_datetime
from zerver.models import Realm, RealmAuditLog, UserProfile, get_realm
# Generic type variable for test functions wrapped by the fixture decorators.
CallableT = TypeVar('CallableT', bound=Callable[..., Any])
# Directory (relative to the repo root) where Stripe fixtures are recorded.
STRIPE_FIXTURES_DIR = "corporate/tests/stripe_fixtures"
# TODO: check that this creates a token similar to what is created by our
# actual Stripe Checkout flows
def stripe_create_token(card_number: str="4242424242424242") -> stripe.Token:
    """Create a card token via the Stripe API using fixed test-card details."""
    card_details = {
        "number": card_number,
        "exp_month": 3,
        "exp_year": 2033,
        "cvc": "333",
        "name": "Ada Starr",
        "address_line1": "Under the sea,",
        "address_city": "Pacific",
        "address_zip": "33333",
        "address_country": "United States",
    }
    return stripe.Token.create(card=card_details)
def stripe_fixture_path(decorated_function_name: str, mocked_function_name: str, call_count: int) -> str:
    """Compute the fixture filename for the Nth call of a mocked Stripe
    function inside a given test."""
    # Make the eventual filename a bit shorter, and also we conventionally
    # use test_* for the python test files
    if decorated_function_name.startswith('test_'):
        decorated_function_name = decorated_function_name[len('test_'):]
    # mocked_function_name[7:] strips the leading "stripe." prefix.
    return f"{STRIPE_FIXTURES_DIR}/{decorated_function_name}--{mocked_function_name[7:]}.{call_count}.json"
def fixture_files_for_function(decorated_function: CallableT) -> List[str]: # nocoverage
    """Return the sorted fixture paths previously recorded for a test."""
    name = decorated_function.__name__
    if name.startswith('test_'):
        name = name[len('test_'):]
    prefix = name + '--'
    return sorted(f'{STRIPE_FIXTURES_DIR}/{f}'
                  for f in os.listdir(STRIPE_FIXTURES_DIR)
                  if f.startswith(prefix))
def generate_and_save_stripe_fixture(decorated_function_name: str, mocked_function_name: str,
                                     mocked_function: CallableT) -> Callable[[Any, Any], Any]: # nocoverage
    """Build a side_effect that calls the real Stripe API and records the
    response (or a raised StripeError) to the matching fixture file."""
    def _generate_and_save_stripe_fixture(*args: Any, **kwargs: Any) -> Any:
        # Note that mock is not the same as mocked_function, even though their
        # definitions look the same
        mock = operator.attrgetter(mocked_function_name)(sys.modules[__name__])
        # The mock's call_count numbers the fixture for this particular call.
        fixture_path = stripe_fixture_path(decorated_function_name, mocked_function_name, mock.call_count)
        try:
            with responses.RequestsMock() as request_mock:
                request_mock.add_passthru("https://api.stripe.com")
                # Talk to Stripe
                stripe_object = mocked_function(*args, **kwargs)
        except stripe.error.StripeError as e:
            # Errors get recorded too, so replay can re-raise them later.
            with open(fixture_path, 'w') as f:
                error_dict = e.__dict__
                error_dict["headers"] = dict(error_dict["headers"])
                f.write(json.dumps(error_dict, indent=2, separators=(',', ': '), sort_keys=True) + "\n")
            raise e
        with open(fixture_path, 'w') as f:
            if stripe_object is not None:
                f.write(str(stripe_object) + "\n")
            else:
                f.write("{}\n")
        return stripe_object
    return _generate_and_save_stripe_fixture
def read_stripe_fixture(decorated_function_name: str,
                        mocked_function_name: str) -> Callable[[Any, Any], Any]:
    """Build a side_effect that replays a previously recorded Stripe
    response (or re-raises a recorded StripeError) from its fixture."""
    def _read_stripe_fixture(*args: Any, **kwargs: Any) -> Any:
        mock = operator.attrgetter(mocked_function_name)(sys.modules[__name__])
        fixture_path = stripe_fixture_path(decorated_function_name, mocked_function_name, mock.call_count)
        with open(fixture_path, "rb") as f:
            fixture = orjson.loads(f.read())
        # Check for StripeError fixtures
        if "json_body" in fixture:
            requestor = stripe.api_requestor.APIRequestor()
            # This function will raise the relevant StripeError according to the fixture
            requestor.interpret_response(fixture["http_body"], fixture["http_status"], fixture["headers"])
        return stripe.util.convert_to_stripe_object(fixture)
    return _read_stripe_fixture
def delete_fixture_data(decorated_function: CallableT) -> None: # nocoverage
    """Remove every recorded Stripe fixture for the given test."""
    for path in fixture_files_for_function(decorated_function):
        os.remove(path)
def normalize_fixture_data(decorated_function: CallableT,
                           tested_timestamp_fields: Sequence[str] = []) -> None: # nocoverage
    """Rewrite freshly generated fixtures so run-to-run variance (Stripe
    ids, fingerprints, invoice numbers, dates, IPs, timestamps) is replaced
    by stable placeholder values, keeping fixture diffs reviewable."""
    # stripe ids are all of the form cus_D7OT2jf5YAtZQ2
    id_lengths = [
        ('cus', 14), ('sub', 14), ('si', 14), ('sli', 14), ('req', 14), ('tok', 24), ('card', 24),
        ('txn', 24), ('ch', 24), ('in', 24), ('ii', 24), ('test', 12), ('src_client_secret', 24),
        ('src', 24), ('invst', 26), ('acct', 16), ('rcpt', 31)]
    # We'll replace cus_D7OT2jf5YAtZQ2 with something like cus_NORMALIZED0001
    pattern_translations = {
        f"{prefix}_[A-Za-z0-9]{{{length}}}": f"{prefix}_NORMALIZED%0{length - 10}d"
        for prefix, length in id_lengths
    }
    # We'll replace "invoice_prefix": "A35BC4Q" with something like "invoice_prefix": "NORMA01"
    pattern_translations.update({
        '"invoice_prefix": "([A-Za-z0-9]{7,8})"': 'NORMA%02d',
        '"fingerprint": "([A-Za-z0-9]{16})"': 'NORMALIZED%06d',
        '"number": "([A-Za-z0-9]{7,8}-[A-Za-z0-9]{4})"': 'NORMALI-%04d',
        '"address": "([A-Za-z0-9]{9}-test_[A-Za-z0-9]{12})"': '000000000-test_NORMALIZED%02d',
        # Don't use (..) notation, since the matched strings may be small integers that will also match
        # elsewhere in the file
        '"realm_id": "[0-9]+"': '"realm_id": "%d"',
    })
    # Normalizing across all timestamps still causes a lot of variance run to run, which is
    # why we're doing something a bit more complicated
    for i, timestamp_field in enumerate(tested_timestamp_fields):
        # Don't use (..) notation, since the matched timestamp can easily appear in other fields
        pattern_translations[
            f'"{timestamp_field}": 1[5-9][0-9]{{8}}(?![0-9-])'
        ] = f'"{timestamp_field}": 1{i+1:02}%07d'
    # Per-pattern memo of original value -> normalized replacement, so the
    # same id maps to the same placeholder across all fixture files.
    normalized_values: Dict[str, Dict[str, str]] = {
        pattern: {} for pattern in pattern_translations.keys()
    }
    for fixture_file in fixture_files_for_function(decorated_function):
        with open(fixture_file) as f:
            file_content = f.read()
        for pattern, translation in pattern_translations.items():
            for match in re.findall(pattern, file_content):
                if match not in normalized_values[pattern]:
                    normalized_values[pattern][match] = translation % (len(normalized_values[pattern]) + 1,)
                file_content = file_content.replace(match, normalized_values[pattern][match])
        file_content = re.sub(r'(?<="risk_score": )(\d+)', '0', file_content)
        file_content = re.sub(r'(?<="times_redeemed": )(\d+)', '0', file_content)
        file_content = re.sub(r'(?<="idempotency-key": )"([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f-]*)"',
                              '"00000000-0000-0000-0000-000000000000"', file_content)
        # Dates
        file_content = re.sub(r'(?<="Date": )"(.* GMT)"', '"NORMALIZED DATETIME"', file_content)
        file_content = re.sub(r'[0-3]\d [A-Z][a-z]{2} 20[1-2]\d', 'NORMALIZED DATE', file_content)
        # IP addresses
        file_content = re.sub(r'"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}"', '"0.0.0.0"', file_content)
        # All timestamps not in tested_timestamp_fields
        file_content = re.sub(r': (1[5-9][0-9]{8})(?![0-9-])', ': 1000000000', file_content)
        with open(fixture_file, "w") as f:
            f.write(file_content)
# Every Stripe SDK entry point the billing code exercises; each of these
# is patched (to record or replay fixtures) by mock_stripe below.
MOCKED_STRIPE_FUNCTION_NAMES = [f"stripe.{name}" for name in [
    "Charge.create", "Charge.list",
    "Coupon.create",
    "Customer.create", "Customer.retrieve", "Customer.save",
    "Invoice.create", "Invoice.finalize_invoice", "Invoice.list", "Invoice.pay", "Invoice.upcoming",
    "InvoiceItem.create", "InvoiceItem.list",
    "Plan.create",
    "Product.create",
    "Subscription.create", "Subscription.delete", "Subscription.retrieve", "Subscription.save",
    "Token.create",
]]
def mock_stripe(tested_timestamp_fields: Sequence[str]=[],
                generate: Optional[bool]=None) -> Callable[[CallableT], CallableT]:
    """Decorator factory: patch every Stripe SDK function for a test.

    When generating fixtures (generate=True, or the
    GENERATE_STRIPE_FIXTURES setting), real Stripe calls are made and
    recorded, then normalized; otherwise recorded fixtures are replayed,
    so tests run offline and deterministically.
    """
    def _mock_stripe(decorated_function: CallableT) -> CallableT:
        generate_fixture = generate
        if generate_fixture is None:
            generate_fixture = settings.GENERATE_STRIPE_FIXTURES
        # Stack one patch per mocked Stripe function around the test.
        for mocked_function_name in MOCKED_STRIPE_FUNCTION_NAMES:
            mocked_function = operator.attrgetter(mocked_function_name)(sys.modules[__name__])
            if generate_fixture:
                side_effect = generate_and_save_stripe_fixture(
                    decorated_function.__name__, mocked_function_name, mocked_function) # nocoverage
            else:
                side_effect = read_stripe_fixture(decorated_function.__name__, mocked_function_name)
            decorated_function = patch(mocked_function_name, side_effect=side_effect)(decorated_function)
        @wraps(decorated_function)
        def wrapped(*args: object, **kwargs: object) -> object:
            if generate_fixture: # nocoverage
                # Start from a clean slate, then normalize what was recorded.
                delete_fixture_data(decorated_function)
                val = decorated_function(*args, **kwargs)
                normalize_fixture_data(decorated_function, tested_timestamp_fields)
                return val
            else:
                return decorated_function(*args, **kwargs)
        return cast(CallableT, wrapped)
    return _mock_stripe
# A Kandra is a fictional character that can become anything. Used as a
# wildcard when testing for equality.
class Kandra: # nocoverage: TODO
    """Wildcard value for equality assertions: compares equal to anything."""
    def __eq__(self, anything: Any) -> bool:
        return True
class StripeTestCase(ZulipTestCase):
    """Base class for billing tests: pins the 'zulip' realm to a
    predictable seat count and provides helpers that drive the
    /upgrade/ flow."""
    def setUp(self, *mocks: Mock) -> None:
        super().setUp()
        reset_emails_in_zulip_realm()
        realm = get_realm('zulip')
        # Explicitly limit our active users to 6 regular users,
        # to make seat_count less prone to changes in our test data.
        # We also keep a guest user and a bot to make the data
        # slightly realistic.
        active_emails = [
            self.example_email('AARON'),
            self.example_email('cordelia'),
            self.example_email('hamlet'),
            self.example_email('iago'),
            self.example_email('othello'),
            self.example_email('desdemona'),
            self.example_email('polonius'), # guest
            self.example_email('default_bot'), # bot
        ]
        # Deactivate all users in our realm that aren't in our whitelist.
        UserProfile.objects.filter(realm_id=realm.id).exclude(email__in=active_emails).update(is_active=False)
        # sanity check our 8 expected users are active
        self.assertEqual(
            UserProfile.objects.filter(realm=realm, is_active=True).count(),
            8,
        )
        # Make sure we have active users outside our realm (to make
        # sure relevant queries restrict on realm).
        self.assertEqual(
            UserProfile.objects.exclude(realm=realm).filter(is_active=True).count(),
            10,
        )
        # Our seat count excludes our guest user and bot, and
        # we want this to be predictable for certain tests with
        # arithmetic calculations.
        self.assertEqual(get_latest_seat_count(realm), 6)
        self.seat_count = 6
        self.signed_seat_count, self.salt = sign_string(str(self.seat_count))
        # Choosing dates with corresponding timestamps below 1500000000 so that they are
        # not caught by our timestamp normalization regex in normalize_fixture_data
        self.now = datetime(2012, 1, 2, 3, 4, 5, tzinfo=timezone.utc)
        self.next_month = datetime(2012, 2, 2, 3, 4, 5, tzinfo=timezone.utc)
        self.next_year = datetime(2013, 1, 2, 3, 4, 5, tzinfo=timezone.utc)
    def get_signed_seat_count_from_response(self, response: HttpResponse) -> Optional[str]:
        # Scrape the hidden signed_seat_count form field out of the response body.
        match = re.search(r'name=\"signed_seat_count\" value=\"(.+)\"', response.content.decode("utf-8"))
        return match.group(1) if match else None
    def get_salt_from_response(self, response: HttpResponse) -> Optional[str]:
        # Scrape the hidden salt form field out of the response body.
        match = re.search(r'name=\"salt\" value=\"(\w+)\"', response.content.decode("utf-8"))
        return match.group(1) if match else None
    def upgrade(self, invoice: bool=False, talk_to_stripe: bool=True,
                realm: Optional[Realm]=None, del_args: Sequence[str]=[],
                **kwargs: Any) -> HttpResponse:
        """POST /json/billing/upgrade the way the billing page would.

        invoice=True exercises the send_invoice flow; otherwise a card
        upgrade is performed (with a placeholder token when
        talk_to_stripe is False).  del_args removes parameters before
        posting, for validation tests.
        """
        host_args = {}
        if realm is not None: # nocoverage: TODO
            host_args['HTTP_HOST'] = realm.host
        response = self.client_get("/upgrade/", {}, **host_args)
        params: Dict[str, Any] = {
            'schedule': 'annual',
            'signed_seat_count': self.get_signed_seat_count_from_response(response),
            'salt': self.get_salt_from_response(response)}
        if invoice: # send_invoice
            params.update({
                'billing_modality': 'send_invoice',
                'licenses': 123})
        else: # charge_automatically
            stripe_token = None
            if not talk_to_stripe:
                stripe_token = 'token'
            stripe_token = kwargs.get('stripe_token', stripe_token)
            if stripe_token is None:
                stripe_token = stripe_create_token().id
            params.update({
                'billing_modality': 'charge_automatically',
                'license_management': 'automatic',
                'stripe_token': stripe_token,
            })
        params.update(kwargs)
        for key in del_args:
            if key in params:
                del params[key]
        for key, value in params.items():
            params[key] = orjson.dumps(value).decode()
        return self.client_post("/json/billing/upgrade", params, **host_args)
    # Upgrade without talking to Stripe
    def local_upgrade(self, *args: Any) -> None:
        """Run process_initial_upgrade directly, with every mocked Stripe
        entry point replaced by a canned StripeMock object."""
        class StripeMock(Mock):
            def __init__(self, depth: int=1):
                super().__init__(spec=stripe.Card)
                self.id = 'id'
                self.created = '1000'
                self.last4 = '4242'
                # depth guard avoids infinite StripeMock recursion via .source
                if depth == 1:
                    self.source = StripeMock(depth=2)
        def upgrade_func(*args: Any) -> Any:
            return process_initial_upgrade(self.example_user('hamlet'), *args[:4])
        for mocked_function_name in MOCKED_STRIPE_FUNCTION_NAMES:
            upgrade_func = patch(mocked_function_name, return_value=StripeMock())(upgrade_func)
        upgrade_func(*args)
class StripeTest(StripeTestCase):
    @patch("corporate.lib.stripe.billing_logger.error")
    def test_catch_stripe_errors(self, mock_billing_logger_error: Mock) -> None:
        """@catch_stripe_errors should convert Stripe SDK exceptions into
        BillingError/StripeCardError and log them."""
        @catch_stripe_errors
        def raise_invalid_request_error() -> None:
            raise stripe.error.InvalidRequestError(
                "message", "param", "code", json_body={})
        with self.assertRaises(BillingError) as context:
            raise_invalid_request_error()
        self.assertEqual('other stripe error', context.exception.description)
        mock_billing_logger_error.assert_called()
        @catch_stripe_errors
        def raise_card_error() -> None:
            error_message = "The card number is not a valid credit card number."
            json_body = {"error": {"message": error_message}}
            raise stripe.error.CardError(error_message, "number", "invalid_number",
                                         json_body=json_body)
        with self.assertRaises(StripeCardError) as context:
            raise_card_error()
        # Card errors keep the user-facing Stripe message.
        self.assertIn('not a valid credit card', context.exception.message)
        self.assertEqual('card error', context.exception.description)
        mock_billing_logger_error.assert_called()
def test_billing_not_enabled(self) -> None:
iago = self.example_user('iago')
with self.settings(BILLING_ENABLED=False):
self.login_user(iago)
response = self.client_get("/upgrade/", follow=True)
self.assertEqual(response.status_code, 404)
    @mock_stripe(tested_timestamp_fields=["created"])
    def test_upgrade_by_card(self, *mocks: Mock) -> None:
        """End-to-end card upgrade: checks the Stripe objects created
        (Customer, Charge, Invoice, line items) and the resulting Zulip
        state (Customer/CustomerPlan/LicenseLedger rows, audit log, realm
        plan type, and the /billing page)."""
        user = self.example_user("hamlet")
        self.login_user(user)
        response = self.client_get("/upgrade/")
        self.assert_in_success_response(['Pay annually'], response)
        self.assertNotEqual(user.realm.plan_type, Realm.STANDARD)
        self.assertFalse(Customer.objects.filter(realm=user.realm).exists())
        # Click "Make payment" in Stripe Checkout
        with patch('corporate.lib.stripe.timezone_now', return_value=self.now):
            self.upgrade()
        # Check that we correctly created a Customer object in Stripe
        stripe_customer = stripe_get_customer(Customer.objects.get(realm=user.realm).stripe_customer_id)
        self.assertEqual(stripe_customer.default_source.id[:5], 'card_')
        self.assertEqual(stripe_customer.description, "zulip (Zulip Dev)")
        self.assertEqual(stripe_customer.discount, None)
        self.assertEqual(stripe_customer.email, user.email)
        metadata_dict = dict(stripe_customer.metadata)
        self.assertEqual(metadata_dict['realm_str'], 'zulip')
        try:
            int(metadata_dict['realm_id'])
        except ValueError: # nocoverage
            raise AssertionError("realm_id is not a number")
        # Check Charges in Stripe
        stripe_charges = [charge for charge in stripe.Charge.list(customer=stripe_customer.id)]
        self.assertEqual(len(stripe_charges), 1)
        self.assertEqual(stripe_charges[0].amount, 8000 * self.seat_count)
        # TODO: fix Decimal
        self.assertEqual(stripe_charges[0].description,
                         f"Upgrade to Zulip Standard, $80.0 x {self.seat_count}")
        self.assertEqual(stripe_charges[0].receipt_email, user.email)
        self.assertEqual(stripe_charges[0].statement_descriptor, "Zulip Standard")
        # Check Invoices in Stripe
        stripe_invoices = [invoice for invoice in stripe.Invoice.list(customer=stripe_customer.id)]
        self.assertEqual(len(stripe_invoices), 1)
        self.assertIsNotNone(stripe_invoices[0].status_transitions.finalized_at)
        invoice_params = {
            # auto_advance is False because the invoice has been paid
            'amount_due': 0, 'amount_paid': 0, 'auto_advance': False, 'billing': 'charge_automatically',
            'charge': None, 'status': 'paid', 'total': 0}
        for key, value in invoice_params.items():
            self.assertEqual(stripe_invoices[0].get(key), value)
        # Check Line Items on Stripe Invoice
        stripe_line_items = [item for item in stripe_invoices[0].lines]
        self.assertEqual(len(stripe_line_items), 2)
        line_item_params = {
            'amount': 8000 * self.seat_count, 'description': 'Zulip Standard', 'discountable': False,
            'period': {
                'end': datetime_to_timestamp(self.next_year),
                'start': datetime_to_timestamp(self.now)},
            # There's no unit_amount on Line Items, probably because it doesn't show up on the
            # user-facing invoice. We could pull the Invoice Item instead and test unit_amount there,
            # but testing the amount and quantity seems sufficient.
            'plan': None, 'proration': False, 'quantity': self.seat_count}
        for key, value in line_item_params.items():
            self.assertEqual(stripe_line_items[0].get(key), value)
        line_item_params = {
            'amount': -8000 * self.seat_count, 'description': 'Payment (Card ending in 4242)',
            'discountable': False, 'plan': None, 'proration': False, 'quantity': 1}
        for key, value in line_item_params.items():
            self.assertEqual(stripe_line_items[1].get(key), value)
        # Check that we correctly populated Customer, CustomerPlan, and LicenseLedger in Zulip
        customer = Customer.objects.get(stripe_customer_id=stripe_customer.id, realm=user.realm)
        plan = CustomerPlan.objects.get(
            customer=customer, automanage_licenses=True,
            price_per_license=8000, fixed_price=None, discount=None, billing_cycle_anchor=self.now,
            billing_schedule=CustomerPlan.ANNUAL, invoiced_through=LicenseLedger.objects.first(),
            next_invoice_date=self.next_month, tier=CustomerPlan.STANDARD,
            status=CustomerPlan.ACTIVE)
        LicenseLedger.objects.get(
            plan=plan, is_renewal=True, event_time=self.now, licenses=self.seat_count,
            licenses_at_next_renewal=self.seat_count)
        # Check RealmAuditLog
        audit_log_entries = list(RealmAuditLog.objects.filter(acting_user=user)
                                 .values_list('event_type', 'event_time').order_by('id'))
        self.assertEqual(audit_log_entries, [
            (RealmAuditLog.STRIPE_CUSTOMER_CREATED, timestamp_to_datetime(stripe_customer.created)),
            (RealmAuditLog.STRIPE_CARD_CHANGED, timestamp_to_datetime(stripe_customer.created)),
            (RealmAuditLog.CUSTOMER_PLAN_CREATED, self.now),
            # TODO: Check for REALM_PLAN_TYPE_CHANGED
            # (RealmAuditLog.REALM_PLAN_TYPE_CHANGED, Kandra()),
        ])
        self.assertEqual(orjson.loads(RealmAuditLog.objects.filter(
            event_type=RealmAuditLog.CUSTOMER_PLAN_CREATED).values_list(
                'extra_data', flat=True).first())['automanage_licenses'], True)
        # Check that we correctly updated Realm
        realm = get_realm("zulip")
        self.assertEqual(realm.plan_type, Realm.STANDARD)
        self.assertEqual(realm.max_invites, Realm.INVITES_STANDARD_REALM_DAILY_MAX)
        # Check that we can no longer access /upgrade
        response = self.client_get("/upgrade/")
        self.assertEqual(response.status_code, 302)
        self.assertEqual('/billing/', response.url)
        # Check /billing has the correct information
        with patch('corporate.views.timezone_now', return_value=self.now):
            response = self.client_get("/billing/")
        self.assert_not_in_success_response(['Pay annually'], response)
        for substring in [
                'Zulip Standard', str(self.seat_count),
                'You are using', f'{self.seat_count} of {self.seat_count} licenses',
                'Licenses are automatically managed by Zulip; when you add',
                'Your plan will renew on', 'January 2, 2013', f'${80 * self.seat_count}.00',
                'Visa ending in 4242',
                'Update card']:
            self.assert_in_response(substring, response)
    @mock_stripe(tested_timestamp_fields=["created"])
    def test_upgrade_by_invoice(self, *mocks: Mock) -> None:
        """Exercise the pay-by-invoice upgrade path end to end.

        Verifies: the Stripe Customer/Invoice/line items created, the local
        Customer/CustomerPlan/LicenseLedger rows, the RealmAuditLog trail,
        the realm plan-type update, the /upgrade redirect, and the /billing
        page contents.

        NOTE(review): 123 appears to be the license count the
        self.upgrade(invoice=True) helper submits by default — confirm
        against the helper's definition (not visible in this chunk).
        """
        user = self.example_user("hamlet")
        self.login_user(user)
        # Click "Make payment" in Stripe Checkout
        with patch('corporate.lib.stripe.timezone_now', return_value=self.now):
            self.upgrade(invoice=True)
        # Check that we correctly created a Customer in Stripe
        stripe_customer = stripe_get_customer(Customer.objects.get(realm=user.realm).stripe_customer_id)
        # It can take a second for Stripe to attach the source to the customer, and in
        # particular it may not be attached at the time stripe_get_customer is called above,
        # causing test flakes.
        # So commenting the next line out, but leaving it here so future readers know what
        # is supposed to happen here
        # self.assertEqual(stripe_customer.default_source.type, 'ach_credit_transfer')
        # Check Charges in Stripe: invoiced customers are never charged a card.
        self.assertFalse(stripe.Charge.list(customer=stripe_customer.id))
        # Check Invoices in Stripe
        stripe_invoices = [invoice for invoice in stripe.Invoice.list(customer=stripe_customer.id)]
        self.assertEqual(len(stripe_invoices), 1)
        self.assertIsNotNone(stripe_invoices[0].due_date)
        self.assertIsNotNone(stripe_invoices[0].status_transitions.finalized_at)
        invoice_params = {
            'amount_due': 8000 * 123, 'amount_paid': 0, 'attempt_count': 0,
            'auto_advance': True, 'billing': 'send_invoice', 'statement_descriptor': 'Zulip Standard',
            'status': 'open', 'total': 8000 * 123}
        for key, value in invoice_params.items():
            self.assertEqual(stripe_invoices[0].get(key), value)
        # Check Line Items on Stripe Invoice
        stripe_line_items = [item for item in stripe_invoices[0].lines]
        self.assertEqual(len(stripe_line_items), 1)
        line_item_params = {
            'amount': 8000 * 123, 'description': 'Zulip Standard', 'discountable': False,
            'period': {
                'end': datetime_to_timestamp(self.next_year),
                'start': datetime_to_timestamp(self.now)},
            'plan': None, 'proration': False, 'quantity': 123}
        for key, value in line_item_params.items():
            self.assertEqual(stripe_line_items[0].get(key), value)
        # Check that we correctly populated Customer, CustomerPlan and LicenseLedger in Zulip
        customer = Customer.objects.get(stripe_customer_id=stripe_customer.id, realm=user.realm)
        plan = CustomerPlan.objects.get(
            customer=customer, automanage_licenses=False, charge_automatically=False,
            price_per_license=8000, fixed_price=None, discount=None, billing_cycle_anchor=self.now,
            billing_schedule=CustomerPlan.ANNUAL, invoiced_through=LicenseLedger.objects.first(),
            next_invoice_date=self.next_year, tier=CustomerPlan.STANDARD,
            status=CustomerPlan.ACTIVE)
        LicenseLedger.objects.get(
            plan=plan, is_renewal=True, event_time=self.now, licenses=123, licenses_at_next_renewal=123)
        # Check RealmAuditLog (no STRIPE_CARD_CHANGED entry, since no card was added)
        audit_log_entries = list(RealmAuditLog.objects.filter(acting_user=user)
                                 .values_list('event_type', 'event_time').order_by('id'))
        self.assertEqual(audit_log_entries, [
            (RealmAuditLog.STRIPE_CUSTOMER_CREATED, timestamp_to_datetime(stripe_customer.created)),
            (RealmAuditLog.CUSTOMER_PLAN_CREATED, self.now),
            # TODO: Check for REALM_PLAN_TYPE_CHANGED
            # (RealmAuditLog.REALM_PLAN_TYPE_CHANGED, Kandra()),
        ])
        self.assertEqual(orjson.loads(RealmAuditLog.objects.filter(
            event_type=RealmAuditLog.CUSTOMER_PLAN_CREATED).values_list(
                'extra_data', flat=True).first())['automanage_licenses'], False)
        # Check that we correctly updated Realm
        realm = get_realm("zulip")
        self.assertEqual(realm.plan_type, Realm.STANDARD)
        self.assertEqual(realm.max_invites, Realm.INVITES_STANDARD_REALM_DAILY_MAX)
        # Check that we can no longer access /upgrade
        response = self.client_get("/upgrade/")
        self.assertEqual(response.status_code, 302)
        self.assertEqual('/billing/', response.url)
        # Check /billing has the correct information
        with patch('corporate.views.timezone_now', return_value=self.now):
            response = self.client_get("/billing/")
        self.assert_not_in_success_response(['Pay annually', 'Update card'], response)
        for substring in [
                'Zulip Standard', str(123),
                'You are using', f'{self.seat_count} of {123} licenses',
                'Licenses are manually managed. You will not be able to add ',
                'Your plan will renew on', 'January 2, 2013', '$9,840.00',  # 9840 = 80 * 123
                'Billed by invoice']:
            self.assert_in_response(substring, response)
@mock_stripe(tested_timestamp_fields=["created"])
def test_free_trial_upgrade_by_card(self, *mocks: Mock) -> None:
user = self.example_user("hamlet")
self.login_user(user)
with self.settings(FREE_TRIAL_DAYS=60):
response = self.client_get("/upgrade/")
free_trial_end_date = self.now + timedelta(days=60)
self.assert_in_success_response(['Pay annually', 'Free Trial', '60 day'], response)
self.assertNotEqual(user.realm.plan_type, Realm.STANDARD)
self.assertFalse(Customer.objects.filter(realm=user.realm).exists())
with patch('corporate.lib.stripe.timezone_now', return_value=self.now):
self.upgrade()
stripe_customer = stripe_get_customer(Customer.objects.get(realm=user.realm).stripe_customer_id)
self.assertEqual(stripe_customer.default_source.id[:5], 'card_')
self.assertEqual(stripe_customer.description, "zulip (Zulip Dev)")
self.assertEqual(stripe_customer.discount, None)
self.assertEqual(stripe_customer.email, user.email)
metadata_dict = dict(stripe_customer.metadata)
self.assertEqual(metadata_dict['realm_str'], 'zulip')
try:
int(metadata_dict['realm_id'])
except ValueError: # nocoverage
raise AssertionError("realm_id is not a number")
stripe_charges = [charge for charge in stripe.Charge.list(customer=stripe_customer.id)]
self.assertEqual(len(stripe_charges), 0)
stripe_invoices = [invoice for invoice in stripe.Invoice.list(customer=stripe_customer.id)]
self.assertEqual(len(stripe_invoices), 0)
customer = Customer.objects.get(stripe_customer_id=stripe_customer.id, realm=user.realm)
plan = CustomerPlan.objects.get(
customer=customer, automanage_licenses=True,
price_per_license=8000, fixed_price=None, discount=None, billing_cycle_anchor=self.now,
billing_schedule=CustomerPlan.ANNUAL, invoiced_through=LicenseLedger.objects.first(),
next_invoice_date=free_trial_end_date, tier=CustomerPlan.STANDARD,
status=CustomerPlan.FREE_TRIAL)
LicenseLedger.objects.get(
plan=plan, is_renewal=True, event_time=self.now, licenses=self.seat_count,
licenses_at_next_renewal=self.seat_count)
audit_log_entries = list(RealmAuditLog.objects.filter(acting_user=user)
.values_list('event_type', 'event_time').order_by('id'))
self.assertEqual(audit_log_entries, [
(RealmAuditLog.STRIPE_CUSTOMER_CREATED, timestamp_to_datetime(stripe_customer.created)),
(RealmAuditLog.STRIPE_CARD_CHANGED, timestamp_to_datetime(stripe_customer.created)),
(RealmAuditLog.CUSTOMER_PLAN_CREATED, self.now),
# TODO: Check for REALM_PLAN_TYPE_CHANGED
# (RealmAuditLog.REALM_PLAN_TYPE_CHANGED, Kandra()),
])
self.assertEqual(orjson.loads(RealmAuditLog.objects.filter(
event_type=RealmAuditLog.CUSTOMER_PLAN_CREATED).values_list(
'extra_data', flat=True).first())['automanage_licenses'], True)
realm = get_realm("zulip")
self.assertEqual(realm.plan_type, Realm.STANDARD)
self.assertEqual(realm.max_invites, Realm.INVITES_STANDARD_REALM_DAILY_MAX)
with patch('corporate.views.timezone_now', return_value=self.now):
response = self.client_get("/billing/")
self.assert_not_in_success_response(['Pay annually'], response)
for substring in [
'Zulip Standard', 'Free Trial', str(self.seat_count),
'You are using', f'{self.seat_count} of {self.seat_count} licenses',
'Your plan will be upgraded to', 'March 2, 2012', f'${80 * self.seat_count}.00',
'Visa ending in 4242',
'Update card']:
self.assert_in_response(substring, response)
self.assert_not_in_success_response(["Go to your Zulip organization"], response)
with patch('corporate.views.timezone_now', return_value=self.now):
response = self.client_get("/billing/?onboarding=true")
self.assert_in_success_response(["Go to your Zulip organization"], response)
with patch('corporate.lib.stripe.get_latest_seat_count', return_value=12):
update_license_ledger_if_needed(realm, self.now)
self.assertEqual(
LicenseLedger.objects.order_by('-id').values_list('licenses', 'licenses_at_next_renewal').first(),
(12, 12),
)
with patch('corporate.lib.stripe.get_latest_seat_count', return_value=15):
update_license_ledger_if_needed(realm, self.next_month)
self.assertEqual(
LicenseLedger.objects.order_by('-id').values_list('licenses', 'licenses_at_next_renewal').first(),
(15, 15),
)
invoice_plans_as_needed(self.next_month)
invoices = stripe.Invoice.list(customer=stripe_customer.id)
self.assertEqual(len(invoices), 0)
customer_plan = CustomerPlan.objects.get(customer=customer)
self.assertEqual(customer_plan.status, CustomerPlan.FREE_TRIAL)
self.assertEqual(customer_plan.next_invoice_date, free_trial_end_date)
invoice_plans_as_needed(free_trial_end_date)
customer_plan.refresh_from_db()
realm.refresh_from_db()
self.assertEqual(customer_plan.status, CustomerPlan.ACTIVE)
self.assertEqual(customer_plan.next_invoice_date, add_months(free_trial_end_date, 1))
self.assertEqual(realm.plan_type, Realm.STANDARD)
invoices = [invoice for invoice in stripe.Invoice.list(customer=stripe_customer.id)]
self.assertEqual(len(invoices), 1)
invoice_params = {
"amount_due": 15 * 80 * 100, "amount_paid": 0, "amount_remaining": 15 * 80 * 100,
"auto_advance": True, "billing": "charge_automatically", "collection_method": "charge_automatically",
"customer_email": self.example_email("hamlet"), "discount": None, "paid": False, "status": "open",
"total": 15 * 80 * 100,
}
for key, value in invoice_params.items():
self.assertEqual(invoices[0].get(key), value)
invoice_items = [invoice_item for invoice_item in invoices[0].get("lines")]
self.assertEqual(len(invoice_items), 1)
invoice_item_params = {
"amount": 15 * 80 * 100, "description": "Zulip Standard - renewal",
"plan": None, "quantity": 15, "subscription": None, "discountable": False,
"period": {
"start": datetime_to_timestamp(free_trial_end_date),
"end": datetime_to_timestamp(add_months(free_trial_end_date, 12)),
},
}
for key, value in invoice_item_params.items():
self.assertEqual(invoice_items[0][key], value)
invoice_plans_as_needed(add_months(free_trial_end_date, 1))
invoices = [invoice for invoice in stripe.Invoice.list(customer=stripe_customer.id)]
self.assertEqual(len(invoices), 1)
with patch('corporate.lib.stripe.get_latest_seat_count', return_value=19):
update_license_ledger_if_needed(realm, add_months(free_trial_end_date, 10))
self.assertEqual(
LicenseLedger.objects.order_by('-id').values_list('licenses', 'licenses_at_next_renewal').first(),
(19, 19),
)
invoice_plans_as_needed(add_months(free_trial_end_date, 10))
invoices = [invoice for invoice in stripe.Invoice.list(customer=stripe_customer.id)]
self.assertEqual(len(invoices), 2)
invoice_params = {
"amount_due": 5172, "auto_advance": True, "billing": "charge_automatically",
"collection_method": "charge_automatically", "customer_email": "hamlet@zulip.com",
}
invoice_items = [invoice_item for invoice_item in invoices[0].get("lines")]
self.assertEqual(len(invoice_items), 1)
invoice_item_params = {
"amount": 5172, "description": "Additional license (Jan 2, 2013 - Mar 2, 2013)",
"discountable": False, "quantity": 4,
"period": {
"start": datetime_to_timestamp(add_months(free_trial_end_date, 10)),
"end": datetime_to_timestamp(add_months(free_trial_end_date, 12)),
},
}
invoice_plans_as_needed(add_months(free_trial_end_date, 12))
invoices = [invoice for invoice in stripe.Invoice.list(customer=stripe_customer.id)]
self.assertEqual(len(invoices), 3)
    @mock_stripe(tested_timestamp_fields=["created"])
    def test_free_trial_upgrade_by_invoice(self, *mocks: Mock) -> None:
        """Exercise the pay-by-invoice upgrade path with FREE_TRIAL_DAYS set.

        Like test_free_trial_upgrade_by_card, but billed by invoice: no
        Stripe invoice exists during the trial, the plan activates at trial
        end with an annual renewal invoice, and (unlike the card flow) the
        next invoice date jumps a full year ahead.

        NOTE(review): 123 appears to be the license count submitted by
        self.upgrade(invoice=True) — confirm against the helper.
        """
        user = self.example_user("hamlet")
        self.login_user(user)
        free_trial_end_date = self.now + timedelta(days=60)
        with self.settings(FREE_TRIAL_DAYS=60):
            response = self.client_get("/upgrade/")
            self.assert_in_success_response(['Pay annually', 'Free Trial', '60 day'], response)
            self.assertNotEqual(user.realm.plan_type, Realm.STANDARD)
            self.assertFalse(Customer.objects.filter(realm=user.realm).exists())
            with patch('corporate.lib.stripe.timezone_now', return_value=self.now):
                self.upgrade(invoice=True)
            # Check the Stripe Customer
            stripe_customer = stripe_get_customer(Customer.objects.get(realm=user.realm).stripe_customer_id)
            self.assertEqual(stripe_customer.discount, None)
            self.assertEqual(stripe_customer.email, user.email)
            metadata_dict = dict(stripe_customer.metadata)
            self.assertEqual(metadata_dict['realm_str'], 'zulip')
            try:
                int(metadata_dict['realm_id'])
            except ValueError:  # nocoverage
                raise AssertionError("realm_id is not a number")
            # No invoice is issued during the trial.
            stripe_invoices = [invoice for invoice in stripe.Invoice.list(customer=stripe_customer.id)]
            self.assertEqual(len(stripe_invoices), 0)
            # Check local billing models
            customer = Customer.objects.get(stripe_customer_id=stripe_customer.id, realm=user.realm)
            plan = CustomerPlan.objects.get(
                customer=customer, automanage_licenses=False,
                price_per_license=8000, fixed_price=None, discount=None, billing_cycle_anchor=self.now,
                billing_schedule=CustomerPlan.ANNUAL, invoiced_through=LicenseLedger.objects.first(),
                next_invoice_date=free_trial_end_date, tier=CustomerPlan.STANDARD,
                status=CustomerPlan.FREE_TRIAL)
            LicenseLedger.objects.get(
                plan=plan, is_renewal=True, event_time=self.now, licenses=123,
                licenses_at_next_renewal=123)
            # Check RealmAuditLog (no STRIPE_CARD_CHANGED entry in this flow)
            audit_log_entries = list(RealmAuditLog.objects.filter(acting_user=user)
                                     .values_list('event_type', 'event_time').order_by('id'))
            self.assertEqual(audit_log_entries, [
                (RealmAuditLog.STRIPE_CUSTOMER_CREATED, timestamp_to_datetime(stripe_customer.created)),
                (RealmAuditLog.CUSTOMER_PLAN_CREATED, self.now),
                # TODO: Check for REALM_PLAN_TYPE_CHANGED
                # (RealmAuditLog.REALM_PLAN_TYPE_CHANGED, Kandra()),
            ])
            self.assertEqual(orjson.loads(RealmAuditLog.objects.filter(
                event_type=RealmAuditLog.CUSTOMER_PLAN_CREATED).values_list(
                    'extra_data', flat=True).first())['automanage_licenses'], False)
            # Check Realm updates
            realm = get_realm("zulip")
            self.assertEqual(realm.plan_type, Realm.STANDARD)
            self.assertEqual(realm.max_invites, Realm.INVITES_STANDARD_REALM_DAILY_MAX)
            # Check /billing contents during the trial
            with patch('corporate.views.timezone_now', return_value=self.now):
                response = self.client_get("/billing/")
            self.assert_not_in_success_response(['Pay annually'], response)
            for substring in [
                'Zulip Standard', 'Free Trial', str(self.seat_count),
                'You are using', f'{self.seat_count} of {123} licenses',
                'Your plan will be upgraded to', 'March 2, 2012',
                f'{80 * 123:,.2f}', 'Billed by invoice',
            ]:
                self.assert_in_response(substring, response)
            # Running the invoicer mid-trial must not invoice the plan.
            with patch('corporate.lib.stripe.invoice_plan') as mocked:
                invoice_plans_as_needed(self.next_month)
            mocked.assert_not_called()
            mocked.reset_mock()
            customer_plan = CustomerPlan.objects.get(customer=customer)
            self.assertEqual(customer_plan.status, CustomerPlan.FREE_TRIAL)
            self.assertEqual(customer_plan.next_invoice_date, free_trial_end_date)
            # At trial end: plan activates, annual renewal invoice is issued.
            invoice_plans_as_needed(free_trial_end_date)
            customer_plan.refresh_from_db()
            realm.refresh_from_db()
            self.assertEqual(customer_plan.status, CustomerPlan.ACTIVE)
            self.assertEqual(customer_plan.next_invoice_date, add_months(free_trial_end_date, 12))
            self.assertEqual(realm.plan_type, Realm.STANDARD)
            invoices = [invoice for invoice in stripe.Invoice.list(customer=stripe_customer.id)]
            self.assertEqual(len(invoices), 1)
            invoice_params = {
                "amount_due": 123 * 80 * 100, "amount_paid": 0, "amount_remaining": 123 * 80 * 100,
                "auto_advance": True, "billing": "send_invoice", "collection_method": "send_invoice",
                "customer_email": self.example_email("hamlet"), "discount": None, "paid": False, "status": "open",
                "total": 123 * 80 * 100,
            }
            for key, value in invoice_params.items():
                self.assertEqual(invoices[0].get(key), value)
            invoice_items = [invoice_item for invoice_item in invoices[0].get("lines")]
            self.assertEqual(len(invoice_items), 1)
            invoice_item_params = {
                "amount": 123 * 80 * 100, "description": "Zulip Standard - renewal",
                "plan": None, "quantity": 123, "subscription": None, "discountable": False,
                "period": {
                    "start": datetime_to_timestamp(free_trial_end_date),
                    "end": datetime_to_timestamp(add_months(free_trial_end_date, 12)),
                },
            }
            for key, value in invoice_item_params.items():
                self.assertEqual(invoice_items[0][key], value)
            # Manually-managed licenses: later monthly runs produce no new
            # invoices until the annual renewal.
            invoice_plans_as_needed(add_months(free_trial_end_date, 1))
            invoices = [invoice for invoice in stripe.Invoice.list(customer=stripe_customer.id)]
            self.assertEqual(len(invoices), 1)
            invoice_plans_as_needed(add_months(free_trial_end_date, 10))
            invoices = [invoice for invoice in stripe.Invoice.list(customer=stripe_customer.id)]
            self.assertEqual(len(invoices), 1)
            invoice_plans_as_needed(add_months(free_trial_end_date, 12))
            invoices = [invoice for invoice in stripe.Invoice.list(customer=stripe_customer.id)]
            self.assertEqual(len(invoices), 2)
@mock_stripe()
def test_billing_page_permissions(self, *mocks: Mock) -> None:
# Guest users can't access /upgrade page
self.login_user(self.example_user('polonius'))
response = self.client_get("/upgrade/", follow=True)
self.assertEqual(response.status_code, 404)
# Check that non-admins can access /upgrade via /billing, when there is no Customer object
self.login_user(self.example_user('hamlet'))
response = self.client_get("/billing/")
self.assertEqual(response.status_code, 302)
self.assertEqual('/upgrade/', response.url)
# Check that non-admins can sign up and pay
self.upgrade()
# Check that the non-admin hamlet can still access /billing
response = self.client_get("/billing/")
self.assert_in_success_response(["Your current plan is"], response)
# Check realm owners can access billing, even though they are not a billing admin
desdemona = self.example_user('desdemona')
desdemona.role = UserProfile.ROLE_REALM_OWNER
desdemona.save(update_fields=["role"])
self.login_user(self.example_user('desdemona'))
response = self.client_get("/billing/")
self.assert_in_success_response(["Your current plan is"], response)
# Check that member who is not a billing admin does not have access
self.login_user(self.example_user('cordelia'))
response = self.client_get("/billing/")
self.assert_in_success_response(["You must be an organization owner or a billing administrator"], response)
@mock_stripe(tested_timestamp_fields=["created"])
def test_upgrade_by_card_with_outdated_seat_count(self, *mocks: Mock) -> None:
hamlet = self.example_user('hamlet')
self.login_user(hamlet)
new_seat_count = 23
# Change the seat count while the user is going through the upgrade flow
with patch('corporate.lib.stripe.get_latest_seat_count', return_value=new_seat_count):
self.upgrade()
stripe_customer_id = Customer.objects.first().stripe_customer_id
# Check that the Charge used the old quantity, not new_seat_count
self.assertEqual(8000 * self.seat_count,
[charge for charge in stripe.Charge.list(customer=stripe_customer_id)][0].amount)
# Check that the invoice has a credit for the old amount and a charge for the new one
stripe_invoice = [invoice for invoice in stripe.Invoice.list(customer=stripe_customer_id)][0]
self.assertEqual([8000 * new_seat_count, -8000 * self.seat_count],
[item.amount for item in stripe_invoice.lines])
# Check LicenseLedger has the new amount
self.assertEqual(LicenseLedger.objects.first().licenses, new_seat_count)
self.assertEqual(LicenseLedger.objects.first().licenses_at_next_renewal, new_seat_count)
    @mock_stripe()
    def test_upgrade_where_first_card_fails(self, *mocks: Mock) -> None:
        """A declined first card must leave the realm un-upgraded (Customer
        row created, but no CustomerPlan), and a later retry with a valid
        card must succeed while reusing the same Stripe Customer.
        """
        user = self.example_user("hamlet")
        self.login_user(user)
        # From https://stripe.com/docs/testing#cards: Attaching this card to
        # a Customer object succeeds, but attempts to charge the customer fail.
        with patch("corporate.lib.stripe.billing_logger.error") as mock_billing_logger:
            self.upgrade(stripe_token=stripe_create_token('4000000000000341').id)
        mock_billing_logger.assert_called()
        # Check that we created a Customer object but no CustomerPlan
        stripe_customer_id = Customer.objects.get(realm=get_realm('zulip')).stripe_customer_id
        self.assertFalse(CustomerPlan.objects.exists())
        # Check that we created a Customer in stripe, a failed Charge, and no Invoices or Invoice Items
        self.assertTrue(stripe_get_customer(stripe_customer_id))
        stripe_charges = [charge for charge in stripe.Charge.list(customer=stripe_customer_id)]
        self.assertEqual(len(stripe_charges), 1)
        self.assertEqual(stripe_charges[0].failure_code, 'card_declined')
        # TODO: figure out what these actually are
        self.assertFalse(stripe.Invoice.list(customer=stripe_customer_id))
        self.assertFalse(stripe.InvoiceItem.list(customer=stripe_customer_id))
        # Check that we correctly populated RealmAuditLog: the card was
        # attached (STRIPE_CARD_CHANGED) even though the charge failed.
        audit_log_entries = list(RealmAuditLog.objects.filter(acting_user=user)
                                 .values_list('event_type', flat=True).order_by('id'))
        self.assertEqual(audit_log_entries, [RealmAuditLog.STRIPE_CUSTOMER_CREATED,
                                             RealmAuditLog.STRIPE_CARD_CHANGED])
        # Check that we did not update Realm
        realm = get_realm("zulip")
        self.assertNotEqual(realm.plan_type, Realm.STANDARD)
        # Check that we still get redirected to /upgrade
        response = self.client_get("/billing/")
        self.assertEqual(response.status_code, 302)
        self.assertEqual('/upgrade/', response.url)
        # Try again, with a valid card, after they added a few users
        with patch('corporate.lib.stripe.get_latest_seat_count', return_value=23):
            with patch('corporate.views.get_latest_seat_count', return_value=23):
                self.upgrade()
        customer = Customer.objects.get(realm=get_realm('zulip'))
        # It's impossible to create two Customers, but check that we didn't
        # change stripe_customer_id
        self.assertEqual(customer.stripe_customer_id, stripe_customer_id)
        # Check that we successfully added a CustomerPlan, and have the right number of licenses
        plan = CustomerPlan.objects.get(customer=customer)
        ledger_entry = LicenseLedger.objects.get(plan=plan)
        self.assertEqual(ledger_entry.licenses, 23)
        self.assertEqual(ledger_entry.licenses_at_next_renewal, 23)
        # Check the Charges and Invoices in Stripe
        self.assertEqual(8000 * 23, [charge for charge in
                                     stripe.Charge.list(customer=stripe_customer_id)][0].amount)
        stripe_invoice = [invoice for invoice in stripe.Invoice.list(customer=stripe_customer_id)][0]
        self.assertEqual([8000 * 23, -8000 * 23],
                         [item.amount for item in stripe_invoice.lines])
        # Check that we correctly populated RealmAuditLog: one extra
        # STRIPE_CARD_CHANGED from the retry with the valid card.
        audit_log_entries = list(RealmAuditLog.objects.filter(acting_user=user)
                                 .values_list('event_type', flat=True).order_by('id'))
        # TODO: Test for REALM_PLAN_TYPE_CHANGED as the last entry
        self.assertEqual(audit_log_entries, [RealmAuditLog.STRIPE_CUSTOMER_CREATED,
                                             RealmAuditLog.STRIPE_CARD_CHANGED,
                                             RealmAuditLog.STRIPE_CARD_CHANGED,
                                             RealmAuditLog.CUSTOMER_PLAN_CREATED])
        # Check that we correctly updated Realm
        realm = get_realm("zulip")
        self.assertEqual(realm.plan_type, Realm.STANDARD)
        # Check that we can no longer access /upgrade
        response = self.client_get("/upgrade/")
        self.assertEqual(response.status_code, 302)
        self.assertEqual('/billing/', response.url)
def test_upgrade_with_tampered_seat_count(self) -> None:
hamlet = self.example_user('hamlet')
self.login_user(hamlet)
response = self.upgrade(talk_to_stripe=False, salt='badsalt')
self.assert_json_error_contains(response, "Something went wrong. Please contact")
self.assertEqual(orjson.loads(response.content)['error_description'], 'tampered seat count')
def test_upgrade_race_condition(self) -> None:
hamlet = self.example_user('hamlet')
self.login_user(hamlet)
self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, 'token')
with patch("corporate.lib.stripe.billing_logger.warning") as mock_billing_logger:
with self.assertRaises(BillingError) as context:
self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, 'token')
self.assertEqual('subscribing with existing subscription', context.exception.description)
mock_billing_logger.assert_called()
def test_check_upgrade_parameters(self) -> None:
# Tests all the error paths except 'not enough licenses'
def check_error(error_description: str, upgrade_params: Mapping[str, Any],
del_args: Sequence[str] = []) -> None:
response = self.upgrade(talk_to_stripe=False, del_args=del_args, **upgrade_params)
self.assert_json_error_contains(response, "Something went wrong. Please contact")
self.assertEqual(orjson.loads(response.content)['error_description'], error_description)
hamlet = self.example_user('hamlet')
self.login_user(hamlet)
check_error('unknown billing_modality', {'billing_modality': 'invalid'})
check_error('unknown schedule', {'schedule': 'invalid'})
check_error('unknown license_management', {'license_management': 'invalid'})
check_error('autopay with no card', {}, del_args=['stripe_token'])
def test_upgrade_license_counts(self) -> None:
def check_min_licenses_error(invoice: bool, licenses: Optional[int], min_licenses_in_response: int,
upgrade_params: Dict[str, Any]={}) -> None:
if licenses is None:
del_args = ['licenses']
else:
del_args = []
upgrade_params['licenses'] = licenses
response = self.upgrade(invoice=invoice, talk_to_stripe=False,
del_args=del_args, **upgrade_params)
self.assert_json_error_contains(response, f"at least {min_licenses_in_response} users")
self.assertEqual(orjson.loads(response.content)['error_description'], 'not enough licenses')
def check_max_licenses_error(licenses: int) -> None:
response = self.upgrade(invoice=True, talk_to_stripe=False,
licenses=licenses)
self.assert_json_error_contains(response, f"with more than {MAX_INVOICED_LICENSES} licenses")
self.assertEqual(orjson.loads(response.content)['error_description'], 'too many licenses')
def check_success(invoice: bool, licenses: Optional[int], upgrade_params: Dict[str, Any]={}) -> None:
if licenses is None:
del_args = ['licenses']
else:
del_args = []
upgrade_params['licenses'] = licenses
with patch('corporate.views.process_initial_upgrade'):
response = self.upgrade(invoice=invoice, talk_to_stripe=False,
del_args=del_args, **upgrade_params)
self.assert_json_success(response)
hamlet = self.example_user('hamlet')
self.login_user(hamlet)
# Autopay with licenses < seat count
check_min_licenses_error(False, self.seat_count - 1, self.seat_count, {'license_management': 'manual'})
# Autopay with not setting licenses
check_min_licenses_error(False, None, self.seat_count, {'license_management': 'manual'})
# Invoice with licenses < MIN_INVOICED_LICENSES
check_min_licenses_error(True, MIN_INVOICED_LICENSES - 1, MIN_INVOICED_LICENSES)
# Invoice with licenses < seat count
with patch("corporate.views.MIN_INVOICED_LICENSES", 3):
check_min_licenses_error(True, 4, self.seat_count)
# Invoice with not setting licenses
check_min_licenses_error(True, None, MIN_INVOICED_LICENSES)
# Invoice exceeding max licenses
check_max_licenses_error(MAX_INVOICED_LICENSES + 1)
with patch("corporate.lib.stripe.get_latest_seat_count", return_value=MAX_INVOICED_LICENSES + 5):
check_max_licenses_error(MAX_INVOICED_LICENSES + 5)
# Autopay with automatic license_management
check_success(False, None)
# Autopay with automatic license_management, should just ignore the licenses entry
check_success(False, self.seat_count)
# Autopay
check_success(False, self.seat_count, {'license_management': 'manual'})
# Autopay has no limit on max licenses
check_success(False, MAX_INVOICED_LICENSES + 1, {'license_management': 'manual'})
# Invoice
check_success(True, self.seat_count + MIN_INVOICED_LICENSES)
# Invoice
check_success(True, MAX_INVOICED_LICENSES)
@patch("corporate.lib.stripe.billing_logger.error")
def test_upgrade_with_uncaught_exception(self, mock_: Mock) -> None:
hamlet = self.example_user('hamlet')
self.login_user(hamlet)
with patch("corporate.views.process_initial_upgrade", side_effect=Exception):
response = self.upgrade(talk_to_stripe=False)
self.assert_json_error_contains(response, "Something went wrong. Please contact desdemona+admin@zulip.com.")
self.assertEqual(orjson.loads(response.content)['error_description'], 'uncaught exception during upgrade')
def test_request_sponsorship(self) -> None:
user = self.example_user("hamlet")
self.assertIsNone(get_customer_by_realm(user.realm))
self.login_user(user)
data = {
"organization-type": orjson.dumps("Open-source").decode(),
"website": orjson.dumps("https://infinispan.org/").decode(),
"description": orjson.dumps("Infinispan is a distributed in-memory key/value data store with optional schema.").decode(),
}
response = self.client_post("/json/billing/sponsorship", data)
self.assert_json_success(response)
customer = get_customer_by_realm(user.realm)
assert(customer is not None)
self.assertEqual(customer.sponsorship_pending, True)
from django.core.mail import outbox
self.assertEqual(len(outbox), 1)
for message in outbox:
self.assertEqual(len(message.to), 1)
self.assertEqual(message.to[0], "desdemona+admin@zulip.com")
self.assertEqual(message.subject, "Sponsorship request (Open-source) for zulip")
self.assertEqual(message.reply_to, ['hamlet@zulip.com'])
self.assertIn('Zulip sponsorship <noreply-', message.from_email)
self.assertIn("Requested by: King Hamlet (Member)", message.body)
self.assertIn("Support URL: http://zulip.testserver/activity/support?q=zulip", message.body)
self.assertIn("Website: https://infinispan.org", message.body)
self.assertIn("Organization type: Open-source", message.body)
self.assertIn("Description:\nInfinispan is a distributed in-memory", message.body)
response = self.client_get("/upgrade/")
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, "/billing/")
response = self.client_get("/billing/")
self.assert_in_success_response(["Your organization has requested sponsored or discounted hosting."], response)
self.login_user(self.example_user("othello"))
response = self.client_get("/billing/")
self.assert_in_success_response(["You must be an organization owner or a billing administrator to view this page."], response)
def test_redirect_for_billing_home(self) -> None:
user = self.example_user("iago")
self.login_user(user)
# No Customer yet; check that we are redirected to /upgrade
response = self.client_get("/billing/")
self.assertEqual(response.status_code, 302)
self.assertEqual('/upgrade/', response.url)
# Customer, but no CustomerPlan; check that we are still redirected to /upgrade
Customer.objects.create(realm=user.realm, stripe_customer_id='cus_123')
response = self.client_get("/billing/")
self.assertEqual(response.status_code, 302)
self.assertEqual('/upgrade/', response.url)
def test_redirect_for_upgrade_page(self) -> None:
user = self.example_user("iago")
self.login_user(user)
# No Customer yet;
response = self.client_get("/upgrade/")
self.assertEqual(response.status_code, 200)
# Customer, but no CustomerPlan;
customer = Customer.objects.create(realm=user.realm, stripe_customer_id='cus_123')
response = self.client_get("/upgrade/")
self.assertEqual(response.status_code, 200)
CustomerPlan.objects.create(customer=customer, billing_cycle_anchor=timezone_now(),
billing_schedule=CustomerPlan.ANNUAL, tier=CustomerPlan.STANDARD)
response = self.client_get("/upgrade/")
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, "/billing/")
with self.settings(FREE_TRIAL_DAYS=30):
response = self.client_get("/upgrade/")
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, "/billing/")
response = self.client_get("/upgrade/?onboarding=true")
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, "/billing/?onboarding=true")
def test_get_latest_seat_count(self) -> None:
realm = get_realm("zulip")
initial_count = get_latest_seat_count(realm)
user1 = UserProfile.objects.create(realm=realm, email='user1@zulip.com',
delivery_email='user1@zulip.com')
user2 = UserProfile.objects.create(realm=realm, email='user2@zulip.com',
delivery_email='user2@zulip.com')
self.assertEqual(get_latest_seat_count(realm), initial_count + 2)
# Test that bots aren't counted
user1.is_bot = True
user1.save(update_fields=['is_bot'])
self.assertEqual(get_latest_seat_count(realm), initial_count + 1)
# Test that inactive users aren't counted
do_deactivate_user(user2)
self.assertEqual(get_latest_seat_count(realm), initial_count)
# Test guests
# Adding a guest to a realm with a lot of members shouldn't change anything
UserProfile.objects.create(realm=realm, email='user3@zulip.com', delivery_email='user3@zulip.com',
role=UserProfile.ROLE_GUEST)
self.assertEqual(get_latest_seat_count(realm), initial_count)
# Test 1 member and 5 guests
realm = Realm.objects.create(string_id='second', name='second')
UserProfile.objects.create(realm=realm, email='member@second.com',
delivery_email='member@second.com')
for i in range(5):
UserProfile.objects.create(realm=realm, email=f'guest{i}@second.com',
delivery_email=f'guest{i}@second.com',
role=UserProfile.ROLE_GUEST)
self.assertEqual(get_latest_seat_count(realm), 1)
# Test 1 member and 6 guests
UserProfile.objects.create(realm=realm, email='guest5@second.com',
delivery_email='guest5@second.com',
role=UserProfile.ROLE_GUEST)
self.assertEqual(get_latest_seat_count(realm), 2)
def test_sign_string(self) -> None:
    """Round-trip a signed string, and reject unsigning with a bad salt."""
    payload = "abc"
    signed, salt = sign_string(payload)
    # The matching salt must recover the original payload exactly.
    self.assertEqual(unsign_string(signed, salt), payload)
    # A mismatched salt must raise rather than return corrupt data.
    with self.assertRaises(signing.BadSignature):
        unsign_string(signed, "randomsalt")
# This tests both the payment method string, and also is a very basic
# test that the various upgrade paths involving non-standard payment
# histories don't throw errors
@mock_stripe()
def test_payment_method_string(self, *mocks: Mock) -> None:
    """Placeholder for payment_method_string coverage.

    The card-on-file case is already exercised by test_initial_upgrade;
    the invoice-billing and downgrade scenarios are sketched in the
    commented-out code below and remain to be implemented (see TODO).
    """
    pass
    # If you signup with a card, we should show your card as the payment method
    # Already tested in test_initial_upgrade
    # If you pay by invoice, your payment method should be
    # "Billed by invoice", even if you have a card on file
    # user = self.example_user("hamlet")
    # do_create_stripe_customer(user, stripe_create_token().id)
    # self.login_user(user)
    # self.upgrade(invoice=True)
    # stripe_customer = stripe_get_customer(Customer.objects.get(realm=user.realm).stripe_customer_id)
    # self.assertEqual('Billed by invoice', payment_method_string(stripe_customer))
    # If you signup with a card and then downgrade, we still have your
    # card on file, and should show it
    # TODO
@mock_stripe()
def test_attach_discount_to_realm(self, *mocks: Mock) -> None:
    """Discounts must apply whether attached before or after the Stripe
    customer exists, and be reflected in the charge amount, the invoice
    line items, and the CustomerPlan row."""
    # Attach discount before Stripe customer exists
    user = self.example_user('hamlet')
    attach_discount_to_realm(user.realm, Decimal(85))
    self.login_user(user)
    # Check that the discount appears in page_params
    self.assert_in_success_response(['85'], self.client_get("/upgrade/"))
    # Check that the customer was charged the discounted amount
    self.upgrade()
    stripe_customer_id = Customer.objects.values_list('stripe_customer_id', flat=True).first()
    # 1200 = discounted per-license price in cents (matches the
    # price_per_license asserted on the CustomerPlan below).
    self.assertEqual(1200 * self.seat_count,
                     [charge for charge in stripe.Charge.list(customer=stripe_customer_id)][0].amount)
    stripe_invoice = [invoice for invoice in stripe.Invoice.list(customer=stripe_customer_id)][0]
    # The invoice carries the renewal line and the offsetting payment line.
    self.assertEqual([1200 * self.seat_count, -1200 * self.seat_count],
                     [item.amount for item in stripe_invoice.lines])
    # Check CustomerPlan reflects the discount
    plan = CustomerPlan.objects.get(price_per_license=1200, discount=Decimal(85))
    # Attach discount to existing Stripe customer
    plan.status = CustomerPlan.ENDED
    plan.save(update_fields=['status'])
    attach_discount_to_realm(user.realm, Decimal(25))
    process_initial_upgrade(user, self.seat_count, True, CustomerPlan.ANNUAL, stripe_create_token().id)
    # 6000 = per-license price in cents after the 25% discount.
    self.assertEqual(6000 * self.seat_count,
                     [charge for charge in stripe.Charge.list(customer=stripe_customer_id)][0].amount)
    stripe_invoice = [invoice for invoice in stripe.Invoice.list(customer=stripe_customer_id)][0]
    self.assertEqual([6000 * self.seat_count, -6000 * self.seat_count],
                     [item.amount for item in stripe_invoice.lines])
    plan = CustomerPlan.objects.get(price_per_license=6000, discount=Decimal(25))
def test_get_discount_for_realm(self) -> None:
    """A realm has no discount until one is explicitly attached."""
    hamlet = self.example_user('hamlet')
    # No discount has been attached yet, so the lookup yields None.
    self.assertIsNone(get_discount_for_realm(hamlet.realm))
    attach_discount_to_realm(hamlet.realm, Decimal(85))
    # The attached percentage is now retrievable.
    self.assertEqual(get_discount_for_realm(hamlet.realm), 85)
@mock_stripe()
def test_replace_payment_source(self, *mocks: Mock) -> None:
    """Replacing the payment card must: reject invalid cards without
    touching state, record card changes in RealmAuditLog, replace (not
    add to) the existing source, and pay any open invoices on success."""
    user = self.example_user("hamlet")
    self.login_user(user)
    self.upgrade()
    # Create an open invoice
    stripe_customer_id = Customer.objects.first().stripe_customer_id
    stripe.InvoiceItem.create(amount=5000, currency='usd', customer=stripe_customer_id)
    stripe_invoice = stripe.Invoice.create(customer=stripe_customer_id)
    stripe.Invoice.finalize_invoice(stripe_invoice)
    # Clear audit entries from the upgrade so the counts below start at zero.
    RealmAuditLog.objects.filter(event_type=RealmAuditLog.STRIPE_CARD_CHANGED).delete()

    # Replace with an invalid card
    stripe_token = stripe_create_token(card_number='4000000000009987').id
    with patch("corporate.lib.stripe.billing_logger.error") as mock_billing_logger:
        with patch("stripe.Invoice.list") as mock_invoice_list:
            response = self.client_post("/json/billing/sources/change",
                                        {'stripe_token': orjson.dumps(stripe_token).decode()})
    mock_billing_logger.assert_called()
    # We should not even have tried to pay open invoices for a declined card.
    mock_invoice_list.assert_not_called()
    self.assertEqual(orjson.loads(response.content)['error_description'], 'card error')
    self.assert_json_error_contains(response, 'Your card was declined')
    # The original card (test card ending 4242) is still the only source.
    for stripe_source in stripe_get_customer(stripe_customer_id).sources:
        assert isinstance(stripe_source, stripe.Card)
        self.assertEqual(stripe_source.last4, '4242')
    self.assertFalse(RealmAuditLog.objects.filter(event_type=RealmAuditLog.STRIPE_CARD_CHANGED).exists())

    # Replace with a card that's valid, but charging the card fails
    stripe_token = stripe_create_token(card_number='4000000000000341').id
    with patch("corporate.lib.stripe.billing_logger.error") as mock_billing_logger:
        response = self.client_post("/json/billing/sources/change",
                                    {'stripe_token': orjson.dumps(stripe_token).decode()})
    mock_billing_logger.assert_called()
    self.assertEqual(orjson.loads(response.content)['error_description'], 'card error')
    self.assert_json_error_contains(response, 'Your card was declined')
    # The card was attached (last4 == 0341) even though the charge failed,
    # so the open invoice remains open and the change is audited.
    for stripe_source in stripe_get_customer(stripe_customer_id).sources:
        assert isinstance(stripe_source, stripe.Card)
        self.assertEqual(stripe_source.last4, '0341')
    self.assertEqual(len(list(stripe.Invoice.list(customer=stripe_customer_id, status='open'))), 1)
    self.assertEqual(1, RealmAuditLog.objects.filter(
        event_type=RealmAuditLog.STRIPE_CARD_CHANGED).count())

    # Replace with a valid card
    stripe_token = stripe_create_token(card_number='5555555555554444').id
    response = self.client_post("/json/billing/sources/change",
                                {'stripe_token': orjson.dumps(stripe_token).decode()})
    self.assert_json_success(response)
    number_of_sources = 0
    for stripe_source in stripe_get_customer(stripe_customer_id).sources:
        assert isinstance(stripe_source, stripe.Card)
        self.assertEqual(stripe_source.last4, '4444')
        number_of_sources += 1
    # Verify that we replaced the previous card, rather than adding a new one
    self.assertEqual(number_of_sources, 1)
    # Ideally we'd also test that we don't pay invoices with billing=='send_invoice'
    for stripe_invoice in stripe.Invoice.list(customer=stripe_customer_id):
        self.assertEqual(stripe_invoice.status, 'paid')
    self.assertEqual(2, RealmAuditLog.objects.filter(
        event_type=RealmAuditLog.STRIPE_CARD_CHANGED).count())
@patch("corporate.lib.stripe.billing_logger.info")
def test_downgrade(self, mock_: Mock) -> None:
    """Schedule a downgrade at end of cycle and walk the full lifecycle:
    ledger rows keep accruing during the cycle, the final invoice is
    issued, and after the cycle ends no further ledger rows or invoices
    are produced."""
    user = self.example_user("hamlet")
    self.login_user(user)
    with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
        self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, 'token')
    response = self.client_post("/json/billing/plan/change",
                                {'status': CustomerPlan.DOWNGRADE_AT_END_OF_CYCLE})
    self.assert_json_success(response)
    # Verify that we still write LicenseLedger rows during the remaining
    # part of the cycle
    with patch("corporate.lib.stripe.get_latest_seat_count", return_value=20):
        update_license_ledger_if_needed(user.realm, self.now)
    self.assertEqual(LicenseLedger.objects.order_by('-id').values_list(
        'licenses', 'licenses_at_next_renewal').first(), (20, 20))
    # Verify that we invoice them for the additional users
    from stripe import Invoice
    # NOTE(review): these assignments monkeypatch the stripe module globally
    # and are never restored, so they can leak into tests that run later in
    # the same process — consider patch.object context managers instead.
    Invoice.create = lambda **args: None  # type: ignore[assignment] # cleaner than mocking
    Invoice.finalize_invoice = lambda *args: None  # type: ignore[assignment] # cleaner than mocking
    with patch("stripe.InvoiceItem.create") as mocked:
        invoice_plans_as_needed(self.next_month)
    mocked.assert_called_once()
    mocked.reset_mock()
    # Check that we downgrade properly if the cycle is over
    with patch("corporate.lib.stripe.get_latest_seat_count", return_value=30):
        update_license_ledger_if_needed(user.realm, self.next_year)
    self.assertEqual(get_realm('zulip').plan_type, Realm.LIMITED)
    self.assertEqual(CustomerPlan.objects.first().status, CustomerPlan.ENDED)
    # The ledger stays at (20, 20): the seat bump to 30 happened after the
    # cycle ended and is therefore not recorded.
    self.assertEqual(LicenseLedger.objects.order_by('-id').values_list(
        'licenses', 'licenses_at_next_renewal').first(), (20, 20))
    # Verify that we don't write LicenseLedger rows once we've downgraded
    with patch("corporate.lib.stripe.get_latest_seat_count", return_value=40):
        update_license_ledger_if_needed(user.realm, self.next_year)
    self.assertEqual(LicenseLedger.objects.order_by('-id').values_list(
        'licenses', 'licenses_at_next_renewal').first(), (20, 20))
    # Verify that we call invoice_plan once more after cycle end but
    # don't invoice them for users added after the cycle end
    self.assertIsNotNone(CustomerPlan.objects.first().next_invoice_date)
    with patch("stripe.InvoiceItem.create") as mocked:
        invoice_plans_as_needed(self.next_year + timedelta(days=32))
    mocked.assert_not_called()
    mocked.reset_mock()
    # Check that we updated next_invoice_date in invoice_plan
    self.assertIsNone(CustomerPlan.objects.first().next_invoice_date)
    # Check that we don't call invoice_plan after that final call
    with patch("corporate.lib.stripe.get_latest_seat_count", return_value=50):
        update_license_ledger_if_needed(user.realm, self.next_year + timedelta(days=80))
    with patch("corporate.lib.stripe.invoice_plan") as mocked:
        invoice_plans_as_needed(self.next_year + timedelta(days=400))
    mocked.assert_not_called()
@mock_stripe()
@patch("corporate.lib.stripe.billing_logger.info")
def test_switch_from_monthly_plan_to_annual_plan_for_automatic_license_management(self, *mocks: Mock) -> None:
    """End-to-end check of a monthly -> annual switch with automatic
    license management: ledger rows migrate to the new plan at the cycle
    boundary, the switch is audited, and the Stripe invoices generated
    at each subsequent billing event carry the expected line items."""
    user = self.example_user("hamlet")

    self.login_user(user)
    with patch('corporate.lib.stripe.timezone_now', return_value=self.now):
        self.upgrade(schedule='monthly')
    monthly_plan = get_current_plan_by_realm(user.realm)
    assert(monthly_plan is not None)
    self.assertEqual(monthly_plan.automanage_licenses, True)
    self.assertEqual(monthly_plan.billing_schedule, CustomerPlan.MONTHLY)

    # Request the switch; it takes effect at the end of the current cycle.
    response = self.client_post("/json/billing/plan/change",
                                {'status': CustomerPlan.SWITCH_TO_ANNUAL_AT_END_OF_CYCLE})
    self.assert_json_success(response)
    monthly_plan.refresh_from_db()
    self.assertEqual(monthly_plan.status, CustomerPlan.SWITCH_TO_ANNUAL_AT_END_OF_CYCLE)
    with patch('corporate.views.timezone_now', return_value=self.now):
        response = self.client_get("/billing/")
    self.assert_in_success_response(["be switched from monthly to annual billing on <strong>February 2, 2012"], response)

    # Seat changes before the cycle boundary are recorded on the monthly plan.
    with patch("corporate.lib.stripe.get_latest_seat_count", return_value=20):
        update_license_ledger_if_needed(user.realm, self.now)
    self.assertEqual(LicenseLedger.objects.filter(plan=monthly_plan).count(), 2)
    self.assertEqual(LicenseLedger.objects.order_by('-id').values_list(
        'licenses', 'licenses_at_next_renewal').first(), (20, 20))

    # Crossing the cycle boundary ends the monthly plan and creates the
    # annual plan; the post-boundary seat change lands on the annual plan.
    with patch('corporate.lib.stripe.timezone_now', return_value=self.next_month):
        with patch("corporate.lib.stripe.get_latest_seat_count", return_value=25):
            update_license_ledger_if_needed(user.realm, self.next_month)
    self.assertEqual(LicenseLedger.objects.filter(plan=monthly_plan).count(), 2)
    customer = get_customer_by_realm(user.realm)
    assert(customer is not None)
    self.assertEqual(CustomerPlan.objects.filter(customer=customer).count(), 2)
    monthly_plan.refresh_from_db()
    self.assertEqual(monthly_plan.status, CustomerPlan.ENDED)
    self.assertEqual(monthly_plan.next_invoice_date, self.next_month)
    annual_plan = get_current_plan_by_realm(user.realm)
    assert(annual_plan is not None)
    self.assertEqual(annual_plan.status, CustomerPlan.ACTIVE)
    self.assertEqual(annual_plan.billing_schedule, CustomerPlan.ANNUAL)
    self.assertEqual(annual_plan.invoicing_status, CustomerPlan.INITIAL_INVOICE_TO_BE_SENT)
    self.assertEqual(annual_plan.billing_cycle_anchor, self.next_month)
    self.assertEqual(annual_plan.next_invoice_date, self.next_month)
    self.assertEqual(annual_plan.invoiced_through, None)
    annual_ledger_entries = LicenseLedger.objects.filter(plan=annual_plan).order_by('id')
    self.assertEqual(len(annual_ledger_entries), 2)
    self.assertEqual(annual_ledger_entries[0].is_renewal, True)
    self.assertEqual(annual_ledger_entries.values_list('licenses', 'licenses_at_next_renewal')[0], (20, 20))
    self.assertEqual(annual_ledger_entries[1].is_renewal, False)
    self.assertEqual(annual_ledger_entries.values_list('licenses', 'licenses_at_next_renewal')[1], (25, 25))
    # The switch itself is recorded in the audit log with both plan ids.
    audit_log = RealmAuditLog.objects.get(event_type=RealmAuditLog.CUSTOMER_SWITCHED_FROM_MONTHLY_TO_ANNUAL_PLAN)
    self.assertEqual(audit_log.realm, user.realm)
    self.assertEqual(orjson.loads(audit_log.extra_data)["monthly_plan_id"], monthly_plan.id)
    self.assertEqual(orjson.loads(audit_log.extra_data)["annual_plan_id"], annual_plan.id)

    # Invoicing at the boundary completes the annual plan's initial invoice.
    invoice_plans_as_needed(self.next_month)

    annual_ledger_entries = LicenseLedger.objects.filter(plan=annual_plan).order_by('id')
    self.assertEqual(len(annual_ledger_entries), 2)
    annual_plan.refresh_from_db()
    self.assertEqual(annual_plan.invoicing_status, CustomerPlan.DONE)
    self.assertEqual(annual_plan.invoiced_through, annual_ledger_entries[1])
    self.assertEqual(annual_plan.billing_cycle_anchor, self.next_month)
    self.assertEqual(annual_plan.next_invoice_date, add_months(self.next_month, 1))
    monthly_plan.refresh_from_db()
    self.assertEqual(monthly_plan.next_invoice_date, None)

    invoices = [invoice for invoice in stripe.Invoice.list(customer=customer.stripe_customer_id)]
    self.assertEqual(len(invoices), 3)

    # Newest invoice: annual renewal for 20 seats plus 5 additional licenses.
    annual_plan_invoice_items = [invoice_item for invoice_item in invoices[0].get("lines")]
    self.assertEqual(len(annual_plan_invoice_items), 2)
    annual_plan_invoice_item_params = {
        "amount": 5 * 80 * 100,
        "description": "Additional license (Feb 2, 2012 - Feb 2, 2013)",
        "plan": None, "quantity": 5, "subscription": None, "discountable": False,
        "period": {
            "start": datetime_to_timestamp(self.next_month),
            "end": datetime_to_timestamp(add_months(self.next_month, 12))
        },
    }
    for key, value in annual_plan_invoice_item_params.items():
        self.assertEqual(annual_plan_invoice_items[0][key], value)
    annual_plan_invoice_item_params = {
        "amount": 20 * 80 * 100, "description": "Zulip Standard - renewal",
        "plan": None, "quantity": 20, "subscription": None, "discountable": False,
        "period": {
            "start": datetime_to_timestamp(self.next_month),
            "end": datetime_to_timestamp(add_months(self.next_month, 12))
        },
    }
    for key, value in annual_plan_invoice_item_params.items():
        self.assertEqual(annual_plan_invoice_items[1][key], value)

    # The middle invoice bills the extra monthly-plan licenses for the
    # final month of the monthly cycle.
    monthly_plan_invoice_items = [invoice_item for invoice_item in invoices[1].get("lines")]
    self.assertEqual(len(monthly_plan_invoice_items), 1)
    monthly_plan_invoice_item_params = {
        "amount": 14 * 8 * 100,
        "description": "Additional license (Jan 2, 2012 - Feb 2, 2012)",
        "plan": None, "quantity": 14, "subscription": None, "discountable": False,
        "period": {
            "start": datetime_to_timestamp(self.now),
            "end": datetime_to_timestamp(self.next_month)
        },
    }
    for key, value in monthly_plan_invoice_item_params.items():
        self.assertEqual(monthly_plan_invoice_items[0][key], value)

    # Seats added mid-annual-cycle are invoiced at a prorated amount.
    with patch("corporate.lib.stripe.get_latest_seat_count", return_value=30):
        update_license_ledger_if_needed(user.realm, add_months(self.next_month, 1))
    invoice_plans_as_needed(add_months(self.next_month, 1))

    invoices = [invoice for invoice in stripe.Invoice.list(customer=customer.stripe_customer_id)]
    self.assertEqual(len(invoices), 4)
    monthly_plan_invoice_items = [invoice_item for invoice_item in invoices[0].get("lines")]
    self.assertEqual(len(monthly_plan_invoice_items), 1)
    monthly_plan_invoice_item_params = {
        # Prorated per-license amount for the remainder of the annual cycle.
        "amount": 5 * 7366,
        "description": "Additional license (Mar 2, 2012 - Feb 2, 2013)",
        "plan": None, "quantity": 5, "subscription": None, "discountable": False,
        "period": {
            "start": datetime_to_timestamp(add_months(self.next_month, 1)),
            "end": datetime_to_timestamp(add_months(self.next_month, 12))
        },
    }
    for key, value in monthly_plan_invoice_item_params.items():
        self.assertEqual(monthly_plan_invoice_items[0][key], value)

    # A year later the annual plan renews at the current seat count (30).
    invoice_plans_as_needed(add_months(self.now, 13))

    invoices = [invoice for invoice in stripe.Invoice.list(customer=customer.stripe_customer_id)]
    self.assertEqual(len(invoices), 5)
    annual_plan_invoice_items = [invoice_item for invoice_item in invoices[0].get("lines")]
    self.assertEqual(len(annual_plan_invoice_items), 1)
    annual_plan_invoice_item_params = {
        "amount": 30 * 80 * 100,
        "description": "Zulip Standard - renewal",
        "plan": None, "quantity": 30, "subscription": None, "discountable": False,
        "period": {
            "start": datetime_to_timestamp(add_months(self.next_month, 12)),
            "end": datetime_to_timestamp(add_months(self.next_month, 24))
        },
    }
    for key, value in annual_plan_invoice_item_params.items():
        self.assertEqual(annual_plan_invoice_items[0][key], value)
@mock_stripe()
@patch("corporate.lib.stripe.billing_logger.info")
def test_switch_from_monthly_plan_to_annual_plan_for_manual_license_management(self, *mocks: Mock) -> None:
    """Monthly -> annual switch with manual license management: the
    license count is fixed (no ledger growth), and the annual plan is
    invoiced for exactly num_licenses at the boundary and at renewal."""
    user = self.example_user("hamlet")
    num_licenses = 35

    self.login_user(user)
    with patch('corporate.lib.stripe.timezone_now', return_value=self.now):
        self.upgrade(schedule='monthly', license_management='manual', licenses=num_licenses)
    monthly_plan = get_current_plan_by_realm(user.realm)
    assert(monthly_plan is not None)
    self.assertEqual(monthly_plan.automanage_licenses, False)
    self.assertEqual(monthly_plan.billing_schedule, CustomerPlan.MONTHLY)
    response = self.client_post("/json/billing/plan/change",
                                {'status': CustomerPlan.SWITCH_TO_ANNUAL_AT_END_OF_CYCLE})
    self.assert_json_success(response)
    monthly_plan.refresh_from_db()
    self.assertEqual(monthly_plan.status, CustomerPlan.SWITCH_TO_ANNUAL_AT_END_OF_CYCLE)
    with patch('corporate.views.timezone_now', return_value=self.now):
        response = self.client_get("/billing/")
    self.assert_in_success_response(["be switched from monthly to annual billing on <strong>February 2, 2012"], response)

    # Crossing the cycle boundary: the monthly plan ends and the annual
    # plan is created with a single renewal ledger entry.
    invoice_plans_as_needed(self.next_month)

    self.assertEqual(LicenseLedger.objects.filter(plan=monthly_plan).count(), 1)
    customer = get_customer_by_realm(user.realm)
    assert(customer is not None)
    self.assertEqual(CustomerPlan.objects.filter(customer=customer).count(), 2)
    monthly_plan.refresh_from_db()
    self.assertEqual(monthly_plan.status, CustomerPlan.ENDED)
    self.assertEqual(monthly_plan.next_invoice_date, None)
    annual_plan = get_current_plan_by_realm(user.realm)
    assert(annual_plan is not None)
    self.assertEqual(annual_plan.status, CustomerPlan.ACTIVE)
    self.assertEqual(annual_plan.billing_schedule, CustomerPlan.ANNUAL)
    self.assertEqual(annual_plan.invoicing_status, CustomerPlan.INITIAL_INVOICE_TO_BE_SENT)
    self.assertEqual(annual_plan.billing_cycle_anchor, self.next_month)
    self.assertEqual(annual_plan.next_invoice_date, self.next_month)
    annual_ledger_entries = LicenseLedger.objects.filter(plan=annual_plan).order_by('id')
    self.assertEqual(len(annual_ledger_entries), 1)
    self.assertEqual(annual_ledger_entries[0].is_renewal, True)
    self.assertEqual(annual_ledger_entries.values_list('licenses', 'licenses_at_next_renewal')[0], (num_licenses, num_licenses))
    self.assertEqual(annual_plan.invoiced_through, None)

    # First call of invoice_plans_as_needed creates the new plan. Second call
    # calls invoice_plan on the newly created plan.
    invoice_plans_as_needed(self.next_month + timedelta(days=1))

    annual_plan.refresh_from_db()
    self.assertEqual(annual_plan.invoiced_through, annual_ledger_entries[0])
    self.assertEqual(annual_plan.next_invoice_date, add_months(self.next_month, 12))
    self.assertEqual(annual_plan.invoicing_status, CustomerPlan.DONE)

    invoices = [invoice for invoice in stripe.Invoice.list(customer=customer.stripe_customer_id)]
    self.assertEqual(len(invoices), 2)
    annual_plan_invoice_items = [invoice_item for invoice_item in invoices[0].get("lines")]
    self.assertEqual(len(annual_plan_invoice_items), 1)
    annual_plan_invoice_item_params = {
        "amount": num_licenses * 80 * 100, "description": "Zulip Standard - renewal",
        "plan": None, "quantity": num_licenses, "subscription": None, "discountable": False,
        "period": {
            "start": datetime_to_timestamp(self.next_month),
            "end": datetime_to_timestamp(add_months(self.next_month, 12))
        },
    }
    for key, value in annual_plan_invoice_item_params.items():
        self.assertEqual(annual_plan_invoice_items[0][key], value)

    # Mid-cycle there is nothing to invoice for a manual-license plan.
    with patch('corporate.lib.stripe.invoice_plan') as m:
        invoice_plans_as_needed(add_months(self.now, 2))
        m.assert_not_called()

    # At renewal time the annual plan is invoiced again for the same count.
    invoice_plans_as_needed(add_months(self.now, 13))

    invoices = [invoice for invoice in stripe.Invoice.list(customer=customer.stripe_customer_id)]
    self.assertEqual(len(invoices), 3)
    annual_plan_invoice_items = [invoice_item for invoice_item in invoices[0].get("lines")]
    self.assertEqual(len(annual_plan_invoice_items), 1)
    annual_plan_invoice_item_params = {
        "amount": num_licenses * 80 * 100,
        "description": "Zulip Standard - renewal",
        "plan": None, "quantity": num_licenses, "subscription": None, "discountable": False,
        "period": {
            "start": datetime_to_timestamp(add_months(self.next_month, 12)),
            "end": datetime_to_timestamp(add_months(self.next_month, 24))
        },
    }
    for key, value in annual_plan_invoice_item_params.items():
        self.assertEqual(annual_plan_invoice_items[0][key], value)
@patch("corporate.lib.stripe.billing_logger.info")
def test_reupgrade_after_plan_status_changed_to_downgrade_at_end_of_cycle(self, mock_: Mock) -> None:
    """A scheduled end-of-cycle downgrade can be cancelled by setting the
    plan back to ACTIVE before the cycle ends."""
    billing_admin = self.example_user("hamlet")
    self.login_user(billing_admin)
    with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
        self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, 'token')
    # First schedule the downgrade, then revert it; after each request the
    # plan row must reflect the requested status.
    for target_status in (CustomerPlan.DOWNGRADE_AT_END_OF_CYCLE, CustomerPlan.ACTIVE):
        result = self.client_post("/json/billing/plan/change",
                                  {'status': target_status})
        self.assert_json_success(result)
        self.assertEqual(CustomerPlan.objects.first().status, target_status)
@patch("corporate.lib.stripe.billing_logger.info")
@patch("stripe.Invoice.create")
@patch("stripe.Invoice.finalize_invoice")
@patch("stripe.InvoiceItem.create")
def test_downgrade_during_invoicing(self, *mocks: Mock) -> None:
    """Invoicing must finalize a pending end-of-cycle downgrade.

    Unlike test_downgrade, the plan is still in status
    DOWNGRADE_AT_END_OF_CYCLE (not ENDED) when invoice_plans_as_needed
    runs, so this checks that make_end_of_cycle_updates_if_needed is
    invoked from the invoicing path itself.
    """
    billing_user = self.example_user("hamlet")
    self.login_user(billing_user)
    with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
        self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, 'token')
    self.client_post("/json/billing/plan/change",
                     {'status': CustomerPlan.DOWNGRADE_AT_END_OF_CYCLE})

    # Before invoicing: downgrade pending, next invoice still scheduled.
    scheduled_plan = CustomerPlan.objects.first()
    self.assertIsNotNone(scheduled_plan.next_invoice_date)
    self.assertEqual(scheduled_plan.status, CustomerPlan.DOWNGRADE_AT_END_OF_CYCLE)

    invoice_plans_as_needed(self.next_year)

    # After invoicing past the cycle end: plan ended, nothing left to bill.
    ended_plan = CustomerPlan.objects.first()
    self.assertIsNone(ended_plan.next_invoice_date)
    self.assertEqual(ended_plan.status, CustomerPlan.ENDED)
@patch("corporate.lib.stripe.billing_logger.info")
def test_downgrade_free_trial(self, mock_: Mock) -> None:
    """Ending a plan during a free trial downgrades the realm immediately
    and prevents any invoicing for the trial period."""
    user = self.example_user("hamlet")
    free_trial_end_date = self.now + timedelta(days=60)
    with self.settings(FREE_TRIAL_DAYS=60):
        with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
            self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, 'token')
        # During the trial the first invoice is scheduled for trial end.
        plan = CustomerPlan.objects.get()
        self.assertEqual(plan.next_invoice_date, free_trial_end_date)
        self.assertEqual(get_realm('zulip').plan_type, Realm.STANDARD)
        self.assertEqual(plan.status, CustomerPlan.FREE_TRIAL)
        # Add some extra users before the realm is deactivated
        with patch("corporate.lib.stripe.get_latest_seat_count", return_value=21):
            update_license_ledger_if_needed(user.realm, self.now)
        last_ledger_entry = LicenseLedger.objects.order_by('id').last()
        self.assertEqual(last_ledger_entry.licenses, 21)
        self.assertEqual(last_ledger_entry.licenses_at_next_renewal, 21)
        # Ending the plan mid-trial takes effect immediately.
        self.login_user(user)
        self.client_post("/json/billing/plan/change", {'status': CustomerPlan.ENDED})
        plan.refresh_from_db()
        self.assertEqual(get_realm('zulip').plan_type, Realm.LIMITED)
        self.assertEqual(plan.status, CustomerPlan.ENDED)
        self.assertEqual(plan.invoiced_through, last_ledger_entry)
        self.assertIsNone(plan.next_invoice_date)
        self.login_user(user)
        response = self.client_get("/billing/")
        self.assert_in_success_response(["Your organization is on the <b>Zulip Free</b>"], response)
        # The extra users added in the final month are not charged
        with patch("corporate.lib.stripe.invoice_plan") as mocked:
            invoice_plans_as_needed(self.next_month)
        mocked.assert_not_called()
        # The plan is not renewed after a year
        with patch("corporate.lib.stripe.invoice_plan") as mocked:
            invoice_plans_as_needed(self.next_year)
        mocked.assert_not_called()
@patch("corporate.lib.stripe.billing_logger.warning")
@patch("corporate.lib.stripe.billing_logger.info")
def test_reupgrade_by_billing_admin_after_downgrade(self, *mocks: Mock) -> None:
    """After a completed downgrade, a billing admin can upgrade again:
    the same Customer is reused but a fresh CustomerPlan is created,
    and upgrading while the old plan is still live is rejected."""
    user = self.example_user("hamlet")
    with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
        self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, 'token')
    self.login_user(user)
    self.client_post("/json/billing/plan/change",
                     {'status': CustomerPlan.DOWNGRADE_AT_END_OF_CYCLE})
    # Upgrading before the downgrade has taken effect must fail.
    with self.assertRaises(BillingError) as context:
        with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
            self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, 'token')
    self.assertEqual(context.exception.description, "subscribing with existing subscription")
    # Run invoicing past the cycle end so the downgrade completes.
    invoice_plans_as_needed(self.next_year)
    response = self.client_get("/billing/")
    self.assert_in_success_response(["Your organization is on the <b>Zulip Free</b>"], response)
    # Now the re-upgrade succeeds and produces a second plan.
    with patch("corporate.lib.stripe.timezone_now", return_value=self.next_year):
        self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, 'token')
    self.assertEqual(Customer.objects.count(), 1)
    self.assertEqual(CustomerPlan.objects.count(), 2)
    current_plan = CustomerPlan.objects.all().order_by("id").last()
    next_invoice_date = add_months(self.next_year, 1)
    self.assertEqual(current_plan.next_invoice_date, next_invoice_date)
    self.assertEqual(get_realm('zulip').plan_type, Realm.STANDARD)
    self.assertEqual(current_plan.status, CustomerPlan.ACTIVE)
    # The original plan stays ended with no further invoicing.
    old_plan = CustomerPlan.objects.all().order_by("id").first()
    self.assertEqual(old_plan.next_invoice_date, None)
    self.assertEqual(old_plan.status, CustomerPlan.ENDED)
@patch("corporate.lib.stripe.billing_logger.info")
def test_deactivate_realm(self, mock_: Mock) -> None:
    """Deactivating a realm ends its plan immediately and stops all
    future invoicing, even if the realm is later reactivated."""
    user = self.example_user("hamlet")
    with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
        self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, 'token')
    plan = CustomerPlan.objects.get()
    self.assertEqual(plan.next_invoice_date, self.next_month)
    self.assertEqual(get_realm('zulip').plan_type, Realm.STANDARD)
    self.assertEqual(plan.status, CustomerPlan.ACTIVE)
    # Add some extra users before the realm is deactivated
    with patch("corporate.lib.stripe.get_latest_seat_count", return_value=20):
        update_license_ledger_if_needed(user.realm, self.now)
    last_ledger_entry = LicenseLedger.objects.order_by('id').last()
    self.assertEqual(last_ledger_entry.licenses, 20)
    self.assertEqual(last_ledger_entry.licenses_at_next_renewal, 20)
    # Deactivation ends the plan and downgrades the realm immediately.
    do_deactivate_realm(get_realm("zulip"))
    plan.refresh_from_db()
    self.assertTrue(get_realm('zulip').deactivated)
    self.assertEqual(get_realm('zulip').plan_type, Realm.LIMITED)
    self.assertEqual(plan.status, CustomerPlan.ENDED)
    self.assertEqual(plan.invoiced_through, last_ledger_entry)
    self.assertIsNone(plan.next_invoice_date)
    # Reactivating does not resurrect the plan.
    do_reactivate_realm(get_realm('zulip'))
    self.login_user(user)
    response = self.client_get("/billing/")
    self.assert_in_success_response(["Your organization is on the <b>Zulip Free</b>"], response)
    # The extra users added in the final month are not charged
    with patch("corporate.lib.stripe.invoice_plan") as mocked:
        invoice_plans_as_needed(self.next_month)
    mocked.assert_not_called()
    # The plan is not renewed after a year
    with patch("corporate.lib.stripe.invoice_plan") as mocked:
        invoice_plans_as_needed(self.next_year)
    mocked.assert_not_called()
@patch("corporate.lib.stripe.billing_logger.info")
def test_reupgrade_by_billing_admin_after_realm_deactivation(self, mock_: Mock) -> None:
    """After a realm is deactivated and reactivated, a billing admin can
    upgrade again: the Customer is reused, a second plan is created, and
    the original plan remains ended."""
    user = self.example_user("hamlet")
    with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
        self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, 'token')
    # Deactivation ends the plan; reactivation leaves the realm on Free.
    do_deactivate_realm(get_realm("zulip"))
    self.assertTrue(get_realm('zulip').deactivated)
    do_reactivate_realm(get_realm('zulip'))
    self.login_user(user)
    response = self.client_get("/billing/")
    self.assert_in_success_response(["Your organization is on the <b>Zulip Free</b>"], response)
    # Re-upgrade: same Customer row, new CustomerPlan.
    with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
        self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, 'token')
    self.assertEqual(Customer.objects.count(), 1)
    self.assertEqual(CustomerPlan.objects.count(), 2)
    current_plan = CustomerPlan.objects.all().order_by("id").last()
    self.assertEqual(current_plan.next_invoice_date, self.next_month)
    self.assertEqual(get_realm('zulip').plan_type, Realm.STANDARD)
    self.assertEqual(current_plan.status, CustomerPlan.ACTIVE)
    old_plan = CustomerPlan.objects.all().order_by("id").first()
    self.assertEqual(old_plan.next_invoice_date, None)
    self.assertEqual(old_plan.status, CustomerPlan.ENDED)
class RequiresBillingAccessTest(ZulipTestCase):
    """Access control for the billing JSON endpoints: billing admins and
    realm owners may use them, other roles must be rejected."""

    def setUp(self) -> None:
        # Make hamlet a billing admin and desdemona a realm owner, so the
        # two privileged access paths can be exercised independently.
        super().setUp()
        hamlet = self.example_user("hamlet")
        hamlet.is_billing_admin = True
        hamlet.save(update_fields=["is_billing_admin"])
        desdemona = self.example_user('desdemona')
        desdemona.role = UserProfile.ROLE_REALM_OWNER
        desdemona.save(update_fields=["role"])

    def test_who_can_access_json_endpoints(self) -> None:
        """Billing admins and realm owners can hit the billing endpoints."""
        # Billing admins have access
        self.login_user(self.example_user('hamlet'))
        with patch("corporate.views.do_replace_payment_source") as mocked1:
            response = self.client_post("/json/billing/sources/change",
                                        {'stripe_token': orjson.dumps('token').decode()})
        self.assert_json_success(response)
        mocked1.assert_called()
        # Realm owners have access, even if they are not billing admins
        self.login_user(self.example_user('desdemona'))
        with patch("corporate.views.do_replace_payment_source") as mocked2:
            response = self.client_post("/json/billing/sources/change",
                                        {'stripe_token': orjson.dumps('token').decode()})
        self.assert_json_success(response)
        mocked2.assert_called()

    def test_who_cant_access_json_endpoints(self) -> None:
        """Guests and unprivileged members are rejected from every billing
        JSON endpoint, with the appropriate error message."""
        def verify_user_cant_access_endpoint(username: str, endpoint: str, request_data: Dict[str, str], error_message: str) -> None:
            # Logs in as `username`, posts to `endpoint`, and asserts the
            # request is rejected with `error_message`.
            self.login_user(self.example_user(username))
            response = self.client_post(endpoint, request_data)
            self.assert_json_error_contains(response, error_message)

        # Guests (polonius) fail the basic organization-member check.
        verify_user_cant_access_endpoint("polonius", "/json/billing/upgrade",
                                         {'billing_modality': orjson.dumps("charge_automatically").decode(), 'schedule': orjson.dumps("annual").decode(),
                                          'signed_seat_count': orjson.dumps('signed count').decode(), 'salt': orjson.dumps('salt').decode()},
                                         "Must be an organization member")
        verify_user_cant_access_endpoint("polonius", "/json/billing/sponsorship",
                                         {'organization-type': orjson.dumps("event").decode(), 'description': orjson.dumps("event description").decode(),
                                          'website': orjson.dumps("example.com").decode()},
                                         "Must be an organization member")
        # Regular members and non-owner admins fail the billing-access check.
        # NOTE(review): the login_user call here is redundant — the helper
        # logs in again itself; harmless, but could be dropped.
        for username in ["cordelia", "iago"]:
            self.login_user(self.example_user(username))
            verify_user_cant_access_endpoint(username, "/json/billing/sources/change", {'stripe_token': orjson.dumps('token').decode()},
                                             "Must be a billing administrator or an organization owner")
            verify_user_cant_access_endpoint(username, "/json/billing/plan/change", {'status': orjson.dumps(1).decode()},
                                             "Must be a billing administrator or an organization owner")
        # Make sure that we are testing all the JSON endpoints
        # Quite a hack, but probably fine for now
        string_with_all_endpoints = str(get_resolver('corporate.urls').reverse_dict)
        json_endpoints = {word.strip("\"'()[],$") for word in string_with_all_endpoints.split()
                          if 'json/' in word}
        self.assertEqual(len(json_endpoints), 4)
class BillingHelpersTest(ZulipTestCase):
    """Unit tests for the standalone helper functions in corporate.lib.stripe."""
    def test_next_month(self) -> None:
        """add_months and next_month walk monthly billing-period boundaries,
        clamping to the 28th for February."""
        anchor = datetime(2019, 12, 31, 1, 2, 3, tzinfo=timezone.utc)
        period_boundaries = [
            anchor,
            datetime(2020, 1, 31, 1, 2, 3, tzinfo=timezone.utc),
            # Test that this is the 28th even during leap years
            datetime(2020, 2, 28, 1, 2, 3, tzinfo=timezone.utc),
            datetime(2020, 3, 31, 1, 2, 3, tzinfo=timezone.utc),
            datetime(2020, 4, 30, 1, 2, 3, tzinfo=timezone.utc),
            datetime(2020, 5, 31, 1, 2, 3, tzinfo=timezone.utc),
            datetime(2020, 6, 30, 1, 2, 3, tzinfo=timezone.utc),
            datetime(2020, 7, 31, 1, 2, 3, tzinfo=timezone.utc),
            datetime(2020, 8, 31, 1, 2, 3, tzinfo=timezone.utc),
            datetime(2020, 9, 30, 1, 2, 3, tzinfo=timezone.utc),
            datetime(2020, 10, 31, 1, 2, 3, tzinfo=timezone.utc),
            datetime(2020, 11, 30, 1, 2, 3, tzinfo=timezone.utc),
            datetime(2020, 12, 31, 1, 2, 3, tzinfo=timezone.utc),
            datetime(2021, 1, 31, 1, 2, 3, tzinfo=timezone.utc),
            datetime(2021, 2, 28, 1, 2, 3, tzinfo=timezone.utc)]
        # Negative month offsets are rejected.
        with self.assertRaises(AssertionError):
            add_months(anchor, -1)
        # Explicitly test add_months for each value of MAX_DAY_FOR_MONTH and
        # for crossing a year boundary
        for i, boundary in enumerate(period_boundaries):
            self.assertEqual(add_months(anchor, i), boundary)
        # Test next_month for small values
        for last, next_ in zip(period_boundaries[:-1], period_boundaries[1:]):
            self.assertEqual(next_month(anchor, last), next_)
        # Test next_month for large values
        period_boundaries = [dt.replace(year=dt.year+100) for dt in period_boundaries]
        for last, next_ in zip(period_boundaries[:-1], period_boundaries[1:]):
            self.assertEqual(next_month(anchor, last), next_)
    def test_compute_plan_parameters(self) -> None:
        """compute_plan_parameters returns the expected (anchor, next invoice,
        renewal, per-license price) tuple for each billing configuration."""
        # TODO: test rounding down microseconds
        anchor = datetime(2019, 12, 31, 1, 2, 3, tzinfo=timezone.utc)
        month_later = datetime(2020, 1, 31, 1, 2, 3, tzinfo=timezone.utc)
        year_later = datetime(2020, 12, 31, 1, 2, 3, tzinfo=timezone.utc)
        test_cases = [
            # test all possibilities, since there aren't that many
            ((True, CustomerPlan.ANNUAL, None), (anchor, month_later, year_later, 8000)),
            ((True, CustomerPlan.ANNUAL, 85), (anchor, month_later, year_later, 1200)),
            ((True, CustomerPlan.MONTHLY, None), (anchor, month_later, month_later, 800)),
            ((True, CustomerPlan.MONTHLY, 85), (anchor, month_later, month_later, 120)),
            ((False, CustomerPlan.ANNUAL, None), (anchor, year_later, year_later, 8000)),
            ((False, CustomerPlan.ANNUAL, 85), (anchor, year_later, year_later, 1200)),
            ((False, CustomerPlan.MONTHLY, None), (anchor, month_later, month_later, 800)),
            ((False, CustomerPlan.MONTHLY, 85), (anchor, month_later, month_later, 120)),
            # test exact math of Decimals; 800 * (1 - 87.25) = 101.9999999..
            ((False, CustomerPlan.MONTHLY, 87.25), (anchor, month_later, month_later, 102)),
            # test dropping of fractional cents; without the int it's 102.8
            ((False, CustomerPlan.MONTHLY, 87.15), (anchor, month_later, month_later, 102)),
        ]
        with patch('corporate.lib.stripe.timezone_now', return_value=anchor):
            # NOTE(review): the unpacked names appear shifted -- 'discount'
            # receives the billing schedule and 'free_trial' the discount
            # percentage (e.g. 85). Behavior is unaffected since the values
            # are passed through positionally, but renaming would clarify.
            for (automanage_licenses, discount, free_trial), output in test_cases:
                output_ = compute_plan_parameters(
                    automanage_licenses,
                    discount,
                    None if free_trial is None else Decimal(free_trial),
                )
                self.assertEqual(output_, output)
    def test_update_or_create_stripe_customer_logic(self) -> None:
        """update_or_create_stripe_customer creates, backfills, or updates the
        Stripe customer depending on the existing Customer row and token."""
        user = self.example_user('hamlet')
        # No existing Customer object
        with patch('corporate.lib.stripe.do_create_stripe_customer', return_value='returned') as mocked1:
            returned = update_or_create_stripe_customer(user, stripe_token='token')
        mocked1.assert_called()
        self.assertEqual(returned, 'returned')
        customer = Customer.objects.create(realm=get_realm('zulip'))
        # Customer exists but stripe_customer_id is None
        with patch('corporate.lib.stripe.do_create_stripe_customer', return_value='returned') as mocked2:
            returned = update_or_create_stripe_customer(user, stripe_token='token')
        mocked2.assert_called()
        self.assertEqual(returned, 'returned')
        customer.stripe_customer_id = 'cus_12345'
        customer.save()
        # Customer exists, replace payment source
        with patch('corporate.lib.stripe.do_replace_payment_source') as mocked3:
            returned_customer = update_or_create_stripe_customer(self.example_user('hamlet'), 'token')
        mocked3.assert_called()
        self.assertEqual(returned_customer, customer)
        # Customer exists, do nothing
        with patch('corporate.lib.stripe.do_replace_payment_source') as mocked4:
            returned_customer = update_or_create_stripe_customer(self.example_user('hamlet'), None)
        mocked4.assert_not_called()
        self.assertEqual(returned_customer, customer)
    def test_get_customer_by_realm(self) -> None:
        """get_customer_by_realm returns the realm's Customer row, or None."""
        realm = get_realm('zulip')
        self.assertEqual(get_customer_by_realm(realm), None)
        customer = Customer.objects.create(realm=realm, stripe_customer_id='cus_12345')
        self.assertEqual(get_customer_by_realm(realm), customer)
    def test_get_current_plan_by_customer(self) -> None:
        """Only plans in a live status count as the customer's current plan."""
        realm = get_realm("zulip")
        customer = Customer.objects.create(realm=realm, stripe_customer_id='cus_12345')
        self.assertEqual(get_current_plan_by_customer(customer), None)
        plan = CustomerPlan.objects.create(customer=customer, status=CustomerPlan.ACTIVE,
                                           billing_cycle_anchor=timezone_now(),
                                           billing_schedule=CustomerPlan.ANNUAL,
                                           tier=CustomerPlan.STANDARD)
        self.assertEqual(get_current_plan_by_customer(customer), plan)
        # A plan scheduled to downgrade is still the current plan...
        plan.status = CustomerPlan.DOWNGRADE_AT_END_OF_CYCLE
        plan.save(update_fields=["status"])
        self.assertEqual(get_current_plan_by_customer(customer), plan)
        # ...but ended or never-started plans are not.
        plan.status = CustomerPlan.ENDED
        plan.save(update_fields=["status"])
        self.assertEqual(get_current_plan_by_customer(customer), None)
        plan.status = CustomerPlan.NEVER_STARTED
        plan.save(update_fields=["status"])
        self.assertEqual(get_current_plan_by_customer(customer), None)
    def test_get_current_plan_by_realm(self) -> None:
        """get_current_plan_by_realm chains realm -> Customer -> current plan."""
        realm = get_realm("zulip")
        self.assertEqual(get_current_plan_by_realm(realm), None)
        customer = Customer.objects.create(realm=realm, stripe_customer_id='cus_12345')
        self.assertEqual(get_current_plan_by_realm(realm), None)
        plan = CustomerPlan.objects.create(customer=customer, status=CustomerPlan.ACTIVE,
                                           billing_cycle_anchor=timezone_now(),
                                           billing_schedule=CustomerPlan.ANNUAL,
                                           tier=CustomerPlan.STANDARD)
        self.assertEqual(get_current_plan_by_realm(realm), plan)
class LicenseLedgerTest(StripeTestCase):
    """Tests for LicenseLedger bookkeeping as plans renew and seat counts change."""
    def test_add_plan_renewal_if_needed(self) -> None:
        """A renewal ledger entry is added exactly once, at the cycle end."""
        with patch('corporate.lib.stripe.timezone_now', return_value=self.now):
            self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, 'token')
        self.assertEqual(LicenseLedger.objects.count(), 1)
        plan = CustomerPlan.objects.get()
        # Plan hasn't renewed yet
        make_end_of_cycle_updates_if_needed(plan, self.next_year - timedelta(days=1))
        self.assertEqual(LicenseLedger.objects.count(), 1)
        # Plan needs to renew
        # TODO: do_deactivate_user for a user, so that licenses_at_next_renewal != licenses
        new_plan, ledger_entry = make_end_of_cycle_updates_if_needed(plan, self.next_year)
        self.assertIsNone(new_plan)
        self.assertEqual(LicenseLedger.objects.count(), 2)
        ledger_params = {
            'plan': plan, 'is_renewal': True, 'event_time': self.next_year,
            'licenses': self.seat_count, 'licenses_at_next_renewal': self.seat_count}
        for key, value in ledger_params.items():
            self.assertEqual(getattr(ledger_entry, key), value)
        # Plan needs to renew, but we already added the plan_renewal ledger entry
        make_end_of_cycle_updates_if_needed(plan, self.next_year + timedelta(days=1))
        self.assertEqual(LicenseLedger.objects.count(), 2)
    def test_update_license_ledger_if_needed(self) -> None:
        """Ledger entries are only written for active, automanaged plans."""
        realm = get_realm('zulip')
        # Test no Customer
        update_license_ledger_if_needed(realm, self.now)
        self.assertFalse(LicenseLedger.objects.exists())
        # Test plan not automanaged
        self.local_upgrade(self.seat_count + 1, False, CustomerPlan.ANNUAL, 'token')
        self.assertEqual(LicenseLedger.objects.count(), 1)
        update_license_ledger_if_needed(realm, self.now)
        self.assertEqual(LicenseLedger.objects.count(), 1)
        # Test no active plan
        plan = CustomerPlan.objects.get()
        plan.automanage_licenses = True
        plan.status = CustomerPlan.ENDED
        plan.save(update_fields=['automanage_licenses', 'status'])
        update_license_ledger_if_needed(realm, self.now)
        self.assertEqual(LicenseLedger.objects.count(), 1)
        # Test update needed
        plan.status = CustomerPlan.ACTIVE
        plan.save(update_fields=['status'])
        update_license_ledger_if_needed(realm, self.now)
        self.assertEqual(LicenseLedger.objects.count(), 2)
    def test_update_license_ledger_for_automanaged_plan(self) -> None:
        """Seat-count changes update licenses/licenses_at_next_renewal with a
        high-watermark per billing cycle for the billed license count."""
        realm = get_realm('zulip')
        with patch('corporate.lib.stripe.timezone_now', return_value=self.now):
            self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, 'token')
        plan = CustomerPlan.objects.first()
        # Simple increase
        with patch('corporate.lib.stripe.get_latest_seat_count', return_value=23):
            update_license_ledger_for_automanaged_plan(realm, plan, self.now)
        # Decrease
        with patch('corporate.lib.stripe.get_latest_seat_count', return_value=20):
            update_license_ledger_for_automanaged_plan(realm, plan, self.now)
        # Increase, but not past high watermark
        with patch('corporate.lib.stripe.get_latest_seat_count', return_value=21):
            update_license_ledger_for_automanaged_plan(realm, plan, self.now)
        # Increase, but after renewal date, and below last year's high watermark
        with patch('corporate.lib.stripe.get_latest_seat_count', return_value=22):
            update_license_ledger_for_automanaged_plan(realm, plan, self.next_year + timedelta(seconds=1))
        ledger_entries = list(LicenseLedger.objects.values_list(
            'is_renewal', 'event_time', 'licenses', 'licenses_at_next_renewal').order_by('id'))
        self.assertEqual(ledger_entries,
                         [(True, self.now, self.seat_count, self.seat_count),
                          (False, self.now, 23, 23),
                          (False, self.now, 23, 20),
                          (False, self.now, 23, 21),
                          (True, self.next_year, 21, 21),
                          (False, self.next_year + timedelta(seconds=1), 22, 22)])
    def test_user_changes(self) -> None:
        """User creation and (de/re)activation flow through to the ledger."""
        self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, 'token')
        user = do_create_user('email', 'password', get_realm('zulip'), 'name')
        do_deactivate_user(user)
        do_reactivate_user(user)
        # Not a proper use of do_activate_user, but fine for this test
        do_activate_user(user)
        ledger_entries = list(LicenseLedger.objects.values_list(
            'is_renewal', 'licenses', 'licenses_at_next_renewal').order_by('id'))
        self.assertEqual(ledger_entries,
                         [(True, self.seat_count, self.seat_count),
                          (False, self.seat_count + 1, self.seat_count + 1),
                          (False, self.seat_count + 1, self.seat_count),
                          (False, self.seat_count + 1, self.seat_count + 1),
                          (False, self.seat_count + 1, self.seat_count + 1)])
class InvoiceTest(StripeTestCase):
    """Tests for generating end-of-cycle invoices for customer plans."""
    def test_invoicing_status_is_started(self) -> None:
        """A plan stuck in STARTED invoicing status cannot be re-invoiced."""
        self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, 'token')
        plan = CustomerPlan.objects.first()
        plan.invoicing_status = CustomerPlan.STARTED
        plan.save(update_fields=['invoicing_status'])
        with self.assertRaises(NotImplementedError):
            invoice_plan(CustomerPlan.objects.first(), self.now)
    @mock_stripe()
    def test_invoice_plan(self, *mocks: Mock) -> None:
        """invoice_plan emits the expected renewal and prorated
        additional-license line items after a year of seat changes."""
        user = self.example_user("hamlet")
        self.login_user(user)
        with patch('corporate.lib.stripe.timezone_now', return_value=self.now):
            self.upgrade()
        # Increase
        with patch('corporate.lib.stripe.get_latest_seat_count', return_value=self.seat_count + 3):
            update_license_ledger_if_needed(get_realm('zulip'), self.now + timedelta(days=100))
        # Decrease
        with patch('corporate.lib.stripe.get_latest_seat_count', return_value=self.seat_count):
            update_license_ledger_if_needed(get_realm('zulip'), self.now + timedelta(days=200))
        # Increase, but not past high watermark
        with patch('corporate.lib.stripe.get_latest_seat_count', return_value=self.seat_count + 1):
            update_license_ledger_if_needed(get_realm('zulip'), self.now + timedelta(days=300))
        # Increase, but after renewal date, and below last year's high watermark
        with patch('corporate.lib.stripe.get_latest_seat_count', return_value=self.seat_count + 2):
            update_license_ledger_if_needed(get_realm('zulip'), self.now + timedelta(days=400))
        # Increase, but after event_time
        with patch('corporate.lib.stripe.get_latest_seat_count', return_value=self.seat_count + 3):
            update_license_ledger_if_needed(get_realm('zulip'), self.now + timedelta(days=500))
        plan = CustomerPlan.objects.first()
        invoice_plan(plan, self.now + timedelta(days=400))
        stripe_invoices = [invoice for invoice in stripe.Invoice.list(
            customer=plan.customer.stripe_customer_id)]
        self.assertEqual(len(stripe_invoices), 2)
        self.assertIsNotNone(stripe_invoices[0].status_transitions.finalized_at)
        stripe_line_items = [item for item in stripe_invoices[0].lines]
        self.assertEqual(len(stripe_line_items), 3)
        # Line 0: license added after the renewal, prorated over the new year.
        line_item_params = {
            'amount': int(8000 * (1 - ((400-366) / 365)) + .5),
            'description': 'Additional license (Feb 5, 2013 - Jan 2, 2014)',
            'discountable': False,
            'period': {
                'start': datetime_to_timestamp(self.now + timedelta(days=400)),
                'end': datetime_to_timestamp(self.now + timedelta(days=2*365 + 1))},
            'quantity': 1}
        for key, value in line_item_params.items():
            self.assertEqual(stripe_line_items[0].get(key), value)
        # Line 1: the renewal itself, at the previous cycle's high watermark.
        line_item_params = {
            'amount': 8000 * (self.seat_count + 1),
            'description': 'Zulip Standard - renewal',
            'discountable': False,
            'period': {
                'start': datetime_to_timestamp(self.now + timedelta(days=366)),
                'end': datetime_to_timestamp(self.now + timedelta(days=2*365 + 1))},
            'quantity': (self.seat_count + 1)}
        for key, value in line_item_params.items():
            self.assertEqual(stripe_line_items[1].get(key), value)
        # Line 2: licenses added mid-first-year, prorated over that year.
        line_item_params = {
            'amount': 3 * int(8000 * (366-100) / 366 + .5),
            'description': 'Additional license (Apr 11, 2012 - Jan 2, 2013)',
            'discountable': False,
            'period': {
                'start': datetime_to_timestamp(self.now + timedelta(days=100)),
                'end': datetime_to_timestamp(self.now + timedelta(days=366))},
            'quantity': 3}
        for key, value in line_item_params.items():
            self.assertEqual(stripe_line_items[2].get(key), value)
    @mock_stripe()
    def test_fixed_price_plans(self, *mocks: Mock) -> None:
        """Fixed-price plans are invoiced as a single fixed line item."""
        # Also tests charge_automatically=False
        user = self.example_user("hamlet")
        self.login_user(user)
        with patch('corporate.lib.stripe.timezone_now', return_value=self.now):
            self.upgrade(invoice=True)
        plan = CustomerPlan.objects.first()
        plan.fixed_price = 100
        plan.price_per_license = 0
        plan.save(update_fields=['fixed_price', 'price_per_license'])
        invoice_plan(plan, self.next_year)
        stripe_invoices = [invoice for invoice in stripe.Invoice.list(
            customer=plan.customer.stripe_customer_id)]
        self.assertEqual(len(stripe_invoices), 2)
        self.assertEqual(stripe_invoices[0].billing, 'send_invoice')
        stripe_line_items = [item for item in stripe_invoices[0].lines]
        self.assertEqual(len(stripe_line_items), 1)
        line_item_params = {
            'amount': 100,
            'description': 'Zulip Standard - renewal',
            'discountable': False,
            'period': {
                'start': datetime_to_timestamp(self.next_year),
                'end': datetime_to_timestamp(self.next_year + timedelta(days=365))},
            'quantity': 1}
        for key, value in line_item_params.items():
            self.assertEqual(stripe_line_items[0].get(key), value)
    def test_no_invoice_needed(self) -> None:
        """next_invoice_date still advances when there is nothing to invoice."""
        with patch('corporate.lib.stripe.timezone_now', return_value=self.now):
            self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, 'token')
        plan = CustomerPlan.objects.first()
        self.assertEqual(plan.next_invoice_date, self.next_month)
        # Test this doesn't make any calls to stripe.Invoice or stripe.InvoiceItem
        invoice_plan(plan, self.next_month)
        plan = CustomerPlan.objects.first()
        # Test that we still update next_invoice_date
        self.assertEqual(plan.next_invoice_date, self.next_month + timedelta(days=29))
    def test_invoice_plans_as_needed(self) -> None:
        """invoice_plans_as_needed only invoices plans whose date has arrived."""
        with patch('corporate.lib.stripe.timezone_now', return_value=self.now):
            self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, 'token')
        plan = CustomerPlan.objects.first()
        self.assertEqual(plan.next_invoice_date, self.next_month)
        # Test nothing needed to be done
        with patch('corporate.lib.stripe.invoice_plan') as mocked:
            invoice_plans_as_needed(self.next_month - timedelta(days=1))
        mocked.assert_not_called()
        # Test something needing to be done
        invoice_plans_as_needed(self.next_month)
        plan = CustomerPlan.objects.first()
        self.assertEqual(plan.next_invoice_date, self.next_month + timedelta(days=29))
| {
"content_hash": "77412eeb5c38f847b2fc69b3d72dbd37",
"timestamp": "",
"source": "github",
"line_count": 2236,
"max_line_length": 153,
"avg_line_length": 54.319320214669055,
"alnum_prop": 0.6319468458232476,
"repo_name": "brainwane/zulip",
"id": "752fc09863735e71459243f7cfc985804faa2d86",
"size": "121458",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corporate/tests/test_stripe.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "423578"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "647926"
},
{
"name": "JavaScript",
"bytes": "2886792"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "398747"
},
{
"name": "Puppet",
"bytes": "90558"
},
{
"name": "Python",
"bytes": "6000548"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "110849"
},
{
"name": "TypeScript",
"bytes": "9543"
}
],
"symlink_target": ""
} |
__version__ = "2.0.1"
"""
:mod:`pysal.explore.esda` --- Exploratory Spatial Data Analysis
=================================================
"""
from .moran import (Moran, Moran_BV, Moran_BV_matrix,
Moran_Local, Moran_Local_BV,
Moran_Rate, Moran_Local_Rate)
from .getisord import G, G_Local
from .geary import Geary
from .join_counts import Join_Counts
from .gamma import Gamma
| {
"content_hash": "aeea4ac0726b75d24b815fd6dd1c3bc3",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 63,
"avg_line_length": 32.23076923076923,
"alnum_prop": 0.5704057279236276,
"repo_name": "lixun910/pysal",
"id": "0ba28352c58ff35be5cdb55929bf90a448f79e80",
"size": "419",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pysal/explore/esda/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1315254"
},
{
"name": "Jupyter Notebook",
"bytes": "1407521"
},
{
"name": "Makefile",
"bytes": "526"
},
{
"name": "OpenEdge ABL",
"bytes": "595378"
},
{
"name": "Python",
"bytes": "3994938"
},
{
"name": "Shell",
"bytes": "3743"
}
],
"symlink_target": ""
} |
import sys
import LessonData
import RosterData
import HTMLRosterParser
import RosterExtractor
import Roster

# Build a Roster for every HTML file passed on the command line.
# Previously the Roster objects were constructed but never stored,
# leaving listOfRosters empty.
listOfRosters = []
for path in sys.argv[1:]:
    # try :
    listOfRosters.append(Roster.Roster(path))
    # except :
    #     print('error in :' + path)
| {
"content_hash": "43cdb4c83c54162c3ef97f09fa5e4736",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 40,
"avg_line_length": 18,
"alnum_prop": 0.6888888888888889,
"repo_name": "Zilleplus/KHBOLessenroosterAPI",
"id": "bda562e4c56631fadd447898601b16d7cd18a134",
"size": "316",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parserLessenrooster.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "8947"
}
],
"symlink_target": ""
} |
import sys
import time
import telepot
"""
$ python2.7 skeleton.py <token>
A skeleton for your telepot programs.
"""
def handle(msg):
    """Print the flavor and a short summary of each incoming message."""
    kind = telepot.flavor(msg)
    digest = telepot.glance(msg, flavor=kind)
    print(kind, digest)
TOKEN = sys.argv[1]  # get token from command-line
bot = telepot.Bot(TOKEN)
# Start receiving updates; handle() is invoked for each incoming message.
bot.message_loop(handle)
print('Listening ...')
# Keep the program running.
while 1:
    time.sleep(10)
| {
"content_hash": "8ff49a589744e94ebd578f9595041328",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 50,
"avg_line_length": 16.53846153846154,
"alnum_prop": 0.6953488372093023,
"repo_name": "mpunkenhofer/irc-telegram-bot",
"id": "9358589d0fea6d85d60e10e1981190b51c2a37dc",
"size": "430",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "telepot/examples/simple/skeleton.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "8068"
},
{
"name": "Python",
"bytes": "364472"
}
],
"symlink_target": ""
} |
"""
Flood Alerts Module - Controllers
@author: Fran Boon
@see: http://eden.sahanafoundation.org/wiki/Pakistan
"""
module = request.controller
if module not in deployment_settings.modules:
session.error = T("Module disabled!")
redirect(URL(r=request, c="default", f="index"))
# Options Menu (available in all Functions' Views)
response.menu_options = [
[T("Flood Reports"), False, URL(r=request, f="freport"),[
[T("List"), False, URL(r=request, f="freport")],
[T("Add"), False, URL(r=request, f="freport", args="create")],
#[T("Search"), False, URL(r=request, f="freport", args="search")]
]],
#[T("Map"), False, URL(r=request, f="maps")],
]
def index():
    """Serve the module's custom landing page."""
    nice_name = deployment_settings.modules[module].name_nice
    response.title = nice_name
    return dict(module_name=nice_name)
def maps():
    """Display every Flood Report on a single map."""
    join = (db.gis_location.id == db.flood_freport.location_id)
    reports = db(join).select()
    popup_url = URL(r=request, f="freport", args="read.popup?freport.location_id=")
    layer = {"name": T("Flood Reports"), "query": reports, "active": True, "popup_url": popup_url}
    rendered_map = gis.show_map(feature_queries=[layer], window=True)
    return dict(map=rendered_map)
def river():
    """RESTful CRUD controller for rivers."""
    resource = request.function
    # Post-processor: add per-row action buttons, with delete disabled.
    def postp(r, output):
        shn_action_buttons(r, deletable=False)
        return output
    response.s3.postp = postp
    return s3_rest_controller(module, resource)
def freport():
    """Flood Reports, RESTful controller."""
    # (The duplicate second assignment of resource was removed.)
    resource = request.function
    tablename = "%s_%s" % (module, resource)
    table = db[tablename]
    # Disable legacy fields, unless updating, so the data can be manually transferred to new fields
    #if "update" not in request.args:
    #    table.document.readable = table.document.writable = False
    # Post-processor: add per-row action buttons, with delete disabled.
    def postp(r, output):
        shn_action_buttons(r, deletable=False)
        return output
    response.s3.postp = postp
    rheader = lambda r: shn_flood_rheader(r, tabs = [(T("Basic Details"), None),
                                                     (T("Locations"), "freport_location")
                                                    ])
    output = s3_rest_controller(module, resource, rheader=rheader)
    return output
# -----------------------------------------------------------------------------
def download():
    """ Download a file """
    # Delegates to web2py's response.download, which streams the upload
    # referenced in the request args back to the client.
    return response.download(request, db)
# -----------------------------------------------------------------------------
def shn_flood_rheader(r, tabs=None):
    """Build the resource header (rheader) shown above Flood Report tabs.

    @param r: the S3 request
    @param tabs: list of (label, component) tuples for the tab row
    @return: a DIV for HTML representations of freport records, else None
    """
    # Fix the shared mutable-default-argument pitfall (was tabs=[]).
    if tabs is None:
        tabs = []
    if r.representation == "html":
        if r.name == "freport":
            report = r.record
            if report:
                rheader_tabs = shn_rheader_tabs(r, tabs)
                location = report.location_id
                if location:
                    location = shn_gis_location_represent(location)
                doc_name = doc_url = None
                # Look up the attached document, if any, for display/download.
                document = db(db.doc_document.id == report.document_id).select(db.doc_document.name, db.doc_document.file, limitby=(0, 1)).first()
                if document:
                    doc_name = document.name
                    doc_url = URL(r=request, f="download", args=[document.file])
                #try:
                #    doc_name, file = r.table.document.retrieve(document)
                #    if hasattr(file, "close"):
                #        file.close()
                #except:
                #    doc_name = document.name
                rheader = DIV(TABLE(
                                TR(
                                    TH(T("Location") + ": "), location,
                                    TH(T("Date") + ": "), report.datetime
                                    ),
                                TR(
                                    TH(T("Document") + ": "), A(doc_name, _href=doc_url)
                                    )
                                ),
                              rheader_tabs)
                return rheader
    return None
| {
"content_hash": "6ff58f278ca5c37537dd831c52ad1612",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 149,
"avg_line_length": 32.815384615384616,
"alnum_prop": 0.5100796999531176,
"repo_name": "ptressel/sahana-eden-madpub",
"id": "6cb53625c9b51bb7eff1a6c7012f9aef9f6b55ba",
"size": "4291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "controllers/flood.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "14896489"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Python",
"bytes": "14827014"
},
{
"name": "Shell",
"bytes": "1171"
}
],
"symlink_target": ""
} |
from azure.cli.core import AzCommandsLoader
from azure.cli.core.profiles import ResourceType
from azure.cli.command_modules.managedservices._help import helps # pylint: disable=unused-import
class ManagedServicesCommandsLoader(AzCommandsLoader):
    """Command loader for the `az managedservices` command group."""
    def __init__(self, cli_ctx=None):
        """Register the module's custom command template and resource type."""
        from azure.cli.core.commands import CliCommandType
        managedservices_custom = CliCommandType(operations_tmpl='azure.cli.command_modules.managedservices.custom#{}')
        super(ManagedServicesCommandsLoader, self).__init__(cli_ctx=cli_ctx,
                                                            resource_type=ResourceType.MGMT_MANAGEDSERVICES,
                                                            custom_command_type=managedservices_custom)
    def load_command_table(self, args):
        """Populate and return this module's command table."""
        from .commands import load_command_table
        load_command_table(self, args)
        return self.command_table
    def load_arguments(self, command):
        """Load argument definitions for the given command."""
        from ._params import load_arguments
        load_arguments(self, command)
def load_arguments(self, command):
from ._params import load_arguments
load_arguments(self, command)
COMMAND_LOADER_CLS = ManagedServicesCommandsLoader
| {
"content_hash": "cb6a29062bf2d3f5b34c4eec55442541",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 118,
"avg_line_length": 43.76,
"alnum_prop": 0.6718464351005484,
"repo_name": "yugangw-msft/azure-cli",
"id": "8d795e4a7ba5be6863ad871fb461898befcd18d6",
"size": "1440",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "src/azure-cli/azure/cli/command_modules/managedservices/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ANTLR",
"bytes": "5355"
},
{
"name": "Batchfile",
"bytes": "14110"
},
{
"name": "Bicep",
"bytes": "1679"
},
{
"name": "C#",
"bytes": "1971"
},
{
"name": "C++",
"bytes": "275"
},
{
"name": "Dockerfile",
"bytes": "8427"
},
{
"name": "HTML",
"bytes": "794"
},
{
"name": "JavaScript",
"bytes": "1404"
},
{
"name": "Jupyter Notebook",
"bytes": "389"
},
{
"name": "PowerShell",
"bytes": "1781"
},
{
"name": "Python",
"bytes": "24270340"
},
{
"name": "Rich Text Format",
"bytes": "12032"
},
{
"name": "Roff",
"bytes": "1036959"
},
{
"name": "Shell",
"bytes": "56023"
},
{
"name": "TSQL",
"bytes": "1145"
}
],
"symlink_target": ""
} |
"""Class to hold all sensor accessories."""
from __future__ import annotations
from collections.abc import Callable
import logging
from typing import NamedTuple
from pyhap.const import CATEGORY_SENSOR
from homeassistant.components.binary_sensor import BinarySensorDeviceClass
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_UNIT_OF_MEASUREMENT,
DEVICE_CLASS_CO2,
STATE_HOME,
STATE_ON,
TEMP_CELSIUS,
)
from homeassistant.core import callback
from .accessories import TYPES, HomeAccessory
from .const import (
CHAR_AIR_PARTICULATE_DENSITY,
CHAR_AIR_QUALITY,
CHAR_CARBON_DIOXIDE_DETECTED,
CHAR_CARBON_DIOXIDE_LEVEL,
CHAR_CARBON_DIOXIDE_PEAK_LEVEL,
CHAR_CARBON_MONOXIDE_DETECTED,
CHAR_CARBON_MONOXIDE_LEVEL,
CHAR_CARBON_MONOXIDE_PEAK_LEVEL,
CHAR_CONTACT_SENSOR_STATE,
CHAR_CURRENT_AMBIENT_LIGHT_LEVEL,
CHAR_CURRENT_HUMIDITY,
CHAR_CURRENT_TEMPERATURE,
CHAR_LEAK_DETECTED,
CHAR_MOTION_DETECTED,
CHAR_OCCUPANCY_DETECTED,
CHAR_SMOKE_DETECTED,
PROP_CELSIUS,
SERV_AIR_QUALITY_SENSOR,
SERV_CARBON_DIOXIDE_SENSOR,
SERV_CARBON_MONOXIDE_SENSOR,
SERV_CONTACT_SENSOR,
SERV_HUMIDITY_SENSOR,
SERV_LEAK_SENSOR,
SERV_LIGHT_SENSOR,
SERV_MOTION_SENSOR,
SERV_OCCUPANCY_SENSOR,
SERV_SMOKE_SENSOR,
SERV_TEMPERATURE_SENSOR,
THRESHOLD_CO,
THRESHOLD_CO2,
)
from .util import convert_to_float, density_to_air_quality, temperature_to_homekit
_LOGGER = logging.getLogger(__name__)
class SI(NamedTuple):
    """Service info.
    Maps a binary-sensor device class to the HomeKit service and
    characteristic it is exposed with, plus a formatter for the state.
    """
    service: str  # HomeKit service name (a SERV_* constant)
    char: str  # characteristic carrying the detection state (a CHAR_* constant)
    format: Callable[[bool], int | bool]  # converts the boolean HA state to the characteristic value
# Lookup from Home Assistant binary_sensor device class to the HomeKit
# service/characteristic pair (and state formatter) used to expose it.
BINARY_SENSOR_SERVICE_MAP: dict[str, SI] = {
    BinarySensorDeviceClass.CO: SI(
        SERV_CARBON_MONOXIDE_SENSOR, CHAR_CARBON_MONOXIDE_DETECTED, int
    ),
    DEVICE_CLASS_CO2: SI(SERV_CARBON_DIOXIDE_SENSOR, CHAR_CARBON_DIOXIDE_DETECTED, int),
    BinarySensorDeviceClass.DOOR: SI(
        SERV_CONTACT_SENSOR, CHAR_CONTACT_SENSOR_STATE, int
    ),
    BinarySensorDeviceClass.GARAGE_DOOR: SI(
        SERV_CONTACT_SENSOR, CHAR_CONTACT_SENSOR_STATE, int
    ),
    BinarySensorDeviceClass.GAS: SI(
        SERV_CARBON_MONOXIDE_SENSOR, CHAR_CARBON_MONOXIDE_DETECTED, int
    ),
    BinarySensorDeviceClass.MOISTURE: SI(SERV_LEAK_SENSOR, CHAR_LEAK_DETECTED, int),
    BinarySensorDeviceClass.MOTION: SI(SERV_MOTION_SENSOR, CHAR_MOTION_DETECTED, bool),
    BinarySensorDeviceClass.OCCUPANCY: SI(
        SERV_OCCUPANCY_SENSOR, CHAR_OCCUPANCY_DETECTED, int
    ),
    BinarySensorDeviceClass.OPENING: SI(
        SERV_CONTACT_SENSOR, CHAR_CONTACT_SENSOR_STATE, int
    ),
    BinarySensorDeviceClass.SMOKE: SI(SERV_SMOKE_SENSOR, CHAR_SMOKE_DETECTED, int),
    BinarySensorDeviceClass.WINDOW: SI(
        SERV_CONTACT_SENSOR, CHAR_CONTACT_SENSOR_STATE, int
    ),
}
@TYPES.register("TemperatureSensor")
class TemperatureSensor(HomeAccessory):
"""Generate a TemperatureSensor accessory for a temperature sensor.
Sensor entity must return temperature in °C, °F.
"""
def __init__(self, *args):
"""Initialize a TemperatureSensor accessory object."""
super().__init__(*args, category=CATEGORY_SENSOR)
state = self.hass.states.get(self.entity_id)
serv_temp = self.add_preload_service(SERV_TEMPERATURE_SENSOR)
self.char_temp = serv_temp.configure_char(
CHAR_CURRENT_TEMPERATURE, value=0, properties=PROP_CELSIUS
)
# Set the state so it is in sync on initial
# GET to avoid an event storm after homekit startup
self.async_update_state(state)
@callback
def async_update_state(self, new_state):
"""Update temperature after state changed."""
unit = new_state.attributes.get(ATTR_UNIT_OF_MEASUREMENT, TEMP_CELSIUS)
if (temperature := convert_to_float(new_state.state)) is not None:
temperature = temperature_to_homekit(temperature, unit)
self.char_temp.set_value(temperature)
_LOGGER.debug(
"%s: Current temperature set to %.1f°C", self.entity_id, temperature
)
@TYPES.register("HumiditySensor")
class HumiditySensor(HomeAccessory):
"""Generate a HumiditySensor accessory as humidity sensor."""
def __init__(self, *args):
"""Initialize a HumiditySensor accessory object."""
super().__init__(*args, category=CATEGORY_SENSOR)
state = self.hass.states.get(self.entity_id)
serv_humidity = self.add_preload_service(SERV_HUMIDITY_SENSOR)
self.char_humidity = serv_humidity.configure_char(
CHAR_CURRENT_HUMIDITY, value=0
)
# Set the state so it is in sync on initial
# GET to avoid an event storm after homekit startup
self.async_update_state(state)
@callback
def async_update_state(self, new_state):
"""Update accessory after state change."""
if (humidity := convert_to_float(new_state.state)) is not None:
self.char_humidity.set_value(humidity)
_LOGGER.debug("%s: Percent set to %d%%", self.entity_id, humidity)
@TYPES.register("AirQualitySensor")
class AirQualitySensor(HomeAccessory):
"""Generate a AirQualitySensor accessory as air quality sensor."""
def __init__(self, *args):
"""Initialize a AirQualitySensor accessory object."""
super().__init__(*args, category=CATEGORY_SENSOR)
state = self.hass.states.get(self.entity_id)
serv_air_quality = self.add_preload_service(
SERV_AIR_QUALITY_SENSOR, [CHAR_AIR_PARTICULATE_DENSITY]
)
self.char_quality = serv_air_quality.configure_char(CHAR_AIR_QUALITY, value=0)
self.char_density = serv_air_quality.configure_char(
CHAR_AIR_PARTICULATE_DENSITY, value=0
)
# Set the state so it is in sync on initial
# GET to avoid an event storm after homekit startup
self.async_update_state(state)
@callback
def async_update_state(self, new_state):
"""Update accessory after state change."""
if (density := convert_to_float(new_state.state)) is not None:
if self.char_density.value != density:
self.char_density.set_value(density)
_LOGGER.debug("%s: Set density to %d", self.entity_id, density)
air_quality = density_to_air_quality(density)
self.char_quality.set_value(air_quality)
_LOGGER.debug("%s: Set air_quality to %d", self.entity_id, air_quality)
@TYPES.register("CarbonMonoxideSensor")
class CarbonMonoxideSensor(HomeAccessory):
    """Generate a CarbonMonoxideSensor accessory as CO sensor."""

    def __init__(self, *args):
        """Initialize a CarbonMonoxideSensor accessory object."""
        super().__init__(*args, category=CATEGORY_SENSOR)
        initial_state = self.hass.states.get(self.entity_id)
        service = self.add_preload_service(
            SERV_CARBON_MONOXIDE_SENSOR,
            [CHAR_CARBON_MONOXIDE_LEVEL, CHAR_CARBON_MONOXIDE_PEAK_LEVEL],
        )
        self.char_level = service.configure_char(
            CHAR_CARBON_MONOXIDE_LEVEL, value=0
        )
        self.char_peak = service.configure_char(
            CHAR_CARBON_MONOXIDE_PEAK_LEVEL, value=0
        )
        self.char_detected = service.configure_char(
            CHAR_CARBON_MONOXIDE_DETECTED, value=0
        )
        # Sync once at startup so the first HomeKit GET does not
        # trigger an event storm.
        self.async_update_state(initial_state)

    @callback
    def async_update_state(self, new_state):
        """Update accessory after state change."""
        value = convert_to_float(new_state.state)
        if value is None:
            return
        self.char_level.set_value(value)
        # Track the highest level seen so far.
        if value > self.char_peak.value:
            self.char_peak.set_value(value)
        self.char_detected.set_value(value > THRESHOLD_CO)
        _LOGGER.debug("%s: Set to %d", self.entity_id, value)
@TYPES.register("CarbonDioxideSensor")
class CarbonDioxideSensor(HomeAccessory):
    """Generate a CarbonDioxideSensor accessory as CO2 sensor."""

    def __init__(self, *args):
        """Initialize a CarbonDioxideSensor accessory object."""
        super().__init__(*args, category=CATEGORY_SENSOR)
        initial_state = self.hass.states.get(self.entity_id)
        service = self.add_preload_service(
            SERV_CARBON_DIOXIDE_SENSOR,
            [CHAR_CARBON_DIOXIDE_LEVEL, CHAR_CARBON_DIOXIDE_PEAK_LEVEL],
        )
        self.char_level = service.configure_char(
            CHAR_CARBON_DIOXIDE_LEVEL, value=0
        )
        self.char_peak = service.configure_char(
            CHAR_CARBON_DIOXIDE_PEAK_LEVEL, value=0
        )
        self.char_detected = service.configure_char(
            CHAR_CARBON_DIOXIDE_DETECTED, value=0
        )
        # Sync once at startup so the first HomeKit GET does not
        # trigger an event storm.
        self.async_update_state(initial_state)

    @callback
    def async_update_state(self, new_state):
        """Update accessory after state change."""
        value = convert_to_float(new_state.state)
        if value is None:
            return
        self.char_level.set_value(value)
        # Track the highest level seen so far.
        if value > self.char_peak.value:
            self.char_peak.set_value(value)
        self.char_detected.set_value(value > THRESHOLD_CO2)
        _LOGGER.debug("%s: Set to %d", self.entity_id, value)
@TYPES.register("LightSensor")
class LightSensor(HomeAccessory):
    """Generate a LightSensor accessory as light sensor."""

    def __init__(self, *args):
        """Initialize a LightSensor accessory object."""
        super().__init__(*args, category=CATEGORY_SENSOR)
        initial_state = self.hass.states.get(self.entity_id)
        service = self.add_preload_service(SERV_LIGHT_SENSOR)
        self.char_light = service.configure_char(
            CHAR_CURRENT_AMBIENT_LIGHT_LEVEL, value=0
        )
        # Sync once at startup so the first HomeKit GET does not
        # trigger an event storm.
        self.async_update_state(initial_state)

    @callback
    def async_update_state(self, new_state):
        """Update accessory after state change."""
        luminance = convert_to_float(new_state.state)
        if luminance is None:
            return
        self.char_light.set_value(luminance)
        _LOGGER.debug("%s: Set to %d", self.entity_id, luminance)
@TYPES.register("BinarySensor")
class BinarySensor(HomeAccessory):
    """Generate a BinarySensor accessory as binary sensor."""

    def __init__(self, *args):
        """Initialize a BinarySensor accessory object."""
        super().__init__(*args, category=CATEGORY_SENSOR)
        state = self.hass.states.get(self.entity_id)
        device_class = state.attributes.get(ATTR_DEVICE_CLASS)
        # Fall back to the occupancy mapping for unknown device classes.
        # dict.get replaces the previous membership-test-plus-lookup.
        service_char = BINARY_SENSOR_SERVICE_MAP.get(
            device_class,
            BINARY_SENSOR_SERVICE_MAP[BinarySensorDeviceClass.OCCUPANCY],
        )
        self.format = service_char.format
        service = self.add_preload_service(service_char.service)
        # bool-typed characteristics start as False, numeric ones as 0.
        initial_value = False if self.format is bool else 0
        self.char_detected = service.configure_char(
            service_char.char, value=initial_value
        )
        # Set the state so it is in sync on initial
        # GET to avoid an event storm after homekit startup
        self.async_update_state(state)

    @callback
    def async_update_state(self, new_state):
        """Update accessory after state change."""
        state = new_state.state
        # STATE_ON / STATE_HOME both count as "detected".
        detected = self.format(state in (STATE_ON, STATE_HOME))
        self.char_detected.set_value(detected)
        _LOGGER.debug("%s: Set to %d", self.entity_id, detected)
| {
"content_hash": "7370f280355d65ea8489673173ce49d6",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 88,
"avg_line_length": 38.30944625407166,
"alnum_prop": 0.6553014199472834,
"repo_name": "rohitranjan1991/home-assistant",
"id": "46e241efab010d4f6c4b06d2e36d7f81d5fe90b5",
"size": "11764",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/homekit/type_sensors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1017265"
},
{
"name": "Python",
"bytes": "1051086"
},
{
"name": "Shell",
"bytes": "3946"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division, unicode_literals
__doc__="""
(GUI) Lets you rename kerning names and pairs associated with them.
"""
import vanilla
import GlyphsApp
thisFont = Glyphs.font

# Build a more accessible kerning dictionary.
# newKernDic[master.id] is a list of [left, right, value] triples.
kernDic = thisFont.kerningDict()
newKernDic = {}
for thisMaster in thisFont.masters:
    # Hoist the per-master lookup out of the nested loops.
    masterKerning = kernDic[thisMaster.id]
    kernList = []
    for key1 in masterKerning:
        for key2 in masterKerning[key1]:
            kernList.append([key1, key2, masterKerning[key1][key2]])
    newKernDic[thisMaster.id] = kernList

# Build the popup list data.
# groupsL/groupsR map each group name to the names of glyphs using it.
groupsL = {}
groupsR = {}
for thisGlyph in thisFont.glyphs:
    # `is not None` instead of `!= None`; setdefault replaces the
    # membership-test-then-create sequence.
    if thisGlyph.leftKerningGroup is not None:
        groupsL.setdefault(thisGlyph.leftKerningGroup, []).append(thisGlyph.name)
    if thisGlyph.rightKerningGroup is not None:
        groupsR.setdefault(thisGlyph.rightKerningGroup, []).append(thisGlyph.name)
class RenameKerningGroups( object ):
	"""Dialog that renames a left or right kerning group.

	Renaming updates: each member glyph's kerning-group attribute, every
	kerning pair in the font that references the group, the popup list,
	and the module-level kerning cache (newKernDic).
	"""
	def __init__( self ):
		"""Build and open the floating window."""
		# Window 'self.w':
		editX = 180
		editY = 22
		textY = 17
		spaceX = 10
		spaceY = 10
		windowWidth = spaceX*3+editX*2+85
		windowHeight = 150
		self.w = vanilla.FloatingWindow(
			( windowWidth, windowHeight ), # default window size
			"Rename Kerning Groups", # window title
			minSize = ( windowWidth, windowHeight ), # minimum size (for resizing)
			maxSize = ( windowWidth + 100, windowHeight ), # maximum size (for resizing)
			autosaveName = "com.Tosche.RenameKerningGroups.mainwindow" # stores last window position and size
		)
		# UI elements:
		self.w.radio = vanilla.RadioGroup( (spaceX+130, spaceY, 120, textY), ["Left", "Right"], isVertical = False, sizeStyle='regular', callback=self.switchList)
		self.w.radio.set(0)
		self.w.text1 = vanilla.TextBox( (spaceX, spaceY*2+textY, 120, textY), "Rename this Group", sizeStyle='regular' )
		self.w.text2 = vanilla.TextBox( (spaceX, spaceY*3+editY+textY, 120, textY), "to this", sizeStyle='regular' )
		self.w.popup = vanilla.PopUpButton( (spaceX+130, spaceY*2+textY, -15, editY), [str(x) for x in sorted(groupsL)], sizeStyle='regular' )
		self.w.newName = vanilla.EditText( (spaceX+130, spaceY*3+editY+textY, -15, editY), "", sizeStyle = 'regular' )
		# Run Button:
		self.w.runButton = vanilla.Button((-80-15, spaceY*4+editY*3, -15, -15), "Run", sizeStyle='regular', callback=self.RenameKerningGroupsMain )
		self.w.setDefaultButton( self.w.runButton )
		# Open window and focus on it:
		self.w.open()
		self.w.makeKey()
	def switchList(self, sender):
		"""Repopulate the popup when the Left/Right radio selection changes."""
		try:
			if self.w.radio.get() == 0:
				self.w.popup.setItems(sorted(groupsL))
			elif self.w.radio.get() == 1:
				self.w.popup.setItems(sorted(groupsR))
		except Exception as e:
			print("Rename Kerning Group Error (switchList): %s" % e)
	def _renameGroup(self, groups, attrName, prefix, pairIndex, oldName, newName):
		"""Rename one kerning group on one side.

		groups    -- groupsL or groupsR (group name -> member glyph names)
		attrName  -- 'leftKerningGroup' or 'rightKerningGroup'
		prefix    -- '@MMK_R_' (left groups) or '@MMK_L_' (right groups)
		pairIndex -- index of this side's key in a [left, right, value] triple
		"""
		oldKey = prefix + oldName
		newKey = prefix + newName
		for thisGlyphName in groups[oldName]:
			setattr(thisFont.glyphs[thisGlyphName], attrName, newName)
		for thisMaster in thisFont.masters:
			for thisPair in newKernDic[thisMaster.id]:
				# BUGFIX: compare for equality instead of substring
				# containment ("@MMK_R_A" in "@MMK_R_AE" was True), so
				# renaming group "A" no longer rewrites pairs of "AE".
				if thisPair[pairIndex] == oldKey:
					if pairIndex == 1:
						thisFont.setKerningForPair(thisMaster.id, thisPair[0], newKey, thisPair[2])
						thisFont.removeKerningForPair(thisMaster.id, thisPair[0], oldKey)
					else:
						thisFont.setKerningForPair(thisMaster.id, newKey, thisPair[1], thisPair[2])
						thisFont.removeKerningForPair(thisMaster.id, oldKey, thisPair[1])
					# keep the cache in sync for subsequent renames
					thisPair[pairIndex] = newKey
		# updating the popup
		groups[newName] = groups.pop(oldName)
		self.w.popup.setItems(sorted(groups))
		self.w.popup.set(sorted(groups).index(newName))
	def RenameKerningGroupsMain( self, sender ):
		"""Run-button callback: rename the selected group to the entered name."""
		try:
			newName = self.w.newName.get()
			popupNum = self.w.popup.get()
			if self.w.radio.get() == 0: # if it's a left group
				oldName = sorted(groupsL)[popupNum]
				self._renameGroup(groupsL, 'leftKerningGroup', "@MMK_R_", 1, oldName, newName)
			elif self.w.radio.get() == 1: # if it's a right group
				oldName = sorted(groupsR)[popupNum]
				self._renameGroup(groupsR, 'rightKerningGroup', "@MMK_L_", 0, oldName, newName)
		except Exception as e:
			# brings macro window to front and reports error:
			Glyphs.showMacroWindow()
			print("Rename Kerning Group Error (RenameKerningGroupsMain): %s" % e)
RenameKerningGroups() | {
"content_hash": "d40d231070c4e0c7d825c68eaf1e79e4",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 156,
"avg_line_length": 40.404761904761905,
"alnum_prop": 0.7032017285405617,
"repo_name": "Tosche/Glyphs-Scripts",
"id": "b636e27a7346fc22bc42382cad3ba41dbf0ab8bb",
"size": "5152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Metrics & Kerning/Rename Kerning Groups.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "195886"
}
],
"symlink_target": ""
} |
__version__=''' $Id: rltempfile.py 3342 2008-12-12 15:55:34Z andy $ '''
__doc__='''Helper for the test suite - determines where to write output.
When our test suite runs as source, a script "test_foo.py" will typically
create "test_foo.pdf" alongside it. But if you are testing a package of
compiled code inside a zip archive, this won't work. This determines
where to write test suite output, creating a subdirectory of /tmp/ or
whatever if needed.
'''
# Lazily-created per-user temp directory path (set by get_rl_tempdir).
_rl_tempdir=None
# BUGFIX: the second entry was a duplicate 'get_rl_tempdir'; the module
# defines get_rl_tempdir and get_rl_tempfile, so export both.
__all__ = ('get_rl_tempdir', 'get_rl_tempfile')
import os, tempfile
def _rl_getuid():
    """Return the current uid, or '' on platforms without os.getuid."""
    getuid = getattr(os, 'getuid', None)
    if getuid is None:
        return ''
    return getuid()
def get_rl_tempdir(*subdirs):
    """Return (creating if needed) the ReportLab temp directory.

    With *subdirs* given, returns that subdirectory of it instead.
    The directory name includes the uid so users don't collide.
    """
    global _rl_tempdir
    if _rl_tempdir is None:
        _rl_tempdir = os.path.join(tempfile.gettempdir(),'ReportLab_tmp%s' % str(_rl_getuid()))
    d = _rl_tempdir
    if subdirs: d = os.path.join(*((d,)+subdirs))
    try:
        os.makedirs(d)
    except OSError:
        # BUGFIX: was a bare `except:` that swallowed every exception
        # (including KeyboardInterrupt). Filesystem failures such as
        # "already exists" raise OSError; callers get the path either way.
        pass
    return d
def get_rl_tempfile(fn=None):
    """Return a temp file path, defaulting the name via tempfile.mktemp().

    NOTE(review): mktemp() returns an *absolute* path and os.path.join
    discards its first argument when the second is absolute, so the
    default case effectively ignores get_rl_tempdir() -- confirm whether
    that is intended. mktemp is also race-prone by design.
    """
    name = fn or tempfile.mktemp()
    return os.path.join(get_rl_tempdir(), name)
| {
"content_hash": "7a1b3a0dfe09ab6f2c4b68484ffee775",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 95,
"avg_line_length": 31.97142857142857,
"alnum_prop": 0.6264521894548705,
"repo_name": "commtrack/temp-aquatest",
"id": "fecf2d2eb702465e564050f49b3e1b284b6ac40b",
"size": "1211",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "reportlab/lib/rltempfile.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "742874"
},
{
"name": "PHP",
"bytes": "2863"
},
{
"name": "Python",
"bytes": "3707591"
},
{
"name": "Shell",
"bytes": "490"
}
],
"symlink_target": ""
} |
from collections import Counter
def combine(*args):
    """Merge any number of mappings into one collections.Counter,
    summing the values of keys that occur in several of them."""
    total = Counter()
    for mapping in args:
        total = total + Counter(mapping)
    return total
| {
"content_hash": "4c85491724fbeb6a195dc38408e14968",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 53,
"avg_line_length": 21.6,
"alnum_prop": 0.7037037037037037,
"repo_name": "the-zebulan/CodeWars",
"id": "138179002b50a2a45edaee7cc73746b7d5267e52",
"size": "108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "katas/kyu_7/combine_objects.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1203000"
}
],
"symlink_target": ""
} |
"""
Title: Convolutional autoencoder for image denoising
Author: [Santiago L. Valdarrama](https://twitter.com/svpino)
Date created: 2021/03/01
Last modified: 2021/03/01
Description: How to train a deep convolutional autoencoder for image denoising.
"""
"""
## Introduction
This example demonstrates how to implement a deep convolutional autoencoder
for image denoising, mapping noisy digits images from the MNIST dataset to
clean digits images. This implementation is based on an original blog post
titled [Building Autoencoders in Keras](https://blog.keras.io/building-autoencoders-in-keras.html)
by [François Chollet](https://twitter.com/fchollet).
"""
"""
## Setup
"""
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras import layers
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Model
def preprocess(array):
    """Scale pixel values to [0, 1] float32 and add a trailing
    single-channel axis, yielding shape (len(array), 28, 28, 1)."""
    scaled = array.astype("float32") / 255.0
    return np.reshape(scaled, (len(scaled), 28, 28, 1))
def noise(array):
    """Return *array* with Gaussian noise (factor 0.4) added,
    clipped back into the [0, 1] range."""
    noise_factor = 0.4
    perturbation = noise_factor * np.random.normal(
        loc=0.0, scale=1.0, size=array.shape
    )
    return np.clip(array + perturbation, 0.0, 1.0)
def display(array1, array2):
    """Plot ten random image pairs: top row from array1, bottom row the
    image at the same index in array2."""
    count = 10
    chosen = np.random.randint(len(array1), size=count)
    plt.figure(figsize=(20, 4))
    for column, index in enumerate(chosen):
        for row, images in enumerate((array1, array2)):
            axis = plt.subplot(2, count, row * count + column + 1)
            plt.imshow(images[index].reshape(28, 28))
            plt.gray()
            axis.get_xaxis().set_visible(False)
            axis.get_yaxis().set_visible(False)
    plt.show()
"""
## Prepare the data
"""
# Since we only need images from the dataset to encode and decode, we
# won't use the labels.
(train_data, _), (test_data, _) = mnist.load_data()
# Normalize and reshape the data
train_data = preprocess(train_data)
test_data = preprocess(test_data)
# Create a copy of the data with added noise
noisy_train_data = noise(train_data)
noisy_test_data = noise(test_data)
# Display the train data and a version of it with added noise
display(train_data, noisy_train_data)
"""
## Build the autoencoder
We are going to use the Functional API to build our convolutional autoencoder.
"""
# NOTE(review): `input` shadows the builtin of the same name; harmless in
# a linear script but rename if this grows.
input = layers.Input(shape=(28, 28, 1))
# Encoder
# Two stride-2 poolings reduce the 28x28 input to a 7x7 bottleneck.
x = layers.Conv2D(32, (3, 3), activation="relu", padding="same")(input)
x = layers.MaxPooling2D((2, 2), padding="same")(x)
x = layers.Conv2D(32, (3, 3), activation="relu", padding="same")(x)
x = layers.MaxPooling2D((2, 2), padding="same")(x)
# Decoder
# Two stride-2 transposed convolutions upsample 7x7 back to 28x28.
x = layers.Conv2DTranspose(32, (3, 3), strides=2, activation="relu", padding="same")(x)
x = layers.Conv2DTranspose(32, (3, 3), strides=2, activation="relu", padding="same")(x)
# Sigmoid keeps outputs in [0, 1], matching the normalized inputs.
x = layers.Conv2D(1, (3, 3), activation="sigmoid", padding="same")(x)
# Autoencoder
autoencoder = Model(input, x)
autoencoder.compile(optimizer="adam", loss="binary_crossentropy")
autoencoder.summary()
"""
Now we can train our autoencoder using `train_data` as both our input data
and target. Notice we are setting up the validation data using the same
format.
"""
autoencoder.fit(
    x=train_data,
    y=train_data,
    epochs=50,
    batch_size=128,
    shuffle=True,
    validation_data=(test_data, test_data),
)
"""
Let's predict on our test dataset and display the original image together with
the prediction from our autoencoder.
Notice how the predictions are pretty close to the original images, although
not quite the same.
"""
predictions = autoencoder.predict(test_data)
display(test_data, predictions)
"""
Now that we know that our autoencoder works, let's retrain it using the noisy
data as our input and the clean data as our target. We want our autoencoder to
learn how to denoise the images.
"""
# Same model, but now trained to map noisy inputs to clean targets.
autoencoder.fit(
    x=noisy_train_data,
    y=train_data,
    epochs=100,
    batch_size=128,
    shuffle=True,
    validation_data=(noisy_test_data, test_data),
)
"""
Let's now predict on the noisy data and display the results of our autoencoder.
Notice how the autoencoder does an amazing job at removing the noise from the
input images.
"""
predictions = autoencoder.predict(noisy_test_data)
display(noisy_test_data, predictions)
| {
"content_hash": "0ea43722501e6fb79c3882b1146c147a",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 98,
"avg_line_length": 26.69142857142857,
"alnum_prop": 0.6981374438021837,
"repo_name": "keras-team/keras-io",
"id": "3cc95a9ffa50ce20c7ad1fa1af6aeabb1bc876b0",
"size": "4672",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/vision/autoencoder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "15929"
},
{
"name": "Dockerfile",
"bytes": "188"
},
{
"name": "HTML",
"bytes": "21968"
},
{
"name": "Jupyter Notebook",
"bytes": "718942"
},
{
"name": "Makefile",
"bytes": "193"
},
{
"name": "Python",
"bytes": "680865"
}
],
"symlink_target": ""
} |
import os
from email.utils import parseaddr
from configurations import Configuration, values
def ugettext(s):
    """Identity function used in place of a real translation function.

    PEP 8 (E731) prefers `def` over assigning a lambda to a name.
    NOTE(review): presumably a no-op stand-in so translatable strings can
    be marked without importing Django at settings-import time -- confirm.
    """
    return s
class Base(Configuration):
"""
The are correct settings that are primarily targeted at the production
system but allow (where appriate) easy overrides either via subclassing
or environment variables.
"""
###########################################################################
#
# General settings
#
TEXT_HTML_SANITIZE = True
TEXT_ADDITIONAL_TAGS = ['object', 'param']
TEXT_ADDITIONAL_PROTOCOLS = ['rtmp']
PROJECT_NAME = 'pyconde'
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
ADMINS = values.ListValue([], converter=parseaddr)
ALLOWED_HOSTS = values.ListValue(['localhost', '127.0.0.1'])
    @property
    def MANAGERS(self):
        """Managers default to the same address list as ADMINS."""
        return self.ADMINS
EMAIL_HOST = values.Value()
DEFAULT_FROM_EMAIL = values.EmailValue('noreply@ep14.org')
SERVER_EMAIL = values.EmailValue('noreply@ep14.org')
SUPPORT_EMAIL = values.EmailValue('helpdesk@europython.eu')
TIME_ZONE = 'Europe/Berlin'
LANGUAGE_CODE = 'en'
SECRET_KEY = values.SecretValue()
EMAIL_SUBJECT_PREFIX = values.Value('[EuroPython 2014] ')
USE_I18N = True
USE_L10N = True
SITE_ID = values.IntegerValue(1)
CONFERENCE_ID = values.IntegerValue(1)
LANGUAGES = (
('de', ugettext('German')),
('en', ugettext('English')),
)
INTERNAL_IPS = ('127.0.0.1',)
ROOT_URLCONF = '%s.urls' % PROJECT_NAME
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'skins', 'default'),
os.path.join(BASE_DIR, 'skins', 'ep14'),
)
INSTALLED_APPS = [
# Skins
'pyconde.skins.ep14',
'pyconde.skins.default',
'djangocms_admin_style',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'django.contrib.markup',
'sortedm2m',
'crispy_forms',
'south',
'easy_thumbnails',
'filer',
'compressor',
'djangocms_text_ckeditor', # must be before 'cms'!
'cms',
'cms.stacks',
'mptt',
'menus',
'sekizai',
'userprofiles',
'userprofiles.contrib.accountverification',
'userprofiles.contrib.emailverification',
'userprofiles.contrib.profiles',
'taggit',
'haystack',
#'tinymce', # If you want tinymce, add it in the settings.py file.
'django_gravatar',
'social_auth',
'gunicorn',
'statici18n',
'cms.plugins.inherit',
'cms.plugins.googlemap',
'cms.plugins.link',
'cms.plugins.snippet',
#'cms.plugins.twitter',
#'cms.plugins.text',
'cmsplugin_filer_file',
'cmsplugin_filer_image',
'djangocms_style',
#'cmsplugin_news',
'pyconde.testimonials',
# Symposion apps
'pyconde.conference',
'pyconde.speakers',
'pyconde.proposals',
'pyconde.sponsorship',
# Custom apps
'pyconde.core',
'pyconde.accounts',
'pyconde.attendees',
'pyconde.events',
'pyconde.reviews',
'pyconde.schedule',
'pyconde.search',
'pyconde.helpers',
'pyconde.checkin',
'pyconde.lightningtalks',
]
MIDDLEWARE_CLASSES = [
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.locale.LocaleMiddleware',
'cms.middleware.page.CurrentPageMiddleware',
'cms.middleware.user.CurrentUserMiddleware',
'cms.middleware.toolbar.ToolbarMiddleware',
'cms.middleware.language.LanguageCookieMiddleware',
'social_auth.middleware.SocialAuthExceptionMiddleware',
]
TEMPLATE_CONTEXT_PROCESSORS = Configuration.TEMPLATE_CONTEXT_PROCESSORS + (
'django.core.context_processors.debug',
'django.core.context_processors.request',
'sekizai.context_processors.sekizai',
'pyconde.conference.context_processors.current_conference',
'pyconde.reviews.context_processors.review_roles',
# 'pyconde.context_processors.less_settings',
'social_auth.context_processors.social_auth_backends',
)
DATABASES = values.DatabaseURLValue(
'sqlite:///{0}/djep.db'.format(BASE_DIR),
environ_prefix='DJANGO')
# Disable south migrations during unittests
SOUTH_TESTS_MIGRATE = False
FIXTURE_DIRS = (
os.path.join(BASE_DIR, 'fixtures'),
)
# TODO: As soon as we move to foundation use
# https://pypi.python.org/pypi/crispy-forms-foundation
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# If the project uses Less.js, use the inline-JavaScript renderer in
# debug mode.
LESS_USE_DYNAMIC_IN_DEBUG = True
LOGGING = {
'version': 1,
'disable_existing_loggers': True
}
###########################################################################
#
# Debug settings
#
DEBUG = values.BooleanValue(False)
DEBUG_TOOLBAR_CONFIG = {'INTERCEPT_REDIRECTS': False}
    @property
    def TEMPLATE_DEBUG(self):
        """Keep template debugging in lockstep with the global DEBUG flag."""
        return self.DEBUG
    @property
    def THUMBNAIL_DEBUG(self):
        """Keep thumbnail debugging in lockstep with the global DEBUG flag."""
        return self.DEBUG
###########################################################################
#
# File settings
#
MEDIA_ROOT = values.Value()
STATIC_ROOT = values.Value()
MEDIA_URL = values.Value('/site_media/')
MEDIA_OPTIPNG_PATH = values.Value('optipng')
MEDIA_JPEGOPTIM_PATH = values.Value('jpegoptim')
STATIC_URL = values.Value('/static_media/')
STATICFILES_FINDERS = Configuration.STATICFILES_FINDERS + (
'pyconde.helpers.static.AppMediaDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
STATICFILES_DIRS = values.ListValue()
STATICI18N_ROOT = os.path.join(BASE_DIR, PROJECT_NAME, "core", "static")
COMPRESS_CSS_FILTERS = (
'compressor.filters.css_default.CssAbsoluteFilter',
'compressor.filters.cssmin.CSSMinFilter',
)
THUMBNAIL_PROCESSORS = (
'easy_thumbnails.processors.colorspace',
'easy_thumbnails.processors.autocrop',
'filer.thumbnail_processors.scale_and_crop_with_subject_location',
'easy_thumbnails.processors.filters',
)
THUMBNAIL_SIZE = 100
###########################################################################
#
# Profile settings
# Here we configure what profile module is used and other aspects of a
# registered user's profile.
#
USERPROFILES_CHECK_UNIQUE_EMAIL = True
USERPROFILES_DOUBLE_CHECK_EMAIL = False
USERPROFILES_DOUBLE_CHECK_PASSWORD = True
USERPROFILES_REGISTRATION_FULLNAME = True
USERPROFILES_USE_ACCOUNT_VERIFICATION = True
USERPROFILES_USE_EMAIL_VERIFICATION = True
USERPROFILES_USE_PROFILE = True
USERPROFILES_INLINE_PROFILE_ADMIN = True
USERPROFILES_USE_PROFILE_VIEW = False
USERPROFILES_REGISTRATION_FORM = 'pyconde.accounts.forms.ProfileRegistrationForm'
USERPROFILES_PROFILE_FORM = 'pyconde.accounts.forms.ProfileForm'
USERPROFILES_EMAIL_VERIFICATION_DONE_URL = 'userprofiles_profile_change'
AUTH_PROFILE_MODULE = 'accounts.Profile'
ACCOUNTS_FALLBACK_TO_GRAVATAR = False
CHILDREN_DATA_DISABLED = True
###########################################################################
#
# CMS Settings
#
CMS_PERMISSION = values.BooleanValue(False)
CMS_TEMPLATES = (
('cms/default.html', ugettext('Default template')),
('cms/start.html', ugettext('Start page template')),
('cms/page_templates/fullpage.html', ugettext('Full page width (schedule, ...)')),
)
# Docs at https://django-cms.readthedocs.org/en/develop/getting_started/configuration.html#cms-languages
CMS_LANGUAGES = {
1: [
{
'code': 'en',
'name': ugettext('English'),
'public': True,
},
{
'code': 'de',
'name': ugettext('German'),
'public': True,
},
],
'default': {
'fallbacks': ['en', 'de'],
'hide_untranslated': False,
}
}
WYM_TOOLS = ",\n".join([
"{'name': 'Bold', 'title': 'Strong', 'css': 'wym_tools_strong'}",
"{'name': 'Italic', 'title': 'Emphasis', 'css': 'wym_tools_emphasis'}",
"{'name': 'Superscript', 'title': 'Superscript', 'css': 'wym_tools_superscript'}",
"{'name': 'Subscript', 'title': 'Subscript', 'css': 'wym_tools_subscript'}",
"{'name': 'InsertOrderedList', 'title': 'Ordered_List', 'css': 'wym_tools_ordered_list'}",
"{'name': 'InsertUnorderedList', 'title': 'Unordered_List', 'css': 'wym_tools_unordered_list'}",
"{'name': 'Indent', 'title': 'Indent', 'css': 'wym_tools_indent'}",
"{'name': 'Outdent', 'title': 'Outdent', 'css': 'wym_tools_outdent'}",
"{'name': 'Undo', 'title': 'Undo', 'css': 'wym_tools_undo'}",
"{'name': 'Redo', 'title': 'Redo', 'css': 'wym_tools_redo'}",
"{'name': 'Paste', 'title': 'Paste_From_Word', 'css': 'wym_tools_paste'}",
"{'name': 'ToggleHtml', 'title': 'HTML', 'css': 'wym_tools_html'}",
"{'name': 'CreateLink', 'title': 'Link', 'css': 'wym_tools_link'}",
"{'name': 'Unlink', 'title': 'Unlink', 'css': 'wym_tools_unlink'}",
"{'name': 'InsertImage', 'title': 'Image', 'css': 'wym_tools_image'}",
"{'name': 'InsertTable', 'title': 'Table', 'css': 'wym_tools_table'}",
"{'name': 'Preview', 'title': 'Preview', 'css': 'wym_tools_preview'}",
])
TINYMCE_DEFAULT_CONFIG = {
'theme': 'advanced',
'relative_urls': False,
'theme_advanced_resizing': True,
'theme_advanced_buttons1_add': 'forecolor,backcolor',
'style_formats': [
{'title': u'Heading 2 (alternative)', 'block': 'h2', 'classes': 'alt'},
{'title': u'Heading 3 (alternative)', 'block': 'h3', 'classes': 'alt'},
]
}
CMSPLUGIN_NEWS_FEED_TITLE = u'EuroPython 2014 News'
CMSPLUGIN_NEWS_FEED_DESCRIPTION = u'News from EuroPython 2014'
SCHEDULE_ATTENDING_POSSIBLE = values.ListValue(['training'])
SCHEDULE_CACHE_SCHEDULE = values.BooleanValue(True)
SCHEDULE_CACHE_TIMEOUT = values.IntegerValue(300)
###########################################################################
#
# Account and profile settings
#
AVATAR_MIN_DIMENSION = values.TupleValue(converter=int)
AVATAR_MAX_DIMENSION = values.TupleValue(converter=int)
###########################################################################
#
# Proposal and schedule settings
#
ATTENDEES_PRODUCT_NUMBER_START = 1000
PROPOSALS_SUPPORT_ADDITIONAL_SPEAKERS = True
MAX_CHECKOUT_DURATION = 1800 # 30 minutes
# This configures the form that is used for each proposal type identified
# by their respective slug.
PROPOSALS_TYPED_SUBMISSION_FORMS = {
'training': 'pyconde.proposals.forms.TrainingSubmissionForm',
'talk': 'pyconde.proposals.forms.TalkSubmissionForm',
'poster': 'pyconde.proposals.forms.PosterSubmissionForm',
}
# These languages should be available when making a session proposal.
PROPOSAL_LANGUAGES = (
('de', ugettext('German')),
('en', ugettext('English')),
)
# This setting defines the language that should be pre-selected in the
# proposal submission form.
PROPOSAL_DEFAULT_LANGUAGE = 'en'
###########################################################################
#
# Review settings
#
REVIEWER_APPLICATION_OPEN = values.BooleanValue(False)
###########################################################################
#
# Search configuration
# If no other search backend is specified, Whoosh is used to make the setup
# as simple as possible. In production we will be using a Lucene-based
# backend like SOLR or ElasticSearch.
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
'PATH': os.path.join(BASE_DIR, 'whoosh_index'),
'STORAGE': 'file',
'INCLUDE_SPELLING': True,
'BATCH_SIZE': 100,
}
}
###########################################################################
#
# Auth settings
#
LOGIN_ERROR_URL = '/accounts/login/'
LOGIN_REDIRECT_URL = '/accounts/welcome/'
LOGOUT_REDIRECT_URL = '/'
SOCIAL_AUTH_PIPELINE = (
'social_auth.backends.pipeline.social.social_auth_user',
'social_auth.backends.pipeline.user.get_username',
'social_auth.backends.pipeline.user.create_user',
'social_auth.backends.pipeline.social.associate_user',
'social_auth.backends.pipeline.social.load_extra_data',
'social_auth.backends.pipeline.user.update_user_details',
'social_auth.backends.pipeline.misc.save_status_to_session',
'pyconde.accounts.pipeline.show_request_email_form',
'pyconde.accounts.pipeline.create_profile',
)
GITHUB_APP_ID = values.Value()
GITHUB_API_SECRET = values.Value()
GITHUB_EXTENDED_PERMISSIONS = ['user:email']
TWITTER_CONSUMER_KEY = values.Value()
TWITTER_CONSUMER_SECRET = values.Value()
GOOGLE_OAUTH2_CLIENT_ID = values.Value()
GOOGLE_OAUTH2_CLIENT_SECRET = values.Value()
FACEBOOK_APP_ID = values.Value()
FACEBOOK_API_SECRET = values.Value()
@property
def AUTHENTICATION_BACKENDS(self):
backends = ['django.contrib.auth.backends.ModelBackend']
if self.GITHUB_APP_ID and self.GITHUB_API_SECRET:
backends.insert(-1, 'social_auth.backends.contrib.github.GithubBackend')
if self.TWITTER_CONSUMER_KEY and self.WITTER_CONSUMER_SECRET:
backends.insert(-1, 'social_auth.backends.twitter.TwitterBackend')
if self.FACEBOOK_API_SECRET and self.FACEBOOK_APP_ID:
backends.insert(-1, 'social_auth.backends.facebook.FacebookBackend')
if self.GOOGLE_OAUTH2_CLIENT_SECRET and self.GOOGLE_OAUTH2_CLIENT_ID:
backends.insert(-1, 'social_auth.backends.google.GoogleOAuth2Backend')
return backends
###########################################################################
#
# Payment settings
#
PAYMILL_PRIVATE_KEY = values.Value()
PAYMILL_PUBLIC_KEY = values.Value()
PAYMILL_TRANSACTION_DESCRIPTION = 'EuroPython 2014: Purchase ID {purchase_pk}'
PAYMENT_METHODS = values.ListValue(['invoice', 'creditcard'])
PAYMENT_REMINDER_DUE_DATE_OFFSET = values.Value(14)
PAYMENT_REMINDER_LATEST_DUE_DATE = values.Value('')
PURCHASE_TERMS_OF_USE_URL = values.Value("https://ep2014.europython.eu/en/registration/terms-conditions/")
PURCHASE_INVOICE_DISABLE_RENDERING = values.BooleanValue(True)
# List of emails to be notified when a purchase has been made. PDF is send
# to these addresses, too.
PURCHASE_INVOICE_EXPORT_RECIPIENTS = values.ListValue([])
PURCHASE_INVOICE_FONT_CONFIG = values.DictValue({'de': {}, 'en': {}})
PURCHASE_INVOICE_FONT_ROOT = values.Value() # absolute path on the filesystem
PURCHASE_INVOICE_NUMBER_FORMAT = values.Value('INVOICE-{0:d}')
PURCHASE_INVOICE_ROOT = values.Value() # absolute path on the filesystem
PURCHASE_INVOICE_TEMPLATE_PATH = values.Value() # absolute path to invoice template
# Mapping from logo name (key, e.g. 'logo' or 'vbb') to the image file path
ATTENDEES_BADGE_LOGOS = values.DictValue({})
CACHES = values.DictValue({
'default': {
'BACKEND': 'redis_cache.cache.RedisCache',
'LOCATION': 'localhost:6379:0',
'OPTIONS': {
'PARSER_CLASS': 'redis.connection.HiredisParser'
},
},
})
BROKER_URL = values.Value('redis://localhost:6379/0')
LOCALE_PATHS = (
os.path.join(BASE_DIR, PROJECT_NAME, 'locale'),
)
# Default settings for statici18n
STATICI18N_OUTPUT_DIR = 'jsi18n'
STATICI18N_DOMAIN = 'djangojs'
STATICI18N_FILENAME_FUNCTION = 'statici18n.utils.default_filename'
class Dev(Base):
    """
    These settings are intended for the local development environment.
    """
    DEBUG = values.BooleanValue(True)
    COMPRESS_ENABLED = values.BooleanValue(False)
    # Port 1025 on localhost -- presumably a local debugging SMTP server
    # (e.g. `python -m smtpd`); confirm before relying on mail delivery.
    EMAIL_HOST = 'localhost'
    EMAIL_PORT = 1025
    MEDIA_ROOT = os.path.join(Base.BASE_DIR, 'site_media')
    STATIC_ROOT = os.path.join(Base.BASE_DIR, 'deployed_static_media')
    DEBUG_TOOLBAR_PATCH_SETTINGS = False
    INSTALLED_APPS = Base.INSTALLED_APPS + [
        'debug_toolbar',
        'django_extensions',
    ]
    MIDDLEWARE_CLASSES = [
        'debug_toolbar.middleware.DebugToolbarMiddleware'
    ] + Base.MIDDLEWARE_CLASSES
    PURCHASE_INVOICE_ROOT = os.path.join(Base.BASE_DIR, 'invoices')
    # Route all loggers to the console at DEBUG level (overrides the
    # minimal LOGGING dict defined on Base).
    LOGGING = {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'simple': {
                'format': '%(levelname)s %(module)s %(message)s'
            },
        },
        'handlers': {
            'console': {
                'level': 'DEBUG',
                'class': 'logging.StreamHandler',
                'formatter': 'simple'
            }
        },
        'loggers': {
            '': {
                'handlers': ['console'],
            },
        }
    }
    # SCHEDULE_CACHE_SCHEDULE = False
    SCHEDULE_CACHE_TIMEOUT = 60
class Testing(Dev):
    """Settings for running the automated test suite."""
    # Dummy/in-memory backends keep tests fast and fully isolated.
    CACHES = {
        'default': {
            'BACKEND': 'django.core.cache.backends.dummy.DummyCache'
        },
    }
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': ':memory:',
        }
    }
    SECRET_KEY = "testing_secret_key"
    FIXTURE_DIRS = (os.path.join(Base.BASE_DIR, 'fixtures'),)
    # Run Celery tasks synchronously so tests need no broker.
    CELERY_ALWAYS_EAGER = True
    # Skip PDF generation; tests only care about invoice bookkeeping.
    PURCHASE_INVOICE_DISABLE_RENDERING = True
class Staging(Base):
    """Staging settings: Base plus Sentry (raven) error reporting."""
    INSTALLED_APPS = Base.INSTALLED_APPS + [
        'raven.contrib.django.raven_compat',
    ]
    # NOTE(review): DSN etc. presumably supplied externally via
    # django-configurations values -- confirm deployment config.
    RAVEN_CONFIG = values.DictValue()
class Production(Base):
    """Production settings: Base plus Sentry (raven) error reporting."""
    INSTALLED_APPS = Base.INSTALLED_APPS + [
        'raven.contrib.django.raven_compat',
    ]
    # NOTE(review): DSN etc. presumably supplied externally via
    # django-configurations values -- confirm deployment config.
    RAVEN_CONFIG = values.DictValue()
| {
"content_hash": "3be59ce2dc3dc5edaca06dc78a0168e1",
"timestamp": "",
"source": "github",
"line_count": 611,
"max_line_length": 110,
"avg_line_length": 30.79050736497545,
"alnum_prop": 0.5864561739222878,
"repo_name": "EuroPython/djep",
"id": "ce4e9cee8af993274a23b80f75e03b9e4908525a",
"size": "18813",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pyconde/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "246835"
},
{
"name": "JavaScript",
"bytes": "112740"
},
{
"name": "Puppet",
"bytes": "2679"
},
{
"name": "Python",
"bytes": "1927106"
},
{
"name": "Ruby",
"bytes": "181"
},
{
"name": "Shell",
"bytes": "6515"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial migration: create the BuildData key/value lookup table."""
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='BuildData',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('project', models.CharField(max_length=200, db_index=True)),
                ('use', models.CharField(max_length=200, db_index=True)),
                ('role', models.CharField(max_length=200, db_index=True)),
                ('data_field', models.CharField(max_length=200, db_index=True)),
                ('value', models.CharField(max_length=1024)),
            ],
        ),
        # At most one value per (project, use, role, data_field) combination.
        migrations.AlterUniqueTogether(
            name='builddata',
            unique_together=set([('project', 'use', 'role', 'data_field')]),
        ),
    ]
| {
"content_hash": "0592dffa5e16f2588ccd4b79bd0e4024",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 114,
"avg_line_length": 34.407407407407405,
"alnum_prop": 0.5629709364908504,
"repo_name": "vegitron/aws-mess-around",
"id": "4272827281e08ed88d6936a0b316328d99416502",
"size": "953",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aws_mess_around/migrations/0001_initial.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "33"
},
{
"name": "Python",
"bytes": "55366"
}
],
"symlink_target": ""
} |
import os
from BeautifulSoup import BeautifulSoup
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
import re, string
from nltk.stem.porter import *
from nltk.stem import *
from nltk.stem import WordNetLemmatizer
wnl = WordNetLemmatizer()
def save(data, filename, dir=None):
    """Append each item of *data* to *filename*, one item per line.

    Creates the target directory first when it does not exist yet.

    Args:
        data: iterable of strings to write, one per line.
        filename: path of the file to append to.
        dir: optional directory to create instead of the one derived
            from *filename* (kept for backward compatibility).

    Returns:
        The *filename* that was written to.
    """
    # Ensure the parent directory exists up front instead of reacting to a
    # failed open(): the original fallback caught FileNotFoundError (which
    # does not exist on Python 2 -- the rest of this file is Python 2) and
    # then wrote the raw list object in binary mode, which raises TypeError.
    target_dir = dir if dir else os.path.dirname(filename)
    if target_dir and not os.path.isdir(target_dir):
        os.makedirs(target_dir)
    with open(filename, 'a') as f:
        for item in data:
            f.write(item + "\n")
    return filename
def save_data(data, filename):
    """Parse a job-posting HTML page and save one record to *filename*.

    The record is the four fields title, company, location and plain-text
    description joined by a double tab; missing header fields default to
    the empty string.  Write errors are deliberately ignored (best effort).
    """
    # NOTE(review): uses the BeautifulSoup 3 API (findAll) -- confirm the
    # installed version before upgrading.
    soup = BeautifulSoup(data)
    content = soup.findAll('div', {"class": "rich-text", "itemprop": "description"})
    content = [str(i) for i in content]
    plain_text = "".join(list(content)).decode("utf-8")
    top = soup.find("div", {"class": "top-row"})
    if top != None:
        title = top.find("h1", {"class": "title"}).text if top.find("h1", {"class": "title"}) else ''
        company = top.find("a", {"class": "company"}).text if top.find("a", {"class": "company"}) else ''
        location = top.find("span", {"itemprop": "jobLocation"}).text if top.find("span", {"itemprop": "jobLocation"}) else ''
    else:
        title = ""
        company = ""
        location = ""
    row = [title, company, location, plain_text]
    # Renamed from ``string`` to avoid shadowing the imported module.
    record = "\t\t".join(row).encode("utf-8")
    # The ``with`` block always releases the handle; the original had a
    # dangling ``f.close()`` after the except clause which raised NameError
    # whenever ``open()`` itself failed (f never bound).
    try:
        with open(filename, 'w') as f:
            f.write(record)
    except Exception:
        pass
def word_tokenize(text):
    """Return the description field of a record split on single spaces.

    *text* is a double-tab separated record (title, company, location,
    description); the fourth field is tokenized.  Malformed input yields
    "" rather than raising, matching the original fallback.
    """
    try:
        return text.split("\t\t")[3].split(" ")
    # Narrowed from a bare ``except:``: only a missing fourth field or a
    # non-string input are expected failure modes here.
    except (IndexError, AttributeError):
        return ""
def get_loc_job_title(text):
    """Return the (title, company, location) header fields of a record.

    *text* is a double-tab separated record as produced by save_data().
    Raises IndexError when fewer than three fields are present.
    """
    fields = text.split("\t\t")
    title, company, location = fields[0], fields[1], fields[2]
    return title, company, location
def stem(word):
    """Return the Porter stem of *word*, or "" when stemming fails.

    Porter is the active stemmer (Snowball was tried previously).
    """
    try:
        return PorterStemmer().stem(word)
    except:
        return ""
def clean_html(html):
    """Strip markup from *html* and return the remaining plain text.

    Removal happens in a deliberate order: script/style elements first
    (their bodies are not content), then comments (done before tag removal
    since comments may contain '>'), then all remaining tags, and finally
    stray high bytes and leftover whitespace are normalized.
    """
    # First we remove inline JavaScript/CSS:
    cleaned = re.sub(r"(?is)<(script|style).*?>.*?(</\1>)", "", html.strip())
    # Then we remove html comments. This has to be done before removing regular
    # tags since comments can contain '>' characters.
    cleaned = re.sub(r"(?s)<!--(.*?)-->[\n]?", "", cleaned)
    # Next we can remove the remaining tags:
    cleaned = re.sub(r"(?s)<.*?>", " ", cleaned)
    # Finally, we deal with whitespace: replace stray non-ASCII bytes, then
    # collapse the space characters they leave behind.
    # NOTE(review): the three patterns below look like they may have been
    # '&nbsp;' / double-space collapses before whitespace mangling --
    # confirm against the upstream source.
    cleaned = re.sub(r"[\x90-\xff]", " ", cleaned)
    cleaned = re.sub(r"[\x80]", " ", cleaned)
    cleaned = re.sub(r" ", " ", cleaned)
    cleaned = re.sub(r" ", " ", cleaned)
    cleaned = re.sub(r" ", " ", cleaned)
    return cleaned.strip()
    # The unreachable ``raise NotImplementedError`` that followed the return
    # in the original was dead code and has been removed.
def remove_punc(text):
    """Return *text* with every ASCII punctuation character removed."""
    return "".join(ch for ch in text if ch not in string.punctuation)
def plural_to_sing(word):
    """Return the singular (noun lemma) form of *word*, or "" on failure.

    Uses the module-level WordNet lemmatizer ``wnl``.
    """
    try:
        # The original also computed ``plural = word is not lemma`` -- an
        # identity (not equality) comparison whose result was never used;
        # both the bug and the dead variable have been removed.
        return wnl.lemmatize(word, 'n')
    except:
        # ``print(...)`` is valid on both Python 2 and 3, unlike the
        # original print statement.
        print("exception in plural_to_sing")
        return ""
def rm_stop_words(token):
    """Return *token* unless it is an English stopword, in which case ""."""
    return "" if token in stopwords.words("english") else token
| {
"content_hash": "e9b2d7f131bf2c8a0517c4e06434d43f",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 122,
"avg_line_length": 32.03,
"alnum_prop": 0.5897596003746488,
"repo_name": "hanhaohh/cvhelper",
"id": "42ce881d92d4869aa863156ec7d52837c0a8194f",
"size": "3203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/Crawler/basic.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1270"
},
{
"name": "OpenEdge ABL",
"bytes": "17864315"
},
{
"name": "Python",
"bytes": "34910"
}
],
"symlink_target": ""
} |
"""
Core: Hardtree module definition
"""
PROPERTIES = {
'title': 'Administration',
'details': 'Core Administration',
'url': '/admin/',
'system': True,
'type': 'user',
}
URL_PATTERNS = [
'^/admin/',
]
from treeio.core.cron import email_reply
#CRON = [email_reply, ]
CRON = []
| {
"content_hash": "7ec492736c3306bed0da5d0bfc15bd3a",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 47,
"avg_line_length": 20.736842105263158,
"alnum_prop": 0.4467005076142132,
"repo_name": "rogeriofalcone/treeio",
"id": "d1dc225bf90196227612cf0ad3eea4a80ac4b4fa",
"size": "507",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/hmodule.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from django.http import HttpResponseRedirect
class SpecialAuthenticationMiddleware(object):
    """Redirect unauthenticated non-LAN visitors to the admin login page."""

    def process_request(self, request):
        """Return a login redirect, or None when no authentication is needed."""
        remote_ip = request.META["REMOTE_ADDR"]
        # LAN (192.168.x.x) and localhost clients bypass authentication,
        # as do users who are already logged in.
        is_local = remote_ip == "127.0.0.1" or remote_ip.startswith("192.168.")
        if is_local or request.user.is_authenticated():
            return None
        path = request.get_full_path()
        # Never redirect the login form to itself.
        if path.startswith("/homecontroller/admin/login/"):
            return None
        return HttpResponseRedirect("/homecontroller/admin/login/?next=%s" % path)
| {
"content_hash": "fd25c25ad8d8834d3bc1458385bd3da8",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 87,
"avg_line_length": 40.63157894736842,
"alnum_prop": 0.6437823834196891,
"repo_name": "ojarva/home-info-display",
"id": "b242dcaeafc3983de8ad45f482058806c518f62b",
"size": "772",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homedisplay/display/middleware/authentication.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "22171"
},
{
"name": "CoffeeScript",
"bytes": "115283"
},
{
"name": "HTML",
"bytes": "51598"
},
{
"name": "JavaScript",
"bytes": "9902"
},
{
"name": "Python",
"bytes": "310675"
},
{
"name": "Shell",
"bytes": "1617"
}
],
"symlink_target": ""
} |
from db2charts.analysis.base import AnalysisBase
from db2charts.models import *
import json
class AnalysisCreate(AnalysisBase):
    """Builds preview chart data and persists report definitions.

    Note: this module uses Python 2 syntax (``except Exception, e`` and
    ``dict.has_key``).
    """
    # Synthetic column exposing a row-count aggregate next to real columns.
    record_count_col_name = 'recordCount'
    # Display label for the synthetic column ('记录数' is Chinese for "record count").
    record_count_translated_name = '记录数'
    def __init__(self):
        super(AnalysisCreate, self).__init__()
    def get_available_tables(self, db):
        """Return active analysis models of database *db* as name/label dicts."""
        aams = AvailableAnalysisModel.objects.filter(db_name=db, active=True)
        return [{'model_name':m.model_name, 'translated_name':m.translated_name} for m in aams]
    def get_translated_cols(self, model_name):
        """Return selectable columns for *model_name*, record count first."""
        aam = AvailableAnalysisModel.objects.filter(
            model_name=model_name, active=True).first()
        cols = [{
            'col_name': self.record_count_col_name,
            'translated_col_name': self.record_count_translated_name
        }]
        if aam:
            cols += aam.translated_cols
        return cols
    def fetch_preview_data(self, db_name, model_name, xAxis_group, yAxis):
        """Aggregate *yAxis* values of *model_name* grouped by each x-axis column.

        When *yAxis* is the synthetic record-count column each row counts 1;
        otherwise the row's attribute value is summed.  Produces one serie per
        entry in *xAxis_group*; returns the empty result when either axis
        selection is missing.
        """
        result = {
            'serie_name': [],
            'data': {
                'xAxis': [],
                'yAxis': [],
            },
            'count': 0,
        }
        module = self.modules[db_name]
        model_class = getattr(module, model_name)
        if not len(xAxis_group) or not yAxis:
            return result
        for xAxis in xAxis_group:
            dataSet = {
                'xAxis': [],
                'yAxis': [],
            }
            # Accept dotted names (e.g. "model.field"); keep the field part only.
            xAxis = xAxis.split('.')[-1]
            try:
                all_objects = model_class.objects.all()
                data = {}
                # Group rows in Python: count rows or sum the yAxis attribute.
                for obj in all_objects:
                    t = getattr(obj, xAxis)
                    if not data.has_key(t):
                        data[t] = getattr(obj, yAxis) if yAxis != self.record_count_col_name else 1
                    else:
                        data[t] += getattr(obj, yAxis) if yAxis != self.record_count_col_name else 1
                # Emit the groups sorted by their x value for a stable axis order.
                for (key, value) in sorted(data.items(), key=lambda x: x[0]):
                    dataSet['xAxis'].append(key)
                    dataSet['yAxis'].append(value)
            except Exception, e:
                raise e
            result['data']['xAxis'].append(dataSet['xAxis'])
            result['data']['yAxis'].append(dataSet['yAxis'])
            result['serie_name'].append(xAxis)
            result['count'] += 1
        return result
    def save_report(self, options):
        """Persist a chart definition from the client *options* dict; return its id."""
        report = AnalysisReportData()
        report.report_name = options['chartName']
        report.related_models = options['selectedTable']
        # Only the subset of client options needed to re-render the chart is stored.
        tobe_saved_options = {
            'db_name': options['selectedDB'],
            'model_name': options['selectedTable'],
            'chart_type': options['selectedChart'],
            'chart_options': {
                'xAxis': options['chartOptions']['selectedTypes'],
                'yAxis': options['chartOptions']['selectedData'],
            }
        }
        report.options = tobe_saved_options
        report.save()
        return report.id
| {
"content_hash": "c2dea7313ceb16a23f8d3df0aa4ede86",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 100,
"avg_line_length": 36.8780487804878,
"alnum_prop": 0.5231481481481481,
"repo_name": "Alfredx/django-db2charts",
"id": "dcf96f1472691c6d4f57b5af5fe373c0cd74e3b3",
"size": "3072",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "db2charts/analysis/create.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "283376"
},
{
"name": "HTML",
"bytes": "8621"
},
{
"name": "JavaScript",
"bytes": "5399839"
},
{
"name": "Python",
"bytes": "44515"
},
{
"name": "Vue",
"bytes": "25260"
}
],
"symlink_target": ""
} |
from .controller import AdminController
from .config import AdminConfig, CrudRestControllerConfig | {
"content_hash": "f71d4e39bd1d1f741f4af5cb654bd3e1",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 57,
"avg_line_length": 48.5,
"alnum_prop": 0.8865979381443299,
"repo_name": "TurboGears/tgext.admin",
"id": "1c66556ce77715e97ee8c6f070d820a8e7d4df92",
"size": "97",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tgext/admin/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "27956"
},
{
"name": "Makefile",
"bytes": "6778"
},
{
"name": "Python",
"bytes": "54025"
}
],
"symlink_target": ""
} |
from test.integration.base import DBTIntegrationTest, use_profile
class TestConcurrency(DBTIntegrationTest):
    """Run the project twice over the same seed and verify stable results."""
    @property
    def schema(self):
        # Unique schema name for this test case.
        return "concurrency_021"
    @property
    def models(self):
        return "models"
    @use_profile('postgres')
    def test__postgres__concurrency(self):
        """Both runs are expected to fail partially (expect_pass=False):
        7 results with one error and one skip, while the passing models
        still match the seed data."""
        self.run_sql_file("seed.sql")
        results = self.run_dbt(expect_pass=False)
        self.assertEqual(len(results), 7)
        self.assertTablesEqual("seed", "view_model")
        self.assertTablesEqual("seed", "dep")
        self.assertTablesEqual("seed", "table_a")
        self.assertTablesEqual("seed", "table_b")
        self.assertTableDoesNotExist("invalid")
        self.assertTableDoesNotExist("skip")
        # Second run after mutating the seed: same pass/fail breakdown.
        self.run_sql_file("update.sql")
        results, output = self.run_dbt_and_capture(expect_pass=False)
        self.assertEqual(len(results), 7)
        self.assertTablesEqual("seed", "view_model")
        self.assertTablesEqual("seed", "dep")
        self.assertTablesEqual("seed", "table_a")
        self.assertTablesEqual("seed", "table_b")
        self.assertTableDoesNotExist("invalid")
        self.assertTableDoesNotExist("skip")
        self.assertIn('PASS=5 WARN=0 ERROR=1 SKIP=1 TOTAL=7', output)
| {
"content_hash": "896d8a55c19caf828b7df605a6ad038b",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 69,
"avg_line_length": 32.205128205128204,
"alnum_prop": 0.6496815286624203,
"repo_name": "analyst-collective/dbt",
"id": "1a1dab724af7fc8240a43ebf1946c70137985e89",
"size": "1256",
"binary": false,
"copies": "1",
"ref": "refs/heads/ct-117-readme_docs",
"path": "test/integration/021_concurrency_tests/test_concurrency.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "769"
},
{
"name": "Python",
"bytes": "284372"
},
{
"name": "Shell",
"bytes": "625"
}
],
"symlink_target": ""
} |
"""Tests Nighthawk's basic functionality."""
import json
import logging
import math
import os
import pytest
import subprocess
import sys
import time
from threading import Thread
from test.integration.common import IpVersion
from test.integration.integration_test_fixtures import (
http_test_server_fixture, https_test_server_fixture, https_test_server_fixture,
multi_http_test_server_fixture, multi_https_test_server_fixture, quic_test_server_fixture,
server_config, server_config_quic)
from test.integration import asserts
from test.integration import utility
# TODO(oschaaf): we mostly verify stats observed from the client-side. Add expectations
# for the server side as well.
@pytest.mark.skipif(utility.isSanitizerRun(), reason="Unstable and very slow in sanitizer runs")
def test_http_h1(http_test_server_fixture):
  """Test http1 over plain http.

  Runs the CLI configured to use plain HTTP/1 against our test server, and sanity
  checks statistics from both client and server.
  """
  # The termination predicate stops the run once the 2xx counter exceeds 24,
  # i.e. after exactly 25 responses -- matching the equality asserts below.
  parsed_json, _ = http_test_server_fixture.runNighthawkClient([
      http_test_server_fixture.getTestServerRootUri(), "--duration", "100",
      "--termination-predicate", "benchmark.http_2xx:24"
  ])
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25)
  asserts.assertCounterEqual(counters, "upstream_cx_rx_bytes_total", 3400)
  # It is possible that the # of upstream_cx > # of backend connections for H1
  # as new connections will spawn if the existing clients cannot keep up with the RPS.
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_http1_total", 1)
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_total", 1)
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_tx_bytes_total", 500)
  asserts.assertCounterGreaterEqual(counters, "upstream_rq_pending_total", 1)
  asserts.assertCounterEqual(counters, "upstream_rq_total", 25)
  asserts.assertCounterEqual(counters, "default.total_match_count", 1)
  # All 25 responses are identical, so the size histograms must be flat:
  # min == mean == max and zero deviation.
  global_histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(parsed_json)
  asserts.assertEqual(int(global_histograms["benchmark_http_client.response_body_size"]["count"]),
                      25)
  asserts.assertEqual(int(global_histograms["benchmark_http_client.response_header_size"]["count"]),
                      25)
  asserts.assertEqual(
      int(global_histograms["benchmark_http_client.response_body_size"]["raw_mean"]), 10)
  asserts.assertEqual(
      int(global_histograms["benchmark_http_client.response_header_size"]["raw_mean"]), 97)
  asserts.assertEqual(int(global_histograms["benchmark_http_client.response_body_size"]["raw_min"]),
                      10)
  asserts.assertEqual(
      int(global_histograms["benchmark_http_client.response_header_size"]["raw_min"]), 97)
  asserts.assertEqual(int(global_histograms["benchmark_http_client.response_body_size"]["raw_max"]),
                      10)
  asserts.assertEqual(
      int(global_histograms["benchmark_http_client.response_header_size"]["raw_max"]), 97)
  asserts.assertEqual(
      int(global_histograms["benchmark_http_client.response_body_size"]["raw_pstdev"]), 0)
  asserts.assertEqual(
      int(global_histograms["benchmark_http_client.response_header_size"]["raw_pstdev"]), 0)
  asserts.assertGreaterEqual(len(counters), 12)
def _mini_stress_test(fixture, args):
  """Run a small overload test and sanity-check the resulting counters.

  Drives more RPS than the pool can handle (with a very small client-side
  queue when the caller configures one) and verifies that we observe both
  plenty of successful requests and -- in closed-loop mode -- time spent
  blocking in the sequencer.  Returns the client counter map.
  """
  output_json, _ = fixture.runNighthawkClient(args)
  counter_map = fixture.getNighthawkCounterMapFromJson(output_json)
  # A deliberately low bar of 100 requests, so this test succeeds on a
  # reasonable share of setups (hopefully practically all).
  MIN_EXPECTED_REQUESTS = 100
  asserts.assertCounterEqual(counter_map, "benchmark.http_2xx", MIN_EXPECTED_REQUESTS)
  pool_counter = "upstream_cx_http2_total" if "--h2" in args else "upstream_cx_http1_total"
  asserts.assertCounterEqual(counter_map, pool_counter, 1)
  histograms = fixture.getNighthawkGlobalHistogramsbyIdFromJson(output_json)
  blocking_count = int(histograms["sequencer.blocking"]["count"])
  if "--open-loop" in args:
    # Open-loop mode never blocks the sequencer.
    asserts.assertEqual(blocking_count, 0)
  else:
    asserts.assertGreaterEqual(blocking_count, 1)
  asserts.assertGreaterEqual(
      int(histograms["benchmark_http_client.request_to_response"]["count"]), 1)
  asserts.assertGreaterEqual(int(histograms["benchmark_http_client.latency_2xx"]["count"]), 1)
  return counter_map
# The mini stress tests below are executing in closed-loop mode. As we guard the pool against
# overflows, we can set fixed expectations with respect to overflows and anticipated pending
# totals.
def test_http_h1_mini_stress_test_with_client_side_queueing(http_test_server_fixture):
  """Run a max rps test with the h1 pool against our test server, using a small client-side queue."""
  counters = _mini_stress_test(http_test_server_fixture, [
      http_test_server_fixture.getTestServerRootUri(), "--rps", "999999", "--max-pending-requests",
      "10", "--connections", "1", "--duration", "100", "--termination-predicate",
      "benchmark.http_2xx:99", "--simple-warmup"
  ])
  # With a 10-deep pending queue on a single connection we expect queued
  # requests as well as connection overflows.
  asserts.assertCounterGreaterEqual(counters, "upstream_rq_pending_total", 11)
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_overflow", 10)
def test_http_h1_mini_stress_test_without_client_side_queueing(http_test_server_fixture):
  """Run a max rps test with the h1 pool against our test server, with no client-side queueing."""
  counters = _mini_stress_test(http_test_server_fixture, [
      http_test_server_fixture.getTestServerRootUri(), "--rps", "999999", "--connections", "1",
      "--duration", "100", "--termination-predicate", "benchmark.http_2xx:99"
  ])
  # Without a queue there is exactly one pending request and no overflow.
  asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1)
  asserts.assertNotIn("upstream_cx_overflow", counters)
def test_http_h2_mini_stress_test_with_client_side_queueing(http_test_server_fixture):
  """Run a max rps test with the h2 pool against our test server, using a small client-side queue."""
  counters = _mini_stress_test(http_test_server_fixture, [
      http_test_server_fixture.getTestServerRootUri(), "--rps", "999999", "--max-pending-requests",
      "10", "--h2", "--max-active-requests", "1", "--connections", "1", "--duration", "100",
      "--termination-predicate", "benchmark.http_2xx:99", "--simple-warmup"
  ])
  # H2 multiplexes on one connection, so overflow shows up on the request
  # (pending) side rather than as extra connections.
  asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1)
  asserts.assertCounterGreaterEqual(counters, "upstream_rq_pending_overflow", 10)
def test_http_h2_mini_stress_test_without_client_side_queueing(http_test_server_fixture):
  """Run a max rps test with the h2 pool against our test server, with no client-side queueing."""
  counters = _mini_stress_test(http_test_server_fixture, [
      http_test_server_fixture.getTestServerRootUri(), "--rps", "999999", "--h2",
      "--max-active-requests", "1", "--connections", "1", "--duration", "100",
      "--termination-predicate", "benchmark.http_2xx:99"
  ])
  # Without a queue there is exactly one pending request and no overflow.
  asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1)
  asserts.assertNotIn("upstream_rq_pending_overflow", counters)
@pytest.mark.skipif(not utility.isRunningInAzpCi(),
                    reason="Has very high failure rate in local executions.")
@pytest.mark.skipif(utility.isSanitizerRun(), reason="Unstable and very slow in sanitizer runs")
def test_http_h1_mini_stress_test_open_loop(http_test_server_fixture):
  """Run an H1 open loop stress test. We expect higher pending and overflow counts."""
  counters = _mini_stress_test(http_test_server_fixture, [
      http_test_server_fixture.getTestServerRootUri(), "--rps", "10000", "--max-pending-requests",
      "1", "--open-loop", "--max-active-requests", "1", "--connections", "1", "--duration", "100",
      "--termination-predicate", "benchmark.http_2xx:99", "--simple-warmup"
  ])
  # we expect pool overflows
  asserts.assertCounterGreater(counters, "benchmark.pool_overflow", 10)
@pytest.mark.skipif(not utility.isRunningInAzpCi(),
                    reason="Has very high failure rate in local executions.")
@pytest.mark.skipif(utility.isSanitizerRun(), reason="Unstable and very slow in sanitizer runs")
def test_http_h2_mini_stress_test_open_loop(http_test_server_fixture):
  """Run an H2 open loop stress test. We expect higher overflow counts."""
  counters = _mini_stress_test(http_test_server_fixture, [
      http_test_server_fixture.getTestServerRootUri(), "--rps", "10000", "--max-pending-requests",
      "1", "--h2", "--open-loop", "--max-active-requests", "1", "--duration", "100",
      "--termination-predicate", "benchmark.http_2xx:99", "--simple-warmup"
  ])
  # we expect pool overflows
  asserts.assertCounterGreater(counters, "benchmark.pool_overflow", 10)
def test_http_h2(http_test_server_fixture):
  """Test h2 over plain http.

  Runs the CLI configured to use h2c against our test server, and sanity
  checks statistics from both client and server.
  """
  # Termination predicate fires after 25 responses (counter > 24).
  parsed_json, _ = http_test_server_fixture.runNighthawkClient([
      "--h2",
      http_test_server_fixture.getTestServerRootUri(), "--max-active-requests", "1", "--duration",
      "100", "--termination-predicate", "benchmark.http_2xx:24", "--rps", "100"
  ])
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25)
  # H2 multiplexes all requests over a single connection.
  asserts.assertCounterEqual(counters, "upstream_cx_http2_total", 1)
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_rx_bytes_total", 1030)
  asserts.assertCounterEqual(counters, "upstream_cx_total", 1)
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_tx_bytes_total", 403)
  asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1)
  asserts.assertCounterEqual(counters, "upstream_rq_total", 25)
  asserts.assertCounterEqual(counters, "default.total_match_count", 1)
  asserts.assertGreaterEqual(len(counters), 12)
def test_http_concurrency(http_test_server_fixture):
  """Test that concurrency acts like a multiplier."""
  parsed_json, _ = http_test_server_fixture.runNighthawkClient([
      "--concurrency 4 --rps 100 --connections 1", "--duration", "100", "--termination-predicate",
      "benchmark.http_2xx:24",
      http_test_server_fixture.getTestServerRootUri()
  ])
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  # Quite a loose expectation, but this may fluctuate depending on server load.
  # Ideally we'd see 4 workers * 5 rps * 5s = 100 requests total
  # (each worker's termination predicate contributes 25 responses).
  asserts.assertCounterEqual(counters, "benchmark.http_2xx", 100)
  # Assert that we at least have 1 connection for each event loop (1*4). It is possible that the # of
  # upstream_cx > # of backend connections for H1 as new connections will spawn if the existing clients
  # cannot keep up with the RPS.
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_http1_total", 4)
@pytest.mark.parametrize('server_config',
                         ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"])
def test_https_h1(https_test_server_fixture):
  """Test h1 over https.

  Runs the CLI configured to use HTTP/1 over https against our test server, and sanity
  checks statistics from both client and server.
  """
  parsed_json, _ = https_test_server_fixture.runNighthawkClient([
      https_test_server_fixture.getTestServerRootUri(), "--connections", "1", "--rps", "100",
      "--duration", "100", "--termination-predicate", "benchmark.http_2xx:24"
  ])
  counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25)
  asserts.assertCounterEqual(counters, "upstream_cx_rx_bytes_total", 3400)
  # It is possible that the # of upstream_cx > # of backend connections for H1 as new connections
  # will spawn if the existing clients cannot keep up with the RPS.
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_http1_total", 1)
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_total", 1)
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_tx_bytes_total", 500)
  asserts.assertCounterGreaterEqual(counters, "upstream_rq_pending_total", 1)
  asserts.assertCounterEqual(counters, "upstream_rq_total", 25)
  # TLS handshake parameters negotiated with the test server.
  asserts.assertCounterEqual(counters, "ssl.ciphers.ECDHE-RSA-AES128-GCM-SHA256", 1)
  asserts.assertCounterEqual(counters, "ssl.curves.X25519", 1)
  asserts.assertCounterEqual(counters, "ssl.handshake", 1)
  asserts.assertCounterEqual(counters, "ssl.sigalgs.rsa_pss_rsae_sha256", 1)
  asserts.assertCounterEqual(counters, "ssl.versions.TLSv1.2", 1)
  asserts.assertCounterEqual(counters, "default.total_match_count", 1)
  asserts.assertEqual(len(counters), 17)
  # Cross-check the server side: it must have served exactly 25 requests.
  server_stats = https_test_server_fixture.getTestServerStatisticsJson()
  asserts.assertEqual(
      https_test_server_fixture.getServerStatFromJson(server_stats,
                                                      "http.ingress_http.downstream_rq_2xx"), 25)
@pytest.mark.parametrize('server_config',
                         ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"])
def test_https_h2(https_test_server_fixture):
  """Test http2 over https.

  Runs the CLI configured to use HTTP/2 (using https) against our test server, and sanity
  checks statistics from both client and server.
  """
  parsed_json, _ = https_test_server_fixture.runNighthawkClient([
      "--h2",
      https_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100",
      "--termination-predicate", "benchmark.http_2xx:24", "--max-active-requests", "1"
  ])
  counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25)
  asserts.assertCounterEqual(counters, "upstream_cx_http2_total", 1)
  # Through empirical observation, 1030 has been determined to be the minimum of bytes
  # we can expect to have received when execution has stopped.
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_rx_bytes_total", 1030)
  asserts.assertCounterEqual(counters, "upstream_cx_total", 1)
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_tx_bytes_total", 403)
  asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1)
  asserts.assertCounterEqual(counters, "upstream_rq_total", 25)
  # TLS handshake parameters negotiated with the test server.
  asserts.assertCounterEqual(counters, "ssl.ciphers.ECDHE-RSA-AES128-GCM-SHA256", 1)
  asserts.assertCounterEqual(counters, "ssl.curves.X25519", 1)
  asserts.assertCounterEqual(counters, "ssl.handshake", 1)
  asserts.assertCounterEqual(counters, "ssl.sigalgs.rsa_pss_rsae_sha256", 1)
  asserts.assertCounterEqual(counters, "ssl.versions.TLSv1.2", 1)
  asserts.assertCounterEqual(counters, "default.total_match_count", 1)
  asserts.assertEqual(len(counters), 17)
@pytest.mark.parametrize('server_config',
                         ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"])
def test_https_h2_multiple_connections(https_test_server_fixture):
  """Test that the experimental h2 pool uses multiple connections.

  The burst we send ensures we will need 10 connections right away, as we
  limit max active streams per connection to 1 by setting the experimental
  flag to use multiple h2 connections.
  """
  parsed_json, _ = https_test_server_fixture.runNighthawkClient([
      "--h2",
      https_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100",
      "--termination-predicate", "benchmark.http_2xx:99", "--max-active-requests", "10",
      "--max-pending-requests", "10", "--max-concurrent-streams", "1", "--burst-size", "10"
  ])
  counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", 100)
  # Empirical observation shows we may end up creating more than 10 connections.
  # This is stock Envoy h/2 pool behavior.
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_http2_total", 10)
def test_h3_quic(quic_test_server_fixture):
  """Test http3 quic.

  Runs the CLI configured to use HTTP/3 Quic against our test server, and sanity
  checks statistics from both client and server.
  """
  parsed_json, _ = quic_test_server_fixture.runNighthawkClient([
      "--protocol http3",
      quic_test_server_fixture.getTestServerRootUri(),
      "--rps",
      "100",
      "--duration",
      "100",
      "--termination-predicate",
      "benchmark.http_2xx:24",
      "--max-active-requests",
      "1",
      # Envoy doesn't support disabling certificate verification on Quic
      # connections, so the host in our requests has to match the hostname in
      # the leaf certificate.
      "--request-header",
      "Host:www.lyft.com"
  ])
  counters = quic_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25)
  # All 25 requests ride on a single HTTP/3 connection.
  asserts.assertCounterEqual(counters, "upstream_cx_http3_total", 1)
  asserts.assertCounterEqual(counters, "upstream_cx_total", 1)
  asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1)
  asserts.assertCounterEqual(counters, "upstream_rq_total", 25)
  asserts.assertCounterEqual(counters, "default.total_match_count", 1)
def test_h3_quic_with_custom_http3_protocol_options(quic_test_server_fixture):
  """Test http3 quic with custom http3 protocol options.

  Sets the maximum number of concurrent streams to one and verifies that
  Nighthawk uses multiple connections.
  """
  # One stream per connection forces the 10-request burst onto >= 10 connections.
  http3_protocol_options = "{quic_protocol_options:{max_concurrent_streams:1}}"
  parsed_json, _ = quic_test_server_fixture.runNighthawkClient([
      "--protocol http3",
      quic_test_server_fixture.getTestServerRootUri(),
      "--rps",
      "100",
      "--duration",
      "100",
      "--termination-predicate",
      "benchmark.http_2xx:99",
      "--max-active-requests",
      "10",
      "--max-pending-requests",
      "10",
      "--burst-size",
      "10",
      # Envoy doesn't support disabling certificate verification on Quic
      # connections, so the host in our requests has to match the hostname in
      # the leaf certificate.
      "--request-header",
      "Host:www.lyft.com",
      "--http3-protocol-options %s" % http3_protocol_options
  ])
  counters = quic_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", 100)
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_http3_total", 10)
def test_h3_quic_with_custom_upstream_bind_configuration(quic_test_server_fixture):
  """Test http3 quic with a custom upstream bind configuration.

  Runs the CLI configured to use HTTP/3 Quic against our test server, and sanity
  checks statistics from both client and server. Sets custom address to bind
  upstream requests to.
  """
  # Bind outgoing requests to the server's own IP (port 0 = ephemeral port).
  address = quic_test_server_fixture.server_ip
  upstream_bind_config = f"{{source_address:{{address:\"{address}\",port_value:0}}}}"
  parsed_json, _ = quic_test_server_fixture.runNighthawkClient([
      "--protocol http3",
      quic_test_server_fixture.getTestServerRootUri(),
      "--rps",
      "100",
      "--duration",
      "100",
      "--termination-predicate",
      "benchmark.http_2xx:24",
      "--max-active-requests",
      "1",
      # Envoy doesn't support disabling certificate verification on Quic
      # connections, so the host in our requests has to match the hostname in
      # the leaf certificate.
      "--request-header",
      "Host:www.lyft.com",
      "--upstream-bind-config %s" % upstream_bind_config
  ])
  counters = quic_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25)
  asserts.assertCounterEqual(counters, "upstream_cx_http3_total", 1)
def _do_tls_configuration_test(https_test_server_fixture, cli_parameter, use_h2, ciphers):
  """Test with different ciphers.

  For a given choice of (--tls-context, --transport-socket) x (H1, H2),
  run a series of traffic tests with different ciphers.

  Args:
    https_test_server_fixture: pytest.fixture that controls a test server and client
    cli_parameter: string, --tls-context or --transport-socket
    use_h2: boolean, whether to pass --h2
    ciphers: list[string], list of ciphers to use with TLS
  """
  if cli_parameter == "--tls-context":
    config_template = "{common_tls_context:{tls_params:{cipher_suites:[\"-ALL:%s\"]}}}"
  else:
    # Transport-socket variant needs the full typed_config wrapper.
    config_template = (
        "{name:\"envoy.transport_sockets.tls\",typed_config:{"
        "\"@type\":\"type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext\","
        "common_tls_context:{tls_params:{cipher_suites:[\"-ALL:%s\"]}}}}")
  protocol_args = ["--protocol", "http2"] if use_h2 else []
  for cipher in ciphers:
    parsed_json, _ = https_test_server_fixture.runNighthawkClient(protocol_args + [
        "--duration", "10", "--termination-predicate", "benchmark.http_2xx:0", cli_parameter,
        config_template % cipher,
        https_test_server_fixture.getTestServerRootUri()
    ])
    counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
    asserts.assertCounterGreaterEqual(counters, "ssl.ciphers." + cipher, 1)
@pytest.mark.parametrize('server_config',
                         ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"])
def test_https_h1_tls_context_configuration_rsa(https_test_server_fixture):
  """Test that specifying RSA compatible tls cipher suites works with the h1 pool."""
  _do_tls_configuration_test(https_test_server_fixture,
                             cli_parameter="--tls-context",
                             use_h2=False,
                             ciphers=["ECDHE-RSA-AES256-GCM-SHA384"])
@pytest.mark.parametrize('server_config',
                         ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"])
def test_https_h1_transport_socket_configuration_rsa(https_test_server_fixture):
  """Test that specifying RSA compatible tls cipher suites via transport socket works with the h1 pool."""
  _do_tls_configuration_test(https_test_server_fixture,
                             cli_parameter="--transport-socket",
                             use_h2=False,
                             ciphers=["ECDHE-RSA-AES256-GCM-SHA384"])
@pytest.mark.parametrize('server_config',
                         ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"])
def test_https_h2_tls_context_configuration_rsa(https_test_server_fixture):
  """Test that specifying RSA compatible tls cipher suites works with the h2 pool."""
  _do_tls_configuration_test(https_test_server_fixture,
                             cli_parameter="--tls-context",
                             use_h2=True,
                             ciphers=["ECDHE-RSA-AES256-GCM-SHA384"])
@pytest.mark.parametrize('server_config',
                         ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"])
def test_https_h2_transport_socket_configuration_rsa(https_test_server_fixture):
  """Test that specifying RSA compatible tls cipher suites via transport socket works with the h2 pool."""
  _do_tls_configuration_test(https_test_server_fixture,
                             cli_parameter="--transport-socket",
                             use_h2=True,
                             ciphers=["ECDHE-RSA-AES256-GCM-SHA384"])
@pytest.mark.parametrize(
    'server_config', ["nighthawk/test/integration/configurations/nighthawk_https_origin_dsa.yaml"])
def test_https_h1_tls_context_configuration_dsa(https_test_server_fixture):
  """Test that specifying DSA compatible tls cipher suites works with the h1 pool."""
  _do_tls_configuration_test(https_test_server_fixture,
                             "--tls-context",
                             use_h2=False,
                             ciphers=["ECDHE-ECDSA-AES256-GCM-SHA384"])
@pytest.mark.parametrize(
    'server_config', ["nighthawk/test/integration/configurations/nighthawk_https_origin_dsa.yaml"])
def test_https_h1_transport_socket_configuration_dsa(https_test_server_fixture):
  """Test that specifying DSA compatible tls cipher suites via transport socket works with the h1 pool."""
  _do_tls_configuration_test(https_test_server_fixture,
                             "--transport-socket",
                             use_h2=False,
                             ciphers=["ECDHE-ECDSA-AES256-GCM-SHA384"])
@pytest.mark.parametrize(
    'server_config', ["nighthawk/test/integration/configurations/nighthawk_https_origin_dsa.yaml"])
def test_https_h2_tls_context_configuration_dsa(https_test_server_fixture):
  """Test that specifying DSA compatible tls cipher suites works with the h2 pool."""
  _do_tls_configuration_test(https_test_server_fixture,
                             "--tls-context",
                             use_h2=True,
                             ciphers=["ECDHE-ECDSA-AES256-GCM-SHA384"])
@pytest.mark.parametrize(
    'server_config', ["nighthawk/test/integration/configurations/nighthawk_https_origin_dsa.yaml"])
def test_https_h2_transport_socket_configuration_dsa(https_test_server_fixture):
  """Test that specifying DSA compatible tls cipher suites via transport socket works with the h2 pool."""
  _do_tls_configuration_test(https_test_server_fixture,
                             "--transport-socket",
                             use_h2=True,
                             ciphers=["ECDHE-ECDSA-AES256-GCM-SHA384"])
@pytest.mark.parametrize('server_config',
                         ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"])
def test_https_prefetching(https_test_server_fixture):
  """Test we prefetch connections.

  We test for 1 second at 1 rps, which should
  result in 1 connection max without prefetching. However, we specify 50 connections
  and the prefetching flag, so we ought to see 50 http1 connections created.
  """
  client_args = [
      "--duration 1", "--rps 1", "--prefetch-connections", "--connections 50",
      https_test_server_fixture.getTestServerRootUri()
  ]
  result_json, _ = https_test_server_fixture.runNighthawkClient(client_args)
  counters = https_test_server_fixture.getNighthawkCounterMapFromJson(result_json)
  asserts.assertCounterEqual(counters, "upstream_cx_http1_total", 50)
@pytest.mark.parametrize('server_config',
                         ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"])
def test_https_log_verbosity(https_test_server_fixture):
  """Test that the specified log verbosity level is respected.

  This tests for a sentinel we know is only right when the level
  is set to 'trace'.
  """
  # TODO(oschaaf): this is kind of fragile. Can we improve?
  trace_level_sentinel = "nighthawk_service_zone"
  base_args = ["--duration 1", "--rps 1"]
  # At debug level, the trace-only sentinel must not appear.
  _, logs = https_test_server_fixture.runNighthawkClient(
      base_args + ["-v debug", https_test_server_fixture.getTestServerRootUri()])
  asserts.assertNotIn(trace_level_sentinel, logs)
  # At trace level, it must.
  _, logs = https_test_server_fixture.runNighthawkClient(
      base_args + ["-v trace", https_test_server_fixture.getTestServerRootUri()])
  asserts.assertIn(trace_level_sentinel, logs)
def test_dotted_output_format(http_test_server_fixture):
  """Test that we get the dotted string output format when requested, and ensure we get latency percentiles."""
  client_args = [
      "--duration 1", "--rps 10", "--output-format dotted",
      http_test_server_fixture.getTestServerRootUri()
  ]
  output, _ = http_test_server_fixture.runNighthawkClient(client_args, as_json=False)
  asserts.assertIn("global.benchmark_http_client.request_to_response.permilles-500.microseconds",
                   output)


# TODO(oschaaf): add percentiles to the gold testing in the C++ output formatter
# once the fortio formatter has landed (https://github.com/envoyproxy/nighthawk/pull/168)
def test_cli_output_format(http_test_server_fixture):
  """Test that we observe latency percentiles with CLI output."""
  output, _ = http_test_server_fixture.runNighthawkClient(
      ["--duration 1", "--rps 10",
       http_test_server_fixture.getTestServerRootUri()], as_json=False)
  # Both the latency section header and the percentile table must be present.
  for expected_fragment in ("Initiation to completion", "Percentile"):
    asserts.assertIn(expected_fragment, output)
@pytest.mark.parametrize(
    'filter_configs',
    ["{}", "{static_delay: \"0.01s\"}", "{emit_previous_request_delta_in_response_header: \"aa\"}"])
def test_request_body_gets_transmitted(http_test_server_fixture, filter_configs):
  """Test request body transmission handling code for our extensions.

  Ensure that the number of bytes we request for the request body gets reflected in the upstream
  connection transmitted bytes counter for h1 and h2.
  """

  def _assert_upload_expectations(fixture, parsed_json, expected_transmitted_bytes,
                                  expected_received_bytes):
    # Client side: bytes written onto the upstream connection.
    counters = fixture.getNighthawkCounterMapFromJson(parsed_json)
    asserts.assertCounterGreaterEqual(counters, "upstream_cx_tx_bytes_total",
                                      expected_transmitted_bytes)
    # Server side expectations start failing with larger upload sizes
    server_stats = fixture.getTestServerStatisticsJson()
    asserts.assertGreaterEqual(
        fixture.getServerStatFromJson(server_stats,
                                      "http.ingress_http.downstream_cx_rx_bytes_total"),
        expected_received_bytes)

  # TODO(#531): The dynamic-delay extension hangs unless we lower the request entity body size.
  upload_bytes = 1024 * 1024 if "static_delay" in filter_configs else 1024 * 1024 * 3
  requests = 10
  args = [
      http_test_server_fixture.getTestServerRootUri(), "--duration", "100", "--rps", "100",
      "--request-body-size",
      str(upload_bytes), "--termination-predicate",
      "benchmark.http_2xx:%s" % str(requests), "--connections", "1", "--request-method", "POST",
      "--max-active-requests", "1", "--request-header",
      "x-nighthawk-dynamic-delay-config:%s" % filter_configs
  ]
  # Test we transmit the expected amount of bytes with H1
  parsed_json, _ = http_test_server_fixture.runNighthawkClient(args)
  _assert_upload_expectations(http_test_server_fixture, parsed_json, upload_bytes * requests,
                              upload_bytes * requests)
  # Test we transmit the expected amount of bytes with H2
  parsed_json, _ = http_test_server_fixture.runNighthawkClient(args + ["--h2"])
  # We didn't reset the server in between, so our expectation for received bytes on the server side is raised.
  _assert_upload_expectations(http_test_server_fixture, parsed_json, upload_bytes * requests,
                              upload_bytes * requests * 2)
def test_http_h1_termination_predicate(http_test_server_fixture):
  """Test with a termination predicate.

  Should result in successful execution, with 10 successful requests (the
  termination predicate stops the run once benchmark.http_2xx exceeds 9).
  We would expect 2500 based on rps and duration.
  """
  parsed_json, _ = http_test_server_fixture.runNighthawkClient([
      http_test_server_fixture.getTestServerRootUri(), "--duration", "5", "--rps", "500",
      "--connections", "1", "--termination-predicate", "benchmark.http_2xx:9"
  ])
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterEqual(counters, "benchmark.http_2xx", 10)
def test_http_h1_failure_predicate(http_test_server_fixture):
  """Test with a failure predicate.

  Should result in failing execution: the failure predicate fires after a
  single successful request (threshold benchmark.http_2xx:0).
  """
  parsed_json, _ = http_test_server_fixture.runNighthawkClient([
      http_test_server_fixture.getTestServerRootUri(), "--duration", "5", "--rps", "500",
      "--connections", "1", "--failure-predicate", "benchmark.http_2xx:0"
  ],
                                                               expect_failure=True)
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterEqual(counters, "benchmark.http_2xx", 1)
def test_bad_arg_error_messages(http_test_server_fixture):
  """Test arguments that pass proto validation, but are found to be no good nonetheless, result in reasonable error messages."""
  _, err = http_test_server_fixture.runNighthawkClient(
      [http_test_server_fixture.getTestServerRootUri(), "--termination-predicate ", "a:a"],
      expect_failure=True,
      as_json=False)
  asserts.assertIn("Bad argument: Termination predicate 'a:a' has an out of range threshold.", err)
@pytest.mark.parametrize(
    'server_config',
    ["nighthawk/test/integration/configurations/nighthawk_5_listeners_http_origin.yaml"])
def test_multiple_listener_on_backend(http_test_server_fixture):
  """Test that we can load-test multiple listeners on a single backend.

  Runs the CLI configured to use plain HTTP/1 against a single test server, and sanity
  checks statistics from both client and server.
  """
  client_args = [
      "--multi-target-path", "/", "--duration", "100", "--termination-predicate",
      "benchmark.http_2xx:24"
  ]
  for uri in http_test_server_fixture.getAllTestServerRootUris():
    client_args += ["--multi-target-endpoint", uri.replace("http://", "").replace("/", "")]
  parsed_json, _ = http_test_server_fixture.runNighthawkClient(client_args)
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25)
  # Assert that we at least have 1 connection per backend. It is possible that
  # the # of upstream_cx > # of backend connections for H1 as new connections
  # will spawn if the existing clients cannot keep up with the RPS.
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_http1_total", 3)
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_total", 3)
  asserts.assertCounterGreaterEqual(counters, "upstream_rq_pending_total", 3)
  asserts.assertCounterGreater(counters, "upstream_cx_rx_bytes_total", 0)
  asserts.assertCounterGreater(counters, "upstream_cx_tx_bytes_total", 0)
  asserts.assertCounterEqual(counters, "upstream_rq_total", 25)
  asserts.assertCounterEqual(counters, "default.total_match_count", 5)
  for parsed_server_json in http_test_server_fixture.getAllTestServerStatisticsJsons():
    single_2xx = http_test_server_fixture.getServerStatFromJson(
        parsed_server_json, "http.ingress_http.downstream_rq_2xx")
    # Confirm that each backend receives some traffic
    asserts.assertGreaterEqual(single_2xx, 1)
def test_multiple_backends_http_h1(multi_http_test_server_fixture):
  """Test that we can load-test multiple backends on http.

  Runs the CLI configured to use plain HTTP/1 against multiple test servers, and sanity
  checks statistics from both client and server.
  """
  client_args = [
      "--multi-target-path", "/", "--duration", "100", "--termination-predicate",
      "benchmark.http_2xx:24"
  ]
  for uri in multi_http_test_server_fixture.getAllTestServerRootUris():
    client_args += ["--multi-target-endpoint", uri.replace("http://", "").replace("/", "")]
  parsed_json, _ = multi_http_test_server_fixture.runNighthawkClient(client_args)
  counters = multi_http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25)
  # Assert that we at least have 1 connection per backend. It is possible that
  # the # of upstream_cx > # of backend connections for H1 as new connections
  # will spawn if the existing clients cannot keep up with the RPS.
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_http1_total", 3)
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_total", 3)
  asserts.assertCounterGreaterEqual(counters, "upstream_rq_pending_total", 3)
  asserts.assertCounterGreater(counters, "upstream_cx_rx_bytes_total", 0)
  asserts.assertCounterGreater(counters, "upstream_cx_tx_bytes_total", 0)
  asserts.assertCounterEqual(counters, "upstream_rq_total", 25)
  asserts.assertCounterEqual(counters, "default.total_match_count", 3)
  for parsed_server_json in multi_http_test_server_fixture.getAllTestServerStatisticsJsons():
    single_2xx = multi_http_test_server_fixture.getServerStatFromJson(
        parsed_server_json, "http.ingress_http.downstream_rq_2xx")
    # Confirm that each backend receives some traffic
    asserts.assertGreaterEqual(single_2xx, 1)
@pytest.mark.parametrize('server_config',
                         ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"])
def test_multiple_backends_https_h1(multi_https_test_server_fixture):
  """Test that we can load-test multiple backends on https.

  Runs the CLI configured to use HTTP/1 with TLS against multiple test servers, and sanity
  checks statistics from both client and server.
  """
  client_args = [
      "--multi-target-use-https", "--multi-target-path", "/", "--duration", "100",
      "--termination-predicate", "benchmark.http_2xx:24"
  ]
  for uri in multi_https_test_server_fixture.getAllTestServerRootUris():
    client_args += ["--multi-target-endpoint", uri.replace("https://", "").replace("/", "")]
  parsed_json, _ = multi_https_test_server_fixture.runNighthawkClient(client_args)
  counters = multi_https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25)
  asserts.assertCounterGreater(counters, "upstream_cx_rx_bytes_total", 0)
  # Assert that we at least have 1 connection per backend. It is possible that
  # the # of upstream_cx > # of backend connections for H1 as new connections
  # will spawn if the existing clients cannot keep up with the RPS.
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_http1_total", 3)
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_total", 3)
  asserts.assertCounterGreaterEqual(counters, "upstream_rq_pending_total", 3)
  asserts.assertCounterGreater(counters, "upstream_cx_tx_bytes_total", 0)
  asserts.assertCounterEqual(counters, "upstream_rq_total", 25)
  asserts.assertCounterEqual(counters, "default.total_match_count", 3)
  for parsed_server_json in multi_https_test_server_fixture.getAllTestServerStatisticsJsons():
    single_2xx = multi_https_test_server_fixture.getServerStatFromJson(
        parsed_server_json, "http.ingress_http.downstream_rq_2xx")
    # Confirm that each backend receives some traffic
    asserts.assertGreaterEqual(single_2xx, 1)
@pytest.mark.parametrize('server_config',
                         ["nighthawk/test/integration/configurations/sni_origin.yaml"])
def test_https_h1_sni(https_test_server_fixture):
  """Test that SNI indication works on https/h1."""
  root_uri = https_test_server_fixture.getTestServerRootUri()
  # Verify success when we set the right host
  parsed_json, _ = https_test_server_fixture.runNighthawkClient([
      root_uri, "--rps", "100", "--duration", "100", "--termination-predicate",
      "benchmark.http_2xx:2", "--request-header", "host: sni.com"
  ])
  counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", 1)
  # It is possible that the # of upstream_cx > # of backend connections for H1
  # as new connections will spawn if the existing clients cannot keep up with the RPS.
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_http1_total", 1)
  asserts.assertCounterGreaterEqual(counters, "ssl.handshake", 1)
  # Verify failure when we set no host (will get plain http)
  parsed_json, _ = https_test_server_fixture.runNighthawkClient(
      [root_uri, "--rps", "20", "--duration", "100"], expect_failure=True)
  # Verify success when we use plain http and don't request the sni host
  parsed_json, _ = https_test_server_fixture.runNighthawkClient([
      root_uri.replace("https://", "http://"), "--rps", "100", "--duration", "20",
      "--termination-predicate", "benchmark.http_2xx:2"
  ],
                                                                expect_failure=False)
  counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", 1)
  # It is possible that the # of upstream_cx > # of backend connections for H1
  # as new connections will spawn if the existing clients cannot keep up with the RPS.
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_http1_total", 1)
  asserts.assertNotIn("ssl.handshake", counters)
@pytest.mark.parametrize('server_config',
                         ["nighthawk/test/integration/configurations/sni_origin.yaml"])
def test_https_h2_sni(https_test_server_fixture):
  """Test that SNI indication works on https/h2."""
  # Verify success when we set the right :authority: pseudo-header
  parsed_json, _ = https_test_server_fixture.runNighthawkClient([
      https_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100",
      "--termination-predicate", "benchmark.http_2xx:2", "--request-header", ":authority: sni.com",
      "--h2"
  ])
  counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", 1)
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_http2_total", 1)
  asserts.assertCounterEqual(counters, "ssl.handshake", 1)
  # Verify success when we set the right host
  parsed_json, _ = https_test_server_fixture.runNighthawkClient([
      https_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100",
      "--termination-predicate", "benchmark.http_2xx:2", "--request-header", "host: sni.com", "--h2"
  ])
  counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", 1)
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_http2_total", 1)
  asserts.assertCounterEqual(counters, "ssl.handshake", 1)
  # Verify failure when we set no host (will get plain http)
  parsed_json, _ = https_test_server_fixture.runNighthawkClient([
      https_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100", "--h2"
  ],
                                                                expect_failure=True)
  # Verify failure when we provide both host and :authority: (will get plain http)
  parsed_json, _ = https_test_server_fixture.runNighthawkClient([
      https_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100", "--h2",
      "--request-header", "host: sni.com", "--request-header", ":authority: sni.com"
  ],
                                                                expect_failure=True)
@pytest.fixture(scope="function", params=[1, 25])
def qps_parameterization_fixture(request):
  """Yield queries per second values to iterate test parameterization on."""
  yield request.param
@pytest.fixture(scope="function", params=[5, 10])
def duration_parameterization_fixture(request):
  """Yield duration values to iterate test parameterization on."""
  yield request.param
@pytest.mark.skipif(utility.isSanitizerRun(), reason="Unstable in sanitizer runs.")
def test_http_request_release_timing(http_test_server_fixture, qps_parameterization_fixture,
                                     duration_parameterization_fixture):
  """Test latency-sample-, query- and reply- counts in various configurations."""
  for concurrency in [1, 2]:
    run_args = [
        http_test_server_fixture.getTestServerRootUri(),
        "--duration", str(duration_parameterization_fixture),
        "--rps", str(qps_parameterization_fixture),
        "--concurrency", str(concurrency),
    ]
    parsed_json, _ = http_test_server_fixture.runNighthawkClient(run_args)
    histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(parsed_json)
    counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
    global_result = http_test_server_fixture.getGlobalResults(parsed_json)
    actual_duration = utility.get_execution_duration_from_global_result_json(global_result)
    # Ensure Nighthawk managed to execute for at least some time.
    assert actual_duration >= 1
    # The actual duration is a float; flooring it here allows us to use
    # the GreaterEqual matchers below.
    expected_requests = qps_parameterization_fixture * concurrency * math.floor(actual_duration)
    for histogram_id in ("benchmark_http_client.request_to_response",
                         "benchmark_http_client.queue_to_connect",
                         "benchmark_http_client.latency_2xx"):
      asserts.assertGreaterEqual(int(histograms[histogram_id]["count"]), expected_requests)
    asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", expected_requests)
    # Give system resources some time to recover after the last execution.
    time.sleep(2)
def _send_sigterm(process):
# Sleep for a while, under tsan the client needs a lot of time
# to start up. 10 seconds has been determined to work through
# emperical observation.
time.sleep(10)
process.terminate()
def test_cancellation_with_infinite_duration(http_test_server_fixture):
  """Test that we can use signals to cancel execution."""
  client_cmd = [
      http_test_server_fixture.nighthawk_client_path, "--concurrency", "2",
      http_test_server_fixture.getTestServerRootUri(), "--no-duration", "--output-format", "json"
  ]
  client_process = subprocess.Popen(client_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  # Fire the termination signal from a background thread once the client had time to start.
  Thread(target=(lambda: _send_sigterm(client_process))).start()
  stdout, _ = client_process.communicate()
  client_process.wait()
  asserts.assertEqual(client_process.returncode, 0)
  parsed_json = json.loads(stdout.decode('utf-8'))
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterEqual(counters, "graceful_stop_requested", 2)
  asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", 1)
@pytest.mark.parametrize('server_config', [
    "nighthawk/test/integration/configurations/nighthawk_http_origin.yaml",
    "nighthawk/test/integration/configurations/nighthawk_track_timings.yaml"
])
def test_http_h1_response_header_latency_tracking(http_test_server_fixture, server_config):
  """Test emission and tracking of response header latencies.

  Run the CLI configured to track latencies delivered by response header from the test-server.
  Ensure that the origin_latency_statistic histogram receives the correct number of inputs.
  """
  parsed_json, _ = http_test_server_fixture.runNighthawkClient([
      http_test_server_fixture.getTestServerRootUri(), "--connections", "1", "--rps", "100",
      "--duration", "100", "--termination-predicate", "benchmark.http_2xx:99",
      "--latency-response-header-name", "x-origin-request-receipt-delta"
  ])
  histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(parsed_json)
  asserts.assertEqual(int(histograms["benchmark_http_client.latency_2xx"]["count"]), 100)
  # Verify behavior is correct both with and without the timing filter enabled.
  timing_filter_enabled = "nighthawk_track_timings.yaml" in server_config
  expected_histogram_count = 99 if timing_filter_enabled else 0
  asserts.assertEqual(
      int(histograms["benchmark_http_client.origin_latency_statistic"]["count"]),
      expected_histogram_count)
def _run_client_with_args(args):
  """Run the nighthawk_client binary with the given argument string.

  Returns whatever utility.run_binary_with_args yields; callers unpack it
  as (exit_code, output).
  """
  return utility.run_binary_with_args("nighthawk_client", args)
def test_client_help():
  """Test that passing --help behaves as expected."""
  exit_code, output = _run_client_with_args("--help")
  asserts.assertEqual(exit_code, 0)
  asserts.assertIn("USAGE", output)
def test_client_bad_arg():
  """Test that passing bad arguments behaves as expected."""
  exit_code, output = _run_client_with_args("127.0.0.1 --foo")
  asserts.assertEqual(exit_code, 1)
  asserts.assertIn("PARSE ERROR: Argument: --foo", output)
def test_client_cli_bad_uri(http_test_server_fixture):
  """Test that passing a bad URI to the client results in nice behavior."""
  _, err = http_test_server_fixture.runNighthawkClient(["http://http://foo"],
                                                       expect_failure=True,
                                                       as_json=False)
  asserts.assertIn("Invalid target URI", err)
@pytest.mark.parametrize('server_config',
                         ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"])
def test_drain(https_test_server_fixture):
  """Test that the pool drain timeout is effective, and we terminate in a timely fashion.

  Sets up the test server to delay replies 100 seconds. Our execution will only last 20 seconds, so we
  expect to observe no replies. Termination should be cut short by the drain timeout, which means
  that we should have results in approximately execution duration + drain timeout = 25 seconds.
  (the pool drain timeout is hard coded to 5 seconds as of writing this).
  If drain timeout is reached, a message will be logged to the user.
  """
  parsed_json, logs = https_test_server_fixture.runNighthawkClient([
      https_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "20",
      "--request-header", "x-nighthawk-test-server-config: {static_delay: \"100s\"}"
  ])
  counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_http1_total", 1)
  asserts.assertNotIn("benchmark.http_2xx", counters)
  asserts.assertIn("Wait for the connection pool drain timed out, proceeding to hard shutdown",
                   logs)
| {
"content_hash": "4d8b411cf1fad2074b6079450cd3025c",
"timestamp": "",
"source": "github",
"line_count": 990,
"max_line_length": 128,
"avg_line_length": 50.73232323232323,
"alnum_prop": 0.7082926829268292,
"repo_name": "envoyproxy/nighthawk",
"id": "d061bb502cd75193f3ffd4c55001646ff4bad665",
"size": "50225",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test/integration/test_integration_basics.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1310041"
},
{
"name": "HTML",
"bytes": "5944"
},
{
"name": "Python",
"bytes": "190310"
},
{
"name": "Shell",
"bytes": "37671"
},
{
"name": "Starlark",
"bytes": "103582"
}
],
"symlink_target": ""
} |
'''
Created on Jul 9, 2013
@author: Chunwei Yan @ pkusz
@mail: yanchunwei@outlook.com
'''
from __future__ import division
from random_forest_model import *
| {
"content_hash": "9c66d3098ed8a936217effe14a766ee8",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 33,
"avg_line_length": 16,
"alnum_prop": 0.70625,
"repo_name": "Superjom/sematic",
"id": "35e81e27e380e9bbd70cf10cea0e113a526711a9",
"size": "184",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gbm/model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44805"
},
{
"name": "Shell",
"bytes": "19179"
}
],
"symlink_target": ""
} |
from tg import tmpl_context as c
from mock import patch
from tg import config
from allura.lib import helpers as h
from allura.tests import decorators as td
from allura import model as M
from alluratest.controller import TestRestApiBase
from forgetracker import model as TM
class TestTrackerApiBase(TestRestApiBase):

    def setup_method(self, method):
        super().setup_method(method)
        self.setup_with_tools()

    @td.with_tool('test', 'Tickets', 'bugs',
                  TicketMonitoringEmail='test@localhost',
                  TicketMonitoringType='AllTicketChanges')
    def setup_with_tools(self):
        h.set_context('test', 'bugs', neighborhood='Projects')
        self.tracker_globals = c.app.globals

    def create_ticket(self, summary=None, status=None):
        # Post a minimal ticket form; the first configured open status is used.
        ticket_params = {
            'summary': summary or 'test new ticket',
            'status': self.tracker_globals.open_status_names.split()[0],
            'labels': '',
            'description': '',
            'assigned_to': '',
            'custom_fields._milestone': '',
        }
        return self.api_post('/rest/p/test/bugs/new',
                             wrap_args='ticket_form',
                             params=ticket_params,
                             status=status)
class TestRestNewTicket(TestTrackerApiBase):

    def test_new_ticket(self):
        summary = 'test new ticket'
        ticket_params = {
            'summary': summary,
            'status': self.tracker_globals.open_status_names.split()[0],
            'labels': 'foo,bar',
            'description': 'descr',
            'assigned_to': '',
            'custom_fields._milestone': '',
        }
        ticket_view = self.api_post('/rest/p/test/bugs/new',
                                    wrap_args='ticket_form',
                                    params=ticket_params)
        data = ticket_view.json['ticket']
        assert data['status'] == 'open', data
        assert data['summary'] == 'test new ticket', data
        assert data['reported_by'] == 'test-admin'
        assert data['labels'] == ['foo', 'bar'], data
        assert data['description'] == 'descr', data
        assert data['private'] is False, data

    def test_invalid_ticket(self):
        self.app.get('/rest/p/test/bugs/2', status=404)

    def test_create_limit(self):
        self.create_ticket(summary='First ticket')
        # Set rate limit to unlimit
        with h.push_config(config, **{'forgetracker.rate_limits': '{}'}):
            summary = 'Second ticket'
            self.create_ticket(summary=summary)
            assert TM.Ticket.query.get(summary=summary) is not None
        # Set rate limit to 1 in first hour of project
        with h.push_config(config, **{'forgetracker.rate_limits': '{"3600": 1}'}):
            summary = 'Third ticket'
            self.create_ticket(summary=summary, status=429)
            assert TM.Ticket.query.get(summary=summary) is None
class TestRestUpdateTicket(TestTrackerApiBase):

    def setup_method(self, method):
        super().setup_method(method)
        self.ticket_args = self.create_ticket().json['ticket']

    def test_update_ticket(self):
        args = dict(self.ticket_args, summary='test update ticket', labels='',
                    assigned_to=self.ticket_args['assigned_to_id'] or '')
        # Server-managed fields must not be posted back on save.
        for read_only_key in ('ticket_num', 'assigned_to_id', 'created_date',
                              'reported_by', 'reported_by_id', '_id', 'votes_up',
                              'votes_down', 'discussion_thread'):
            del args[read_only_key]
        args['private'] = str(args['private'])
        args['discussion_disabled'] = str(args['discussion_disabled'])
        ticket_view = self.api_post(
            '/rest/p/test/bugs/1/save', wrap_args='ticket_form', params=h.encode_keys(args))
        assert ticket_view.status_int == 200, ticket_view.showbrowser()
        ticket_json = ticket_view.json['ticket']
        assert int(ticket_json['ticket_num']) == 1
        assert ticket_json['summary'] == 'test update ticket', ticket_json
class TestRestIndex(TestTrackerApiBase):

    def setup_method(self, method):
        super().setup_method(method)
        self.create_ticket()

    def test_ticket_index(self):
        tickets = self.api_get('/rest/p/test/bugs/')
        assert len(tickets.json['tickets']) == 1, tickets.json
        assert (tickets.json['tickets'][0]
                == dict(ticket_num=1, summary='test new ticket')), tickets.json['tickets'][0]
        options = tickets.json['tracker_config']['options']
        assert options['mount_point'] == 'bugs'
        assert options['TicketMonitoringType'] == 'AllTicketChanges'
        assert options['EnableVoting']
        assert options['TicketMonitoringEmail'] == 'test@localhost'
        assert options['mount_label'] == 'Tickets'
        changes_bin = tickets.json['saved_bins'][0]
        assert changes_bin['sort'] == 'mod_date_dt desc'
        assert changes_bin['terms'] == '!status:closed && !status:wont-fix'
        assert changes_bin['summary'] == 'Changes'
        assert len(changes_bin) == 4
        assert tickets.json['milestones'][0]['name'] == '1.0'
        assert tickets.json['milestones'][1]['name'] == '2.0'

    def test_ticket_index_noauth(self):
        tickets = self.api_get('/rest/p/test/bugs', user='*anonymous')
        assert 'TicketMonitoringEmail' not in tickets.json['tracker_config']['options']
        # make sure it didn't get removed from the db too
        ticket_config = M.AppConfig.query.get(
            project_id=c.project._id, tool_name='tickets')
        assert (ticket_config.options.get('TicketMonitoringEmail') ==
                'test@localhost')

    @td.with_tool('test', 'Tickets', 'dummy')
    def test_move_ticket_redirect(self):
        project = M.Project.query.get(shortname='test')
        dummy_tracker = project.app_instance('dummy')
        self.app.post(
            '/p/test/bugs/1/move',
            params={'tracker': str(dummy_tracker.config._id)}).follow()
        ticket = self.api_get('/rest/p/test/bugs/1/')
        assert ticket.request.path == '/rest/p/test/dummy/1/'
class TestRestDiscussion(TestTrackerApiBase):
    """REST API: posting and replying on a ticket's discussion thread."""

    def setup_method(self, method):
        super().setup_method(method)
        self.ticket_args = self.create_ticket().json['ticket']

    def test_post(self):
        r = self.api_get('/rest/p/test/bugs/1/')
        thread_id = r.json['ticket']['discussion_thread']['_id']
        thread_url = '/rest/p/test/bugs/_discuss/thread/%s/' % thread_id
        post = self.api_post(thread_url + 'new',
                             text='This is a comment', wrap_args=None)
        thread = self.api_get(thread_url)
        assert len(thread.json['thread']['posts']) == 1, thread.json
        assert post.json['post']['text'] == 'This is a comment', post.json
        reply_url = '/rest/p/test/bugs/_discuss/thread/{}/{}/reply'.format(
            thread.json['thread']['_id'], post.json['post']['slug'])
        reply = self.api_post(reply_url, text='This is a reply', wrap_args=None)
        assert reply.json['post']['text'] == 'This is a reply', reply.json
        thread = self.api_get(thread_url)
        assert len(thread.json['thread']['posts']) == 2, thread.json
class TestRestSearch(TestTrackerApiBase):
    """REST API: ticket search with paged_search mocked out."""

    @property
    def ticket(self):
        # Canned ticket returned by the mocked search below.
        return TM.Ticket(
            ticket_num=5, summary='our test ticket',
            status='open', labels=['tiny', 'minor'])

    @patch('forgetracker.model.Ticket.paged_search')
    def test_no_criteria(self, paged_search):
        paged_search.return_value = dict(tickets=[self.ticket])
        r = self.api_get('/rest/p/test/bugs/search')
        assert r.status_int == 200
        found = r.json['tickets'][0]
        assert found['summary'] == 'our test ticket'
        assert found['ticket_num'] == 5
        assert found['status'] == 'open'
        assert found['labels'] == ['tiny', 'minor']
        # Heavyweight fields are excluded from search results.
        assert 'description' not in r.json
        assert 'discussion_thread' not in r.json

    @patch('forgetracker.model.Ticket.paged_search')
    def test_some_criteria(self, paged_search):
        q = 'labels:testing && status:open'
        paged_search.return_value = dict(
            tickets=[self.ticket], sort='status', limit=2, count=1, page=0, q=q)
        r = self.api_get('/rest/p/test/bugs/search', q=q, sort='status', limit='2')
        assert r.status_int == 200
        # Paging metadata is echoed back in the response.
        assert r.json['limit'] == 2
        assert r.json['q'] == q
        assert r.json['sort'] == 'status'
        assert r.json['count'] == 1
        assert r.json['page'] == 0
        found = r.json['tickets'][0]
        assert found['summary'] == 'our test ticket'
        assert found['ticket_num'] == 5
        assert found['status'] == 'open'
        assert found['labels'] == ['tiny', 'minor']
        assert 'description' not in r.json
        assert 'discussion_thread' not in r.json
class TestRestHasAccess(TestTrackerApiBase):
    """REST API: the has_access permission-check endpoint."""

    def test_has_access_no_params(self):
        # Both `user` and `perm` parameters are required.
        for query in ('', '?user=root', '?perm=read'):
            self.api_get('/rest/p/test/bugs/has_access' + query, status=404)

    def test_has_access_unknown_params(self):
        """Unknown user and/or permission always False for has_access API"""
        for query in ('user=babadook&perm=read', 'user=test-user&perm=jump'):
            r = self.api_get('/rest/p/test/bugs/has_access?' + query,
                             user='root')
            assert r.status_int == 200
            assert r.json['result'] is False

    def test_has_access_not_admin(self):
        """
        User which has no 'admin' permission on neighborhood can't use
        has_access API
        """
        self.api_get(
            '/rest/p/test/bugs/has_access?user=test-admin&perm=admin',
            user='test-user',
            status=403)

    def test_has_access(self):
        r = self.api_get(
            '/rest/p/test/bugs/has_access?user=test-admin&perm=delete',
            user='root')
        assert r.status_int == 200
        assert r.json['result'] is True
        r = self.api_get(
            '/rest/p/test/bugs/has_access?user=test-user&perm=delete',
            user='root')
        assert r.status_int == 200
        assert r.json['result'] is False
| {
"content_hash": "8b8d0a23cd45765548d1b11d337bb0ea",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 111,
"avg_line_length": 40.72075471698113,
"alnum_prop": 0.5793716986377537,
"repo_name": "apache/allura",
"id": "85cfc77eb36568f5009d7aac58af29d593550a03",
"size": "11661",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ForgeTracker/forgetracker/tests/functional/test_rest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6142"
},
{
"name": "CSS",
"bytes": "181457"
},
{
"name": "Dockerfile",
"bytes": "4748"
},
{
"name": "HTML",
"bytes": "867332"
},
{
"name": "JavaScript",
"bytes": "1191836"
},
{
"name": "Makefile",
"bytes": "6248"
},
{
"name": "Python",
"bytes": "4499987"
},
{
"name": "RAML",
"bytes": "27600"
},
{
"name": "Roff",
"bytes": "41"
},
{
"name": "Ruby",
"bytes": "1280"
},
{
"name": "SCSS",
"bytes": "27742"
},
{
"name": "Shell",
"bytes": "131207"
},
{
"name": "XSLT",
"bytes": "3357"
}
],
"symlink_target": ""
} |
import cv2
import glob
import math
import os
import sys
import tempfile
import numpy as np
from PIL import Image
from scipy import ndimage
from cnn import get_num, get_name
BUF = 5
def gap_left(hist, x, th, K=10):
    """
    Checks if this x-coordinate marks the start of a new word/block.

    True when column x+1 has content (above threshold) while the K columns
    ending at x (clipped at index 0) are all empty.

    :param hist: distribution of pixels.
    :param x: x-coordinate.
    :param th: threshold value.
    :param K: number of columns of empty pixels to consider as new word/block.
    :return: whether this x-coordinate marks the start of a new word/block.
    """
    if hist[x + 1] <= th:
        return False
    start = max(x - K + 1, 0)
    return all(column <= th for column in hist[start:x + 1])
def gap_right(hist, x, th, W, K=10):
    """
    Checks if this x-coordinate marks the end of a word/block.

    True when column x-1 has content (above threshold) while the K columns
    starting at x (clipped at W and at the end of hist) are all empty.

    :param hist: distribution of pixels.
    :param x: x-coordinate.
    :param th: threshold value.
    :param W: rightmost column index to consider.
    :param K: number of columns of empty pixels to consider as new word/block.
    :return: whether this x-coordinate marks the end of a word/block.
    """
    if hist[x - 1] <= th:
        return False
    for offset in range(K):
        col = x + offset
        if col > W:
            break
        if col < len(hist) and hist[col] > th:
            return False
    return True
def straighten(img):
    """
    Deskews the input image based on the angle of its largest contour.

    :param img: binarized input image (foreground pixels non-zero).
    :return: deskewed image, or the input unchanged if no contour is found.
    """
    contours, _ = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    # Pick the contour enclosing the largest area; it should outline the
    # main page content.
    max_area = 0
    max_contour = None
    for contour in contours:
        area = cv2.contourArea(contour)
        if area > max_area:
            max_area = area
            max_contour = contour
    if max_contour is None:
        # Blank image: nothing to deskew (previously this crashed in minAreaRect).
        return img
    # minAreaRect reports angles in [-90, 0); fold steep angles back into a
    # small correction around horizontal.
    rect = cv2.minAreaRect(max_contour)
    angle = rect[2]
    if angle < -45:
        angle = (90 + angle)
    # rotate the image to deskew it
    (h, w) = img.shape[:2]
    center = (w // 2, h // 2)
    M = cv2.getRotationMatrix2D(center, angle, 1.0)
    # BUGFIX: rotate the function argument `img`, not the module-level
    # `threshed` global this function previously (accidentally) depended on.
    rotated = cv2.warpAffine(img, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)
    return rotated
def sort_contours(cnts):
    """
    Sorts the contours in left-to-right order (based on x coordinate).

    :param cnts: list of contours.
    :return: tuple of (sorted contours, matching bounding boxes).
    """
    boxes = [cv2.boundingRect(contour) for contour in cnts]
    # Sort contour/box pairs by the box's x coordinate (element 0).
    ordered = sorted(zip(cnts, boxes), key=lambda pair: pair[1][0])
    cnts, boxes = zip(*ordered)
    return (cnts, boxes)
def get_best_shift(img):
    """
    Finds x and y units to shift the image by so it is centered.

    :param img: 2-D array of pixel intensities.
    :return: (shiftx, shifty) integer offsets that move the image's center
        of mass to its geometric center.
    """
    # `ndimage.measurements` was a deprecated alias namespace (removed in
    # modern SciPy); call center_of_mass from scipy.ndimage directly.
    cy, cx = ndimage.center_of_mass(img)

    rows, cols = img.shape
    shiftx = int(np.round(cols / 2.0 - cx))
    shifty = int(np.round(rows / 2.0 - cy))

    return shiftx, shifty
def shift(img, sx, sy):
    """
    Shifts the image by the given x and y units.

    :param img: input image.
    :param sx: x units to shift by.
    :param sy: y units to shift by.
    :return: shifted image.
    """
    height, width = img.shape
    # 2x3 affine translation matrix: identity rotation plus (sx, sy) offset.
    translation = np.float32([[1, 0, sx], [0, 1, sy]])
    return cv2.warpAffine(img, translation, (width, height))
def process_num(gray):
    """
    Process an input image of a handwritten number in the same way the MNIST dataset was processed:
    a 20x20 glyph centered by mass inside a 28x28 frame.

    :param gray: the input grayscaled image (background 0, strokes non-zero).
    :return: the processed 28x28 image.
    """
    gray = cv2.resize(gray, (28, 28))
    # strip away empty rows and columns from all sides
    while np.sum(gray[0]) == 0:
        gray = gray[1:]
    while np.sum(gray[:, 0]) == 0:
        gray = np.delete(gray, 0, 1)
    while np.sum(gray[-1]) == 0:
        gray = gray[:-1]
    while np.sum(gray[:, -1]) == 0:
        gray = np.delete(gray, -1, 1)
    # scale the longer side to 20 pixels, preserving the aspect ratio
    rows, cols = gray.shape
    if rows > cols:
        factor = 20.0 / rows
        rows = 20
        cols = int(round(cols * factor))
    else:
        factor = 20.0 / cols
        cols = 20
        rows = int(round(rows * factor))
    gray = cv2.resize(gray, (cols, rows))
    # pad the image symmetrically (ceil/floor split) up to 28x28
    colsPadding = (int(math.ceil((28 - cols) / 2.0)), int(math.floor((28 - cols) / 2.0)))
    rowsPadding = (int(math.ceil((28 - rows) / 2.0)), int(math.floor((28 - rows) / 2.0)))
    gray = np.pad(gray, (rowsPadding, colsPadding), 'constant')
    # shift the image so the written number's center of mass is centered
    shiftx, shifty = get_best_shift(gray)
    gray = shift(gray, shiftx, shifty)
    return gray
def process_char(gray):
    """
    Process an input image of a handwritten character in the same way the EMNIST dataset was processed:
    thickened, cropped, mass-centered, square-padded and downscaled to 28x28.

    :param gray: the input grayscaled image (background 0, strokes non-zero).
    :return: the processed 28x28 image.
    """
    gray = cv2.resize(gray, (128, 128))
    # thicken the lines in the image (net one dilation) and soften edges
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    gray = cv2.dilate(gray, kernel, iterations=2)
    gray = cv2.erode(gray, kernel, iterations=1)
    gray = cv2.GaussianBlur(gray, (0, 0), 1)
    # strip away empty rows and columns from all sides
    while np.sum(gray[0]) == 0:
        gray = gray[1:]
    while np.sum(gray[:, 0]) == 0:
        gray = np.delete(gray, 0, 1)
    while np.sum(gray[-1]) == 0:
        gray = gray[:-1]
    while np.sum(gray[:, -1]) == 0:
        gray = np.delete(gray, -1, 1)
    # shift the image so the written character's center of mass is centered
    shiftx, shifty = get_best_shift(gray)
    gray = shift(gray, shiftx, shifty)
    # pad the shorter side out to a square with a 2px margin on the longer
    # side, then downscale the square to the final 28x28 output
    rows, cols = gray.shape
    pad = 2
    if rows > cols:
        length = rows
        rowsPadding = (pad, pad)
        colsPadding = (length - cols + pad, length - cols + pad)
    else:
        length = cols
        colsPadding = (pad, pad)
        rowsPadding = (length - rows + pad, length - rows + pad)
    gray = np.pad(gray, (rowsPadding, colsPadding), 'constant')
    gray = cv2.resize(gray, (28, 28))
    return gray
def find_boxes(img):
    """
    Detects box(square) shapes in the input image via morphological
    extraction of long vertical and horizontal lines.

    :param img: binarized input image.
    :return: binary image containing only the outlines of boxes from the
        original image.
    """
    # Kernels sized relative to the image width: long thin kernels pick out
    # straight lines, the 3x3 kernel is for cleanup.
    kernel_length = np.array(img).shape[1] // 75
    verticle_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, kernel_length))
    hori_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_length, 1))
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    # Detect vertical and horizontal lines in the image (erode removes
    # everything but long runs; dilate restores their original extent)
    img_temp1 = cv2.erode(img, verticle_kernel, iterations=2)
    verticle_lines_img = cv2.dilate(img_temp1, verticle_kernel, iterations=2)
    img_temp2 = cv2.erode(img, hori_kernel, iterations=2)
    horizontal_lines_img = cv2.dilate(img_temp2, hori_kernel, iterations=2)
    # Weighting parameters, this will decide the quantity of an image to be added to make a new image.
    alpha = 0.5
    beta = 1.0 - alpha
    # Add the vertical and horizontal lines images to get a third image as summation.
    img_final_bin = cv2.addWeighted(verticle_lines_img, alpha, horizontal_lines_img, beta, 0.0)
    # Invert, clean up, and re-binarize with Otsu so box outlines come out
    # as solid foreground.
    img_final_bin = cv2.erode(~img_final_bin, kernel, iterations=2)
    (_, img_final_bin) = cv2.threshold(img_final_bin, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    return img_final_bin
def extract_char(img, crop_dir, num=True):
    """
    Takes a block of boxed handwritten characters, crops each box's content
    and writes it to crop_dir as a zero-padded, numbered 28x28 PNG.

    :param img: input image of block of handwritten characters.
    :param crop_dir: directory the per-box processed crops are written into.
    :param num: if the characters are numeric (selects MNIST- vs
        EMNIST-style preprocessing).
    :return: a list of indices (1-based box numbers) of where the spaces
        occur in the input.
    """
    img_final_bin = find_boxes(img)
    # Find contours for image, which should detect all the boxes
    contours, hierarchy = cv2.findContours(img_final_bin, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    (contours, boundingBoxes) = sort_contours(contours)
    # Find the convex hull of each contour to get the correct outline of the box/square
    new_contours = []
    for k in range(len(contours)):
        new_contours.append(cv2.convexHull(contours[k], returnPoints=True))
    box_num = 0
    reached_x = 0  # rightmost x-coordinate covered by an accepted box so far
    spaces = []
    for c in new_contours:
        x, y, w, h = cv2.boundingRect(c)
        # check if box's edges are less than half the height of image (not likely to be a box with handwritten char)
        if w < np.array(img).shape[0] // 2 or h < np.array(img).shape[0] // 2:
            continue
        # check if this is an inner contour who's area has already been covered by another contour
        if x + w // 2 < reached_x:
            continue
        # check the contour has a square-like shape
        if abs(w - h) < abs(min(0.5*w, 0.5*h)):
            box_num += 1
            cropped = img[y:y + h, x:x + w]
            resized = cv2.resize(cropped, (28, 28))
            # check if this is an empty box (space): fewer than 40 lit pixels
            pts = cv2.findNonZero(resized)
            if pts is None or len(pts) < 40:
                spaces.append(box_num)
                continue
            if num:
                new_img = process_num(cropped)
            else:
                new_img = process_char(cropped)
            cv2.imwrite(crop_dir + '/' + str(box_num).zfill(2) + '.png', new_img)
            reached_x = x + w
    return spaces
if __name__ == '__main__':
    input_filename = sys.argv[1]
    # read input and convert to grayscale
    img = cv2.imread(input_filename)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # invert image (white strokes on black) and thicken pixel lines
    th, threshed = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    threshed = cv2.dilate(threshed, kernel, iterations=3)
    threshed = cv2.erode(threshed, kernel, iterations=3)
    # deskew image
    rotated = straighten(threshed)
    # find and draw the upper and lower boundary of each lines
    # (row-averaged pixel histogram; a line of text starts where the average
    # crosses above th and ends where it drops back below)
    hist = cv2.reduce(rotated, 1, cv2.REDUCE_AVG).reshape(-1)
    th = 2  # NOTE(review): re-uses `th` (previously the Otsu threshold) as the histogram threshold
    H, W = img.shape[:2]
    uppers = [y for y in range(H-1) if hist[y]<=th and hist[y+1]>th]
    lowers = [y for y in range(H-1) if hist[y]>th and hist[y+1]<=th]
    for i in range(min(len(uppers), len(lowers))):
        # isolate each line of text (this first crop from `threshed` is only
        # used to build the column histogram; the crop from `rotated` below
        # replaces it for the actual character extraction)
        line = threshed[uppers[i]-BUF:lowers[i]+BUF, 0:W].copy()
        hist = cv2.reduce(line, 0, cv2.REDUCE_AVG).reshape(-1)
        H, W = line.shape[:2]
        lefts = [x for x in range(W-1) if gap_left(hist, x, th)]
        rights = [x for x in range(W-1) if gap_right(hist, x, th, W)]
        line = rotated[uppers[i]-BUF:lowers[i]+BUF, 0:W].copy()
        # ensure first right coordinate is after first left coordinate
        while lefts[0] > rights[0]:
            rights.pop(0)
        # go through each connected word/box
        for j in range(min(len(lefts), len(rights))):
            # look for connected boxes that contain handwritten characters
            # (wide spans -- more than 5x the line height -- are box rows)
            if rights[j] - lefts[j] > 5 * H:
                word = line[0:H, lefts[j]-BUF:rights[j]+BUF].copy()
                hist = cv2.reduce(word, 1, cv2.REDUCE_AVG).reshape(-1)  # NOTE(review): this histogram is never used
                # Run both character and digit recognition over the same
                # word crop, each in its own throw-away directory.
                with tempfile.TemporaryDirectory() as tmp_dir:
                    with tempfile.TemporaryDirectory(dir=tmp_dir) as img_dir:
                        spaces = extract_char(word, img_dir, num=False)
                        get_name(tmp_dir, img_dir, spaces)
                with tempfile.TemporaryDirectory() as tmp_dir:
                    with tempfile.TemporaryDirectory(dir=tmp_dir) as img_dir:
                        spaces = extract_char(word, img_dir, num=True)
                        get_num(tmp_dir, img_dir, spaces)
| {
"content_hash": "08dc20f838a5bd2c99946ba19492f766",
"timestamp": "",
"source": "github",
"line_count": 335,
"max_line_length": 116,
"avg_line_length": 34.84477611940299,
"alnum_prop": 0.6108112738798938,
"repo_name": "benjaminvialle/Markus",
"id": "5441296dbc61902d46a4c45b18408046ba73168b",
"size": "11673",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/scanner/read_chars.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1222"
},
{
"name": "CSS",
"bytes": "82626"
},
{
"name": "HTML",
"bytes": "279440"
},
{
"name": "Haskell",
"bytes": "3679"
},
{
"name": "Java",
"bytes": "16402"
},
{
"name": "JavaScript",
"bytes": "417036"
},
{
"name": "Python",
"bytes": "148372"
},
{
"name": "Racket",
"bytes": "5729"
},
{
"name": "Ruby",
"bytes": "1811403"
},
{
"name": "Shell",
"bytes": "11471"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """
    Django-generated migration: sets default ordering for DictionaryField
    and redefines the field_type choice list.
    """

    dependencies = [
        # Must be applied after the previous wmdadict migration.
        ('wmdadict', '0004_auto_20170817_0819'),
    ]

    operations = [
        # Order DictionaryField querysets by label by default.
        migrations.AlterModelOptions(
            name='dictionaryfield',
            options={'ordering': ['label']},
        ),
        # Redefine the single-character field_type choices (N/A/S/M/D/F/U).
        migrations.AlterField(
            model_name='dictionaryfield',
            name='field_type',
            field=models.CharField(choices=[('N', 'Numeric'), ('A', 'Alphanumeric'), ('S', 'List Single'), ('M', 'List Multiple'), ('D', 'Date'), ('F', 'Free Form'), ('U', 'Undecided')], max_length=1),
        ),
    ]
| {
"content_hash": "e3fae8edb6627b30cf361eb48a168626",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 201,
"avg_line_length": 30.59090909090909,
"alnum_prop": 0.5601783060921248,
"repo_name": "antmont/wmda-stuff",
"id": "36c76967a6ea72a0e0f479485f64231d5118335e",
"size": "746",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wmdadict/migrations/0005_auto_20170817_0906.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "254"
},
{
"name": "HTML",
"bytes": "44178"
},
{
"name": "JavaScript",
"bytes": "659"
},
{
"name": "Python",
"bytes": "67721"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import csv
import itertools
import re
from mptt.forms import TreeNodeMultipleChoiceField
from django import forms
from django.conf import settings
from django.core.validators import URLValidator
from django.urls import reverse_lazy
# (value, label) pairs for the fixed color palette used by ColorSelect.
# Values are 6-digit hex RGB strings without the leading '#'.
COLOR_CHOICES = (
    ('aa1409', 'Dark red'),
    ('f44336', 'Red'),
    ('e91e63', 'Pink'),
    ('ff66ff', 'Fuschia'),  # NOTE(review): label typo ("Fuchsia") kept for compatibility
    ('9c27b0', 'Purple'),
    ('673ab7', 'Dark purple'),
    ('3f51b5', 'Indigo'),
    ('2196f3', 'Blue'),
    ('03a9f4', 'Light blue'),
    ('00bcd4', 'Cyan'),
    ('009688', 'Teal'),
    ('2f6a31', 'Dark green'),
    ('4caf50', 'Green'),
    ('8bc34a', 'Light green'),
    ('cddc39', 'Lime'),
    ('ffeb3b', 'Yellow'),
    ('ffc107', 'Amber'),
    ('ff9800', 'Orange'),
    ('ff5722', 'Dark orange'),
    ('795548', 'Brown'),
    ('c0c0c0', 'Light grey'),
    ('9e9e9e', 'Grey'),
    ('607d8b', 'Dark grey'),
    ('111111', 'Black'),
)

# Patterns matching a bracketed range expression such as "[0-3,5]".
# Raw strings avoid the invalid escape sequences (\[ \d) that the previous
# non-raw literals produced (DeprecationWarning/SyntaxWarning on Python 3).
NUMERIC_EXPANSION_PATTERN = r'\[((?:\d+[?:,-])+\d+)\]'
IP4_EXPANSION_PATTERN = r'\[((?:[0-9]{1,3}[?:,-])+[0-9]{1,3})\]'
IP6_EXPANSION_PATTERN = r'\[((?:[0-9a-f]{1,4}[?:,-])+[0-9a-f]{1,4})\]'
def parse_numeric_range(string, base=10):
    """
    Expand a numeric range (continuous or not) into a decimal or
    hexadecimal list, as specified by the base parameter
      '0-3,5' => [0, 1, 2, 3, 5]
      '2,8-b,d,f' => [2, 8, 9, a, b, d, f]

    :param string: Comma-separated values and/or dash-separated sub-ranges.
    :param base: Numeric base used to parse the values (e.g. 10 or 16).
    :return: Sorted list of unique integers covered by the range.
    :raises ValueError: If a value cannot be parsed in the given base.
    """
    values = set()
    for dash_range in string.split(','):
        try:
            begin, end = dash_range.split('-')
        except ValueError:
            # A lone value is a one-element range.
            begin = end = dash_range
        begin, end = int(begin.strip(), base=base), int(end.strip(), base=base) + 1
        values.update(range(begin, end))
    # Sort for a deterministic ascending order; the previous list(set(...))
    # order was implementation-defined.
    return sorted(values)
def expand_numeric_pattern(string):
    """
    Expand a numeric pattern into a list of strings. Examples:
      'ge-0/0/[0-3,5]' => ['ge-0/0/0', 'ge-0/0/1', 'ge-0/0/2', 'ge-0/0/3', 'ge-0/0/5']
      'xe-0/[0,2-3]/[0-7]' => ['xe-0/0/0', 'xe-0/0/1', 'xe-0/0/2', ... 'xe-0/3/5', 'xe-0/3/6', 'xe-0/3/7']
    """
    # Split off the first [..] range; everything after it may itself contain
    # further ranges, handled recursively.
    lead, pattern, remnant = re.split(NUMERIC_EXPANSION_PATTERN, string, maxsplit=1)
    for value in parse_numeric_range(pattern):
        if re.search(NUMERIC_EXPANSION_PATTERN, remnant):
            for suffix in expand_numeric_pattern(remnant):
                yield "{}{}{}".format(lead, value, suffix)
        else:
            yield "{}{}{}".format(lead, value, remnant)
def expand_ipaddress_pattern(string, family):
    """
    Expand an IP address pattern into a list of strings. Examples:
      '192.0.2.[1,2,100-250,254]/24' => ['192.0.2.1/24', '192.0.2.2/24', '192.0.2.100/24' ... '192.0.2.250/24', '192.0.2.254/24']
      '2001:db8:0:[0,fd-ff]::/64' => ['2001:db8:0:0::/64', '2001:db8:0:fd::/64', ... '2001:db8:0:ff::/64']

    :param string: IP address pattern containing one or more [...] ranges.
    :param family: IP address family; must be 4 or 6.
    :raises ValueError: If family is not 4 or 6 (raised on first iteration).
    """
    if family == 4:
        regex = IP4_EXPANSION_PATTERN
        base = 10
        fmt = 'd'   # decimal octets
    elif family == 6:
        regex = IP6_EXPANSION_PATTERN
        base = 16
        fmt = 'x'   # hexadecimal hextets
    else:
        # ValueError is more specific than the bare Exception previously
        # raised and remains compatible with callers catching Exception.
        raise ValueError("Invalid IP address family: {}".format(family))
    lead, pattern, remnant = re.split(regex, string, maxsplit=1)
    for i in parse_numeric_range(pattern, base):
        if re.search(regex, remnant):
            # Recurse to expand any remaining range in the suffix.
            for suffix in expand_ipaddress_pattern(remnant, family):
                yield ''.join([lead, format(i, fmt), suffix])
        else:
            yield ''.join([lead, format(i, fmt), remnant])
def add_blank_choice(choices):
    """
    Add a blank choice to the beginning of a choices list.

    :param choices: iterable of (value, label) pairs.
    :return: tuple with a (None, '---------') entry prepended.
    """
    blank = ((None, '---------'),)
    return blank + tuple(choices)
#
# Widgets
#
class SmallTextarea(forms.Textarea):
    """
    Textarea subclass with no behavioral changes; exists as a distinct widget
    type (presumably so templates/CSS can render it smaller -- TODO confirm).
    """
    pass
class ColorSelect(forms.Select):
    """
    Extends the built-in Select widget to colorize each <option>.
    """
    # Custom option template (presumably renders each option in its color;
    # see colorselect_option.html).
    option_template_name = 'colorselect_option.html'

    def __init__(self, *args, **kwargs):
        # The choice list is always the fixed color palette; any
        # caller-supplied 'choices' kwarg is overwritten here.
        kwargs['choices'] = COLOR_CHOICES
        super(ColorSelect, self).__init__(*args, **kwargs)
class BulkEditNullBooleanSelect(forms.NullBooleanSelect):
    """
    NullBooleanSelect variant that relabels the three options as
    '---------', 'Yes' and 'No' (presumably '---------' means "no change"
    in a bulk-edit form).
    """
    def __init__(self, *args, **kwargs):
        super(BulkEditNullBooleanSelect, self).__init__(*args, **kwargs)

        # Override the built-in choice labels; '1'/'2'/'3' are the values
        # the parent widget assigns to unknown/true/false.
        self.choices = (
            ('1', '---------'),
            ('2', 'Yes'),
            ('3', 'No'),
        )
class SelectWithDisabled(forms.Select):
    """
    Modified the stock Select widget to accept choices using a dict() for a label. The dict for each option must include
    'label' (string) and 'disabled' (boolean).
    """
    # Custom option template (expected to honor the 'disabled' flag; see
    # selectwithdisabled_option.html).
    option_template_name = 'selectwithdisabled_option.html'
class ArrayFieldSelectMultiple(SelectWithDisabled, forms.SelectMultiple):
    """
    MultiSelect widget for a SimpleArrayField. Choices must be populated on the widget.

    Converts between the field's single delimited string and the multiple
    selected options in the rendered widget.
    """
    def __init__(self, *args, **kwargs):
        # Delimiter used to join/split the array values (defaults to ',').
        self.delimiter = kwargs.pop('delimiter', ',')
        super(ArrayFieldSelectMultiple, self).__init__(*args, **kwargs)

    def optgroups(self, name, value, attrs=None):
        # Split the delimited string of values into a list
        # NOTE(review): assumes `value` is a non-empty list whose first item
        # is the delimited string -- confirm against Django's widget API.
        value = value[0].split(self.delimiter)
        return super(ArrayFieldSelectMultiple, self).optgroups(name, value, attrs)

    def value_from_datadict(self, data, files, name):
        # Condense the list of selected choices into a delimited string
        data = super(ArrayFieldSelectMultiple, self).value_from_datadict(data, files, name)
        return self.delimiter.join(data)
class APISelect(SelectWithDisabled):
    """
    A select widget populated via an API call

    :param api_url: API URL
    :param display_field: (Optional) Field to display for child in selection list. Defaults to `name`.
    :param disabled_indicator: (Optional) Mark option as disabled if this field equates true.
    """
    def __init__(self, api_url, display_field=None, disabled_indicator=None, *args, **kwargs):
        super(APISelect, self).__init__(*args, **kwargs)

        # All configuration is passed to the front end via HTML attributes.
        self.attrs['class'] = 'api-select'
        # Prefix with the deployment's BASE_PATH so the client hits the right
        # endpoint when the app is served from a subdirectory.
        self.attrs['api-url'] = '/{}{}'.format(settings.BASE_PATH, api_url.lstrip('/'))  # Inject BASE_PATH
        if display_field:
            self.attrs['display-field'] = display_field
        if disabled_indicator:
            self.attrs['disabled-indicator'] = disabled_indicator
class Livesearch(forms.TextInput):
    """
    A text widget that carries a few extra bits of data for use in AJAX-powered autocomplete search

    :param query_key: The name of the parameter to query against
    :param query_url: The name of the API URL to query
    :param field_to_update: The name of the "real" form field whose value is being set
    :param obj_label: The field to use as the option label (optional)
    """
    def __init__(self, query_key, query_url, field_to_update, obj_label=None, *args, **kwargs):
        super(Livesearch, self).__init__(*args, **kwargs)

        # NOTE(review): this *replaces* self.attrs wholesale, discarding any
        # attrs set by the parent constructor or passed via kwargs -- confirm
        # this is intentional.
        self.attrs = {
            'data-key': query_key,
            'data-source': reverse_lazy(query_url),
            'data-field': field_to_update,
        }

        if obj_label:
            self.attrs['data-label'] = obj_label
#
# Form fields
#
class CSVDataField(forms.CharField):
    """
    A CharField (rendered as a Textarea) which accepts CSV-formatted data. It returns a list of dictionaries mapping
    column headers to values. Each dictionary represents an individual record.

    :param fields: All column headers accepted in the input.
    :param required_fields: Column headers that must be present in the input.
    """
    widget = forms.Textarea

    # NOTE(review): mutable default argument; safe here only because
    # required_fields is never mutated.
    def __init__(self, fields, required_fields=[], *args, **kwargs):
        self.fields = fields
        self.required_fields = required_fields
        super(CSVDataField, self).__init__(*args, **kwargs)
        # Keep raw whitespace; individual cells are stripped in to_python().
        self.strip = False
        if not self.label:
            self.label = 'CSV Data'
        if not self.initial:
            # Pre-populate the textarea with the required header row.
            self.initial = ','.join(required_fields) + '\n'
        if not self.help_text:
            self.help_text = 'Enter the list of column headers followed by one line per record to be imported, using ' \
                             'commas to separate values. Multi-line data and values containing commas may be wrapped ' \
                             'in double quotes.'

    def to_python(self, value):

        # Python 2's csv module has problems with Unicode
        # NOTE(review): Python 2 relic; on Python 3 a bytes value would raise
        # AttributeError here (bytes has no .encode) -- confirm inputs are text.
        if not isinstance(value, str):
            value = value.encode('utf-8')

        records = []
        reader = csv.reader(value.splitlines())

        # Consume and validate the first line of CSV data as column headers
        headers = next(reader)
        for f in self.required_fields:
            if f not in headers:
                raise forms.ValidationError('Required column header "{}" not found.'.format(f))
        for f in headers:
            if f not in self.fields:
                raise forms.ValidationError('Unexpected column header "{}" found.'.format(f))

        # Parse CSV data; each non-empty row becomes one header->value dict
        for i, row in enumerate(reader, start=1):
            if row:
                if len(row) != len(headers):
                    raise forms.ValidationError(
                        "Row {}: Expected {} columns but found {}".format(i, len(headers), len(row))
                    )
                row = [col.strip() for col in row]
                record = dict(zip(headers, row))
                records.append(record)

        return records
class CSVChoiceField(forms.ChoiceField):
    """
    Invert the provided set of choices to take the human-friendly label as input, and return the database value.
    """
    def __init__(self, choices, *args, **kwargs):
        super(CSVChoiceField, self).__init__(choices, *args, **kwargs)
        # Present the label as both value and display of every choice, and
        # keep a reverse map from label back to the stored database value.
        self.choices = [(label, label) for _, label in choices]
        self.choice_values = {label: value for value, label in choices}

    def clean(self, value):
        value = super(CSVChoiceField, self).clean(value)
        if not value:
            return None
        if value in self.choice_values:
            return self.choice_values[value]
        raise forms.ValidationError("Invalid choice: {}".format(value))
class ExpandableNameField(forms.CharField):
    """
    A field which allows for numeric range expansion
      Example: 'Gi0/[1-3]' => ['Gi0/1', 'Gi0/2', 'Gi0/3']
    """
    def __init__(self, *args, **kwargs):
        super(ExpandableNameField, self).__init__(*args, **kwargs)
        if not self.help_text:
            self.help_text = 'Numeric ranges are supported for bulk creation.<br />'\
                'Example: <code>ge-0/0/[0-23,25,30]</code>'

    def to_python(self, value):
        # Expand only when the value contains a [..] range pattern; always
        # return a list so callers can iterate uniformly.
        if re.search(NUMERIC_EXPANSION_PATTERN, value):
            return list(expand_numeric_pattern(value))
        return [value]
class ExpandableIPAddressField(forms.CharField):
    """
    A field which allows for expansion of IP address ranges
      Example: '192.0.2.[1-254]/24' => ['192.0.2.1/24', '192.0.2.2/24', '192.0.2.3/24' ... '192.0.2.254/24']
    """
    def __init__(self, *args, **kwargs):
        super(ExpandableIPAddressField, self).__init__(*args, **kwargs)
        if not self.help_text:
            self.help_text = 'Specify a numeric range to create multiple IPs.<br />'\
                'Example: <code>192.0.2.[1,5,100-254]/24</code>'

    def to_python(self, value):
        # Hackish address family detection but it's all we have to work with
        if '.' in value and re.search(IP4_EXPANSION_PATTERN, value):
            return list(expand_ipaddress_pattern(value, 4))
        elif ':' in value and re.search(IP6_EXPANSION_PATTERN, value):
            return list(expand_ipaddress_pattern(value, 6))
        # No range pattern present: wrap the single value in a list.
        return [value]
class CommentField(forms.CharField):
    """
    A textarea with support for GitHub-Flavored Markdown. Exists mostly just to add a standard help_text.
    """
    widget = forms.Textarea
    default_label = 'Comments'
    # TODO: Port GFM syntax cheat sheet to internal documentation
    default_helptext = '<i class="fa fa-info-circle"></i> '\
                       '<a href="https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet" target="_blank">'\
                       'GitHub-Flavored Markdown</a> syntax is supported'

    def __init__(self, *args, **kwargs):
        # Callers may override, but the field defaults to optional with the
        # standard label and help text above.
        required = kwargs.pop('required', False)
        label = kwargs.pop('label', self.default_label)
        help_text = kwargs.pop('help_text', self.default_helptext)
        super(CommentField, self).__init__(required=required, label=label, help_text=help_text, *args, **kwargs)
class FlexibleModelChoiceField(forms.ModelChoiceField):
    """
    Allow a model to be referenced by either '{ID}' (curly-brace-wrapped
    primary key) or the field specified by `to_field_name`.
    """
    def to_python(self, value):
        """
        Resolve `value` to a model instance.

        :raises ValidationError: If no matching object exists (or the value
            cannot be coerced for the lookup).
        """
        if value in self.empty_values:
            return None
        try:
            if not self.to_field_name:
                key = 'pk'
            # Raw string fixes the invalid \{ \d \} escape sequences the
            # previous non-raw literal produced on Python 3.
            elif re.match(r'^\{\d+\}$', value):
                key = 'pk'
                value = value.strip('{}')
            else:
                key = self.to_field_name
            value = self.queryset.get(**{key: value})
        except (ValueError, TypeError, self.queryset.model.DoesNotExist):
            raise forms.ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
        return value
class ChainedModelChoiceField(forms.ModelChoiceField):
    """
    A ModelChoiceField which is initialized based on the values of other fields within a form. `chains` is a dictionary
    mapping of model fields to peer fields within the form. For example:

        country1 = forms.ModelChoiceField(queryset=Country.objects.all())
        city1 = ChainedModelChoiceField(queryset=City.objects.all(), chains={'country': 'country1'}

    The queryset of the `city1` field will be modified as

        .filter(country=<value>)

    where <value> is the value of the `country1` field. (Note: The form must inherit from ChainedFieldsMixin.)
    """
    def __init__(self, chains=None, *args, **kwargs):
        # Stored as-is; consumed by ChainedFieldsMixin to filter the queryset.
        self.chains = chains
        super(ChainedModelChoiceField, self).__init__(*args, **kwargs)
class SlugField(forms.SlugField):
    """SlugField with standard label/help text, auto-populated from `slug_source`."""

    def __init__(self, slug_source='name', *args, **kwargs):
        kwargs.setdefault('label', "Slug")
        kwargs.setdefault('help_text', "URL-friendly unique shorthand")
        super(SlugField, self).__init__(*args, **kwargs)
        # Advertise which sibling field the slug should mirror (presumably
        # consumed by front-end auto-population code).
        self.widget.attrs['slug-source'] = slug_source
class FilterChoiceFieldMixin(object):
    """
    Mixin for filter-form choice fields: optional by default, rendered as a
    size-6 multi-select, can prepend a "null" option, and appends each
    choice's `filter_count` (when present on the object) to its label.
    """
    iterator = forms.models.ModelChoiceIterator

    def __init__(self, null_option=None, *args, **kwargs):
        # null_option (presumably a (value, label) pair) is prepended to the
        # generated choices when given -- see _get_choices().
        self.null_option = null_option
        if 'required' not in kwargs:
            kwargs['required'] = False
        if 'widget' not in kwargs:
            kwargs['widget'] = forms.SelectMultiple(attrs={'size': 6})
        super(FilterChoiceFieldMixin, self).__init__(*args, **kwargs)

    def label_from_instance(self, obj):
        # Render "<label> (<count>)" when the object carries filter_count.
        label = super(FilterChoiceFieldMixin, self).label_from_instance(obj)
        if hasattr(obj, 'filter_count'):
            return '{} ({})'.format(label, obj.filter_count)
        return label

    def _get_choices(self):
        # Respect explicitly-set choices; otherwise iterate the queryset,
        # optionally preceded by the null option.
        if hasattr(self, '_choices'):
            return self._choices
        if self.null_option is not None:
            return itertools.chain([self.null_option], self.iterator(self))
        return self.iterator(self)

    choices = property(_get_choices, forms.ChoiceField._set_choices)
class FilterChoiceField(FilterChoiceFieldMixin, forms.ModelMultipleChoiceField):
    """Multiple-choice model field with filter-form defaults from FilterChoiceFieldMixin."""
    pass
class FilterTreeNodeMultipleChoiceField(FilterChoiceFieldMixin, TreeNodeMultipleChoiceField):
    """Tree-aware (MPTT) variant of FilterChoiceField."""
    pass
class LaxURLField(forms.URLField):
    """
    Custom URLField which allows any valid URL scheme
    """
    class AnyURLScheme(object):
        # A fake URL scheme list which "contains" all scheme names abiding by
        # the syntax defined in RFC 3986 section 3.1.
        def __contains__(self, item):
            # Raw string fixes the invalid '\-' escape sequence the previous
            # non-raw literal produced on Python 3.
            if not item or not re.match(r'^[a-z][0-9a-z+\-.]*$', item.lower()):
                return False
            return True

    default_validators = [URLValidator(schemes=AnyURLScheme())]
#
# Forms
#
class BootstrapMixin(forms.BaseForm):
    """
    Form mixin that decorates each widget with the Bootstrap `form-control`
    class, a `required` marker, and a default placeholder from the label.
    """
    def __init__(self, *args, **kwargs):
        super(BootstrapMixin, self).__init__(*args, **kwargs)
        # Widgets that should not receive the form-control treatment.
        exempt_widgets = [forms.CheckboxInput, forms.ClearableFileInput, forms.FileInput, forms.RadioSelect]
        for name, field in self.fields.items():
            widget = field.widget
            if widget.__class__ in exempt_widgets:
                continue
            existing_classes = widget.attrs.get('class', '')
            widget.attrs['class'] = ' '.join([existing_classes, 'form-control']).strip()
            if field.required:
                widget.attrs['required'] = 'required'
            widget.attrs.setdefault('placeholder', field.label)
class ChainedFieldsMixin(forms.BaseForm):
    """
    Iterate through all ChainedModelChoiceFields in the form and modify their querysets based on chained fields.
    """
    def __init__(self, *args, **kwargs):
        super(ChainedFieldsMixin, self).__init__(*args, **kwargs)
        for field_name, field in self.fields.items():
            if isinstance(field, ChainedModelChoiceField):
                # Collect queryset filters from the current values of the
                # fields this one is chained to.
                # NOTE(review): assumes field.chains is always supplied
                # (default None would raise here) — confirm with callers.
                filters_dict = {}
                for (db_field, parent_field) in field.chains:
                    # Bound form: prefer submitted data ('' coerces to None).
                    if self.is_bound and parent_field in self.data:
                        filters_dict[db_field] = self.data[parent_field] or None
                    # Unbound form: fall back to the form's initial value.
                    elif self.initial.get(parent_field):
                        filters_dict[db_field] = self.initial[parent_field]
                    # Parent marked nullable: filter on NULL explicitly.
                    elif self.fields[parent_field].widget.attrs.get('nullable'):
                        filters_dict[db_field] = None
                    else:
                        # No value available for this link; stop collecting.
                        break
                if filters_dict:
                    field.queryset = field.queryset.filter(**filters_dict)
                # Edit form without chained values: restrict to the bound
                # instance's current value so it still renders as selected.
                elif not self.is_bound and getattr(self, 'instance', None) and hasattr(self.instance, field_name):
                    obj = getattr(self.instance, field_name)
                    if obj is not None:
                        field.queryset = field.queryset.filter(pk=obj.pk)
                    else:
                        field.queryset = field.queryset.none()
                # Otherwise an unbound form starts with an empty queryset.
                elif not self.is_bound:
                    field.queryset = field.queryset.none()
class ReturnURLForm(forms.Form):
    """
    Provides a hidden return URL field to control where the user is directed after the form is submitted.
    """
    # Optional; rendered as a hidden input so templates/views can round-trip it.
    return_url = forms.CharField(required=False, widget=forms.HiddenInput())
class ConfirmationForm(BootstrapMixin, ReturnURLForm):
    """
    A generic confirmation form. The form is not valid unless the confirm field is checked.
    """
    # Hidden and pre-checked: submitting the rendered form confirms the action.
    confirm = forms.BooleanField(required=True, widget=forms.HiddenInput(), initial=True)
class BulkEditForm(forms.Form):
    """
    Base form for bulk edit views; records the target model and any field
    names declared nullable on the form's Meta.
    """
    def __init__(self, model, *args, **kwargs):
        super(BulkEditForm, self).__init__(*args, **kwargs)
        self.model = model
        # Copy Meta.nullable_fields (when declared) into a fresh list.
        self.nullable_fields = list(getattr(self.Meta, 'nullable_fields', ()))
| {
"content_hash": "b52f8ced980eec546317fb8612f15098",
"timestamp": "",
"source": "github",
"line_count": 525,
"max_line_length": 129,
"avg_line_length": 36.68952380952381,
"alnum_prop": 0.602481569930433,
"repo_name": "snazy2000/netbox",
"id": "0fa402d52823f1d8537647ab3f696e3f4437107c",
"size": "19262",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "netbox/utilities/forms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "167999"
},
{
"name": "HTML",
"bytes": "370021"
},
{
"name": "JavaScript",
"bytes": "13570"
},
{
"name": "Python",
"bytes": "1018536"
},
{
"name": "Shell",
"bytes": "3389"
}
],
"symlink_target": ""
} |
"""
QC OTA API
Test Result dictionary
----
tests:
name
status
subject
suite
description
exec_date
exec_time
bug
steps:
name
status
description
expected
actual
"""
# pylint: disable=I0011, no-member
from datetime import datetime
import fnmatch
import logging
import os
import tempfile
import zipfile
import pywintypes
from win32com.client import Dispatch
LOG = logging.getLogger(__name__)
TDATT_FILE = 1 # data file attachment.
def connect(
        url='',
        domain='',
        project='',
        username='',
        password=''
):
    """
    Open and return a Quality Center OTA connection.

    :param url: QC server URL
    :param domain: QC domain to connect to
    :param project: QC project within the domain
    :param username: login name
    :param password: login password
    :return: a connected TDConnection COM object
    """
    LOG.info("Connecting to Quality Center...")
    connection = Dispatch("TDApiole80.TDConnection")
    connection.InitConnectionEx(url)
    connection.Login(username, password)
    connection.Connect(domain, project)
    LOG.info('Connected to Quality Center')
    return connection
def disconnect(qcc):
    """
    Tear down a Quality Center connection; a missing or already-disconnected
    connection is tolerated silently.

    :param qcc: TDConnection COM object, or None
    """
    if qcc is None:
        return
    if qcc.Connected:
        qcc.Disconnect()
        qcc.Logout()
        qcc.ReleaseConnection()
        LOG.info('Disconnected from Quality Center.')
    else:
        LOG.info('Already disconnected from Quality Center.')
def create_folder(parent, name):
    """
    Add a child node named *name* under *parent* and commit it to QC.

    :param parent: QC tree node to create the folder under
    :param name: name of the new folder
    :return: the newly created, posted child node
    :raises pywintypes.com_error: re-raised after logging if QC rejects it
    """
    try:
        node = parent.AddNode(name)
        node.Post()
    except pywintypes.com_error as ex:
        LOG.error('error creating folder: %s', name)
        LOG.exception(ex)
        raise
    return node
def get_qc_folder(qcc, folder, create=True):
    """
    Returns a QC folder. If create=True, create subdirectories if don't exist.

    :param qcc: QC connection
    :param folder: backslash-separated path starting with 'Root' (test lab)
        or 'Subject' (test plan)
    :param create: when True, build any missing folders along the path
    :return: the folder node, or None when not found and create=False
    :raises ValueError: if the path starts with neither 'Root' nor 'Subject'
    """
    # check if folder is in tests lab or tests plan by seeing if it starts
    # with 'Root' (test lab, cf. _to_lab_dir) or 'Subject' (test plan,
    # cf. _to_plan_dir).
    if folder.startswith('Root'):
        treemgr = qcc.TestSetTreeManager
    elif folder.startswith('Subject'):
        treemgr = qcc.TreeManager
    else:
        raise ValueError(folder)
    # if folder is there return it, otherwise walk path from root creating
    # folders if needed (NodeByPath raises com_error on a missing path)
    child = None
    try:
        child = treemgr.NodeByPath(folder)
    except pywintypes.com_error:
        if not create:
            return None
        LOG.debug('folder not found, creating folder structure...')
        folders = folder.split('\\')
        # Probe each prefix of the path; create the segment when missing.
        for i in range(len(folders)-1):
            try:
                child = treemgr.NodeByPath('\\'.join(folders[:i+2]))
            except pywintypes.com_error:
                LOG.debug('folder not found. creating: %s', folders[i+1])
                parent = treemgr.NodeByPath('\\'.join(folders[:i+1]))
                child = create_folder(parent, folders[i+1])
    return child
def get_subdirectories(qcnode):
    """
    Return the immediate child nodes of *qcnode* as a list.

    :param qcnode: QC tree node exposing a SubNodes collection
    :return: list of child node objects
    """
    return [node for node in qcnode.SubNodes]
def make_test_instance(
        qcc,
        qcdir,
        testplan,
        subject='',
        suite='',
        name=''
):
    """
    Create a TsTestInstance in QC.

    Finds (or creates) the test set named *suite* under the lab folder for
    *qcdir*/*subject*, then finds (or adds) an instance of *testplan* named
    *name* inside it.

    :param qcc: QC connection
    :param qcdir: base QC directory under Root
    :param testplan: test-plan object to instantiate
    :param subject: sub-folder name under qcdir
    :param suite: test-set name; must be non-empty
    :param name: test name used to locate an existing instance
    :return: the TsTestInstance, or None when suite is empty
    """
    if not suite:
        LOG.error('suite cannot be empty')
        return
    fldr = _to_lab_dir(qcdir, subject)
    folder = get_qc_folder(qcc, fldr)
    # Look up the test set by cycle name; create it when absent.
    test_set_factory = folder.TestSetFactory
    test_set_filter = test_set_factory.Filter
    test_set_filter.Clear()
    test_set_filter["CY_CYCLE"] = '"{}"'.format(suite)
    test_set_list = test_set_factory.NewList(test_set_filter.Text)
    if len(test_set_list) > 0:
        testset = test_set_list(1)
    else:
        testset = test_set_factory.AddItem(None)
        testset.Name = suite
        testset.Post()
        testset.Refresh()
    # Look up the test instance by name; add one when absent, then re-query
    # so the returned object is the posted instance.
    test_instance_factory = testset.TsTestFactory
    test_instance_filter = test_instance_factory.Filter
    test_instance_filter.Clear()
    test_instance_filter["TSC_NAME"] = '"{}"'.format(name)
    test_instance_list = test_instance_factory.NewList(
        test_instance_filter.Text)
    if len(test_instance_list) == 0:
        test_instance_factory.AddItem(testplan)
        test_instance_list = test_instance_factory.NewList(
            test_instance_filter.Text)
    return test_instance_list(1)
def make_test_plan(
        qcc,
        qcdir,
        subject='',
        suite='',
        name='',
        description=''
):
    """
    Create a TestInstance in QC.

    Finds (or creates) a test named *name* in the test-plan folder for
    *qcdir*/*subject*/*suite*.

    :param qcc: QC connection
    :param qcdir: base QC directory under Subject
    :param subject: sub-folder name under qcdir
    :param suite: suite folder name under subject
    :param name: test name to find or create
    :param description: description set on a newly created test
    :return: the test-plan object
    """
    fldr = _to_plan_dir(qcdir, subject, suite)
    folder = get_qc_folder(qcc, fldr)
    test_factory = folder.TestFactory
    test_filter = test_factory.Filter
    # NOTE(review): unlike make_test_instance, the filter is not Clear()ed
    # first and the list comes from test_filter.NewList() rather than
    # test_factory.NewList(filter.Text) — confirm both patterns are
    # equivalent against the OTA API before unifying.
    test_filter["TS_NAME"] = '"{}"'.format(name)
    test_list = test_filter.NewList()
    if len(test_list) > 0:
        testplan = test_list(1)
    else:
        testplan = test_factory.AddItem(name)
        testplan.SetField("TS_DESCRIPTION", description)
        testplan.SetField("TS_STATUS", "Ready")
        testplan.SetField("TS_TYPE", "QUICKTEST_TEST")
        testplan.Post()
    return testplan
def make_test_run(
        testinstance,
        exec_date='',
        exec_time='',
        duration='0',
        status='Passed'
):
    """
    Create a RunInstance in QC.

    :param testinstance: TsTestInstance to attach the run to
    :param exec_date: execution date string stored in RN_EXECUTION_DATE
    :param exec_time: execution time string stored in RN_EXECUTION_TIME
    :param duration: run duration stored in RN_DURATION
    :param status: run status (e.g. 'Passed'/'Failed')
    :return: the posted run object
    """
    run = testinstance.RunFactory.AddItem("Run {}".format(datetime.now()))
    run.Status = status
    run.SetField('RN_DURATION', duration)
    run.SetField('RN_EXECUTION_DATE', exec_date)
    run.SetField('RN_EXECUTION_TIME', exec_time)
    run.Post()
    run.Refresh()
    # do again, otherwise not showing in QC — deliberate workaround for the
    # OTA API dropping the execution date/time on the first Post.
    run.SetField('RN_EXECUTION_DATE', exec_date)
    run.SetField('RN_EXECUTION_TIME', exec_time)
    run.Post()
    run.Refresh()
    return run
def import_test_result(
        qcc,
        qcdir,
        subject='',
        suite='',
        name='',
        description='',
        exec_date='',
        exec_time='',
        duration='0',
        status='Passed',
        steps=None,
        bug='0'
):
    """
    Import test results to Quality Center.

    Creates (or finds) the test plan and test instance, records a run with
    the given status, adds any step results and optionally links a bug.

    :param qcc: QC connection
    :param qcdir: base QC directory name
    :param subject: sub-folder under qcdir
    :param suite: test-set / suite name
    :param name: test name
    :param description: test description
    :param exec_date: execution date string
    :param exec_time: execution time string
    :param duration: run duration
    :param status: overall run status
    :param steps: optional list of dicts with keys 'name', 'status' and
        optionally 'description', 'expected', 'actual', 'exec_date',
        'exec_time'
    :param bug: bug id as a string; '0' means no bug to link
    :return: True on success, False when the test instance could not be made
    """
    testplan = make_test_plan(qcc, qcdir, subject, suite, name, description)
    testinstance = make_test_instance(
        qcc, qcdir, testplan, subject, suite, name)
    if testinstance is None:
        LOG.error('error creating test instance')
        return False
    testrun = make_test_run(
        testinstance, exec_date, exec_time, duration, status)
    if steps:
        for step in steps:
            runstep = testrun.StepFactory.AddItem(None)
            runstep.SetField('ST_STEP_NAME', step['name'])
            runstep.SetField('ST_STATUS', step['status'])
            runstep.SetField('ST_DESCRIPTION', step.get('description', ''))
            runstep.SetField('ST_EXPECTED', step.get('expected', ''))
            runstep.SetField('ST_ACTUAL', step.get('actual', ''))
            runstep.SetField('ST_EXECUTION_DATE', step.get('exec_date', ''))
            runstep.SetField('ST_EXECUTION_TIME', step.get('exec_time', ''))
            runstep.Post()
            # not seeing the step without a Refresh and Post here
            runstep.Refresh()
            runstep.Post()
    if int(bug):
        LOG.info('linking bug: %s', bug)
        link_bug(qcc, testinstance, bug)
    return True
def attach_report(qcc, pardir, attachments, qcdir, attachname):
    """
    Zip files/folders under *pardir* that match the *attachments* patterns
    and upload the archive as an attachment on the QC test-lab folder.

    :param qcc: QC connection
    :param pardir: local directory to walk for matching entries
    :param attachments: iterable of fnmatch patterns to include
    :param qcdir: QC test-lab path (joined under Root)
    :param attachname: file name for the uploaded zip archive
    """
    # qc: resolve (and create if needed) the target lab folder
    fldr = '/'.join(['Root', qcdir])
    fldr = os.path.normpath(fldr)
    fldr = fldr.replace('/', '\\')
    fldr = get_qc_folder(qcc, fldr)
    afactory = fldr.Attachments
    attach = afactory.AddItem(None)
    # local: build the archive in the system temp directory. The context
    # manager guarantees the zip is closed even if a write fails (the
    # original leaked the handle on error), and progress messages now go
    # through the module logger instead of print().
    zipfileloc = os.path.join(tempfile.gettempdir(), attachname)
    with zipfile.ZipFile(zipfileloc, 'w', zipfile.ZIP_DEFLATED) as zipf:
        for root, dirnames, filenames in os.walk(pardir):
            for filepat in attachments:
                LOG.debug('looking @ filepat: %s', filepat)
                for matched in fnmatch.filter(filenames, filepat):
                    LOG.debug('adding file: %s', matched)
                    filepath = os.path.join(root, matched)
                    zipf.write(filepath, os.path.basename(filepath))
                for matched in fnmatch.filter(dirnames, filepat):
                    LOG.debug('adding folder: %s', matched)
                    filepath = os.path.join(root, matched)
                    zipf.write(filepath, os.path.basename(filepath))
    attach.FileName = zipfileloc.replace('/', '\\')
    attach.Type = TDATT_FILE
    attach.Post()
    # Remove the temporary archive once QC has taken it.
    os.remove(zipfileloc)
def get_bugs(qcc):
    """
    Fetch every defect in the project and return its key fields.

    :param qcc: QC connection
    :return: list of dicts with 'id', 'summary', 'status', 'detection_date'
    """
    return [
        {
            'id': bug.Field('BG_BUG_ID'),
            'summary': bug.Field('BG_SUMMARY'),
            'status': bug.Field('BG_STATUS'),
            'detection_date': bug.Field('BG_DETECTION_DATE')
        }
        for bug in qcc.BugFactory.NewList('')
    ]
def link_bug(qcc, testinstance, bug):
    """
    link a Bug to a TsTestInstance
    returns True if successful, False if not

    :param qcc: QC connection
    :param testinstance: TsTestInstance to link the bug against
    :param bug: bug id to look up in the project's BugFactory
    :return: True if the link was created, False otherwise
    """
    bug_filter = qcc.BugFactory.Filter
    bug_filter.Clear()
    bug_filter['BG_BUG_ID'] = bug
    bug_list = bug_filter.NewList()
    if len(bug_list) == 0:
        LOG.error('no bugs found')
        return False
    bug = bug_list(1)
    link_factory = testinstance.BugLinkFactory
    try:
        link = link_factory.AddItem(bug)
        link.LinkType = 'Related'
        link.Post()
    except pywintypes.com_error as ex:
        LOG.exception(ex)
        # Bug fix: previously fell through and returned True even when the
        # link could not be created, contradicting the documented contract.
        return False
    return True
def _zipfolder(fldr, filename):
zipfileloc = os.path.join(fldr, os.pardir, filename)
zipf = zipfile.ZipFile(zipfileloc, 'w', zipfile.ZIP_DEFLATED)
# add entire directory to zip
for root, _, files in os.walk(fldr):
for child in files:
filepath = os.path.join(root, child)
zipf.write(filepath, os.path.basename(filepath))
zipf.close()
return zipfileloc
def _to_lab_dir(qcdir, subject):
fldr = '/'.join(['Root', qcdir, subject])
fldr = os.path.normpath(fldr)
fldr = fldr.replace('/', '\\')
return fldr
def _to_plan_dir(qcdir, subject, suite):
fldr = '/'.join(['Subject', qcdir, subject, suite])
fldr = os.path.normpath(fldr)
fldr = fldr.replace('/', '\\')
return fldr
| {
"content_hash": "1d5a39afb0f48b02428f3f62198744e3",
"timestamp": "",
"source": "github",
"line_count": 377,
"max_line_length": 79,
"avg_line_length": 27.42970822281167,
"alnum_prop": 0.5973310124746156,
"repo_name": "douville/qcri",
"id": "9547c915eafc99e73ee7c8c0b3e6dd2e19ff8214",
"size": "10341",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qcri/application/qualitycenter.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "63207"
}
],
"symlink_target": ""
} |
"""
Usage: analyse.py <dir_name> [<sffx>]
Arguments
dir_name : name of the directory from which to read in parameters and write data files
    sffx : optional suffix to add to the data file name
Options
-h : displays this help file
"""
import os
import yaml
from sys import argv
from csv import reader
import docopt
class Data():
    """
    Holds one simulation run: parameters loaded from parameters.yml, the
    event records from the data csv, derived per-node/per-class views and
    summary statistics.

    Each data row is a list of strings:
        [customer_id, class, node, arrival_date, wait, service_start_date,
         service_time, exit_date]
    """
    def __init__(self, directory_name, sffx=None):
        """Initialises the data

        :param directory_name: directory (relative to the cwd) containing
            parameters.yml and the data csv
        :param sffx: optional suffix used in the data and results file names

        >>> d = Data('datafortesting/logs_test_for_analyse/')
        >>> d.data[0]
        ['50132', '0', '3', '1999.8061066137873', '0.0', '1999.8061066137873', '0.1923943987059666', '1999.9985010124933']
        >>> d.data_per_node[1][0]
        ['50102', '1', '1', '1999.0025591060407', '0.0', '1999.0025591060407', '0.6092886795003283', '1999.6118477855412']
        >>> d.data_per_class[0][0]
        ['50132', '0', '3', '1999.8061066137873', '0.0', '1999.8061066137873', '0.1923943987059666', '1999.9985010124933']
        >>> d.data_per_node_per_class[2][1][0]
        ['50102', '1', '2', '1998.6969428179862', '0.14433402242707416', '1998.8412768404132', '0.1612822656274953', '1999.0025591060407']
        """
        self.root = os.getcwd()
        self.directory = os.path.join(self.root, directory_name)
        self.sffx = sffx
        self.parameter_file = self.directory + 'parameters.yml'
        self.data_file = self.find_data_file()
        self.parameters = self.load_parameters()
        self.data = self.load_data()
        self.data_per_node = self.find_data_per_node()
        self.data_per_class = self.find_data_per_class()
        self.data_per_node_per_class = self.find_data_per_node_per_class()
        self.summary_statistics = {}

    def find_data_file(self):
        """
        Returns the data file path, honouring the optional suffix.
        """
        if self.sffx:
            return self.directory + 'data_' + self.sffx + '.csv'
        else:
            return self.directory + 'data.csv'

    def load_parameters(self):
        """
        Loads parameters into a dictionary

        >>> d = Data('datafortesting/logs_test_for_analyse/')
        >>> d.parameters['Number_of_nodes']
        3
        >>> d.parameters['Number_of_classes']
        2
        """
        parameter_file = open(self.parameter_file, 'r')
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary Python objects. parameters.yml is trusted local input
        # here, but yaml.safe_load would be preferable.
        parameters = yaml.load(parameter_file)
        parameter_file.close()
        return parameters

    def load_data(self):
        """
        Loads data into an array

        >>> d = Data('datafortesting/logs_test_for_analyse/')
        >>> d.data[0]
        ['50132', '0', '3', '1999.8061066137873', '0.0', '1999.8061066137873', '0.1923943987059666', '1999.9985010124933']
        """
        data_array = []
        data_file = open(self.data_file, 'r')
        rdr = reader(data_file)
        for row in rdr:
            data_array.append(row)
        data_file.close()
        return data_array

    def find_data_per_node(self):
        """
        Partitions the data by node (1..Number_of_nodes inclusive).
        """
        return {node:[datapoint for datapoint in self.data if int(datapoint[2]) == node] for node in range(1, self.parameters['Number_of_nodes']+1)}

    def find_data_per_class(self):
        """
        Partitions the data by customer class (0..Number_of_classes-1).
        """
        return {cls:[datapoint for datapoint in self.data if int(datapoint[1]) == cls] for cls in range(self.parameters['Number_of_classes'])}

    def find_data_per_node_per_class(self):
        """
        Partitions the data by node and then by customer class.

        NOTE(review): the node range here is range(1, Number_of_nodes), so
        the final node is excluded (find_data_per_node uses N+1) — the
        doctests rely on this; confirm intent before changing.
        """
        return {node:{cls:[datapoint for datapoint in self.data_per_node[node] if int(datapoint[1]) == cls] for cls in range(self.parameters['Number_of_classes'])} for node in range(1, self.parameters['Number_of_nodes'])}

    def mean_waits(self, data):
        """
        Finds the mean wait (column 4) over the given rows.

        >>> d = Data('datafortesting/logs_test_for_analyse/')
        >>> round(d.mean_waits(d.data), 5)
        0.05566
        >>> round(d.mean_waits(d.data_per_node[1]), 5)
        0.07253
        """
        return sum([float(data_point[4]) for data_point in data]) / len(data)

    def mean_visits(self, data):
        """
        Finds the mean number of visits to each node for a subset of the data
        data here must be for 1 node only

        >>> d = Data('datafortesting/logs_test_for_analyse/')
        >>> round(d.mean_visits(d.data_per_node[3]), 5)
        1.46328
        >>> round(d.mean_visits(d.data_per_node_per_class[1][1]), 5)
        2.69957
        """
        # Count rows per customer id (column 0), then average the counts.
        visits_per_customer = {}
        for data_point in data:
            if data_point[0] in visits_per_customer:
                visits_per_customer[data_point[0]] += 1.0
            else:
                visits_per_customer[data_point[0]] = 1.0
        return sum(visits_per_customer.values()) / len(visits_per_customer)

    def mean_customers(self, data):
        """
        Finds the mean customers at a node for a subset of the data
        data here must be for 1 node only

        Arrival times are column 3 and exit times column 7; the count is
        time-averaged over the sorted sequence of arrival/exit events.

        >>> d = Data('datafortesting/logs_test_for_analyse/')
        >>> round(d.mean_customers(d.data_per_node[1]), 5)
        9.78516
        """
        arrivals_and_exits = [[float(datapoint[3]), 'a'] for datapoint in data] + [[float(datapoint[7]), 'd'] for datapoint in data]
        sorted_arrivals_and_exits = sorted(arrivals_and_exits, key=lambda data_point: data_point[0])
        current_number_of_customers = 0
        current_mean_customers = 0
        previous_time = 0
        for data_point in sorted_arrivals_and_exits:
            if data_point[1] == 'a':
                current_number_of_customers += 1
            elif data_point[1] == 'd':
                current_number_of_customers -= 1
            current_mean_customers += current_number_of_customers * (data_point[0] - previous_time)
            previous_time = data_point[0]
        return current_mean_customers / sorted_arrivals_and_exits[-1][0]

    def find_summary_statistics(self):
        """
        Finds summary statistics for this data

        NOTE(review): the per-node/per-class ranges start at 1 and stop
        before the last index, so the final node and class 0 are omitted from
        several entries — the doctest documents the existing behaviour;
        confirm intent before changing.

        >>> d = Data('datafortesting/logs_test_for_analyse/')
        >>> d.find_summary_statistics()
        >>> d.summary_statistics
        {'Mean_Visits_per_Node': {1: 2.3176760690443254, 2: 1.4333203581159983}, 'Mean_Customers_Overall': 19.62536579897305, 'Mean_Waiting_Times_Overall': 0.055657249979677685, 'Mean_Waiting_Times_per_Node': {1: 0.07252886678329462, 2: 0.06900929721381453}, 'Mean_Waiting_Times_per_Node_per_Class': {1: {0: 0.07296209330440638, 1: 0.07229676234027073}, 2: {0: 0.06776830971106831, 1: 0.07077829841382609}}, 'Mean_Customers_per_Node': {1: 9.785161997806162, 2: 6.673346315685424}, 'Mean_Waiting_Times_per_Class': {1: 0.06011290147533826}}
        """
        self.summary_statistics['Mean_Waiting_Times_Overall'] = self.mean_waits(self.data)
        self.summary_statistics['Mean_Waiting_Times_per_Node'] = {node:self.mean_waits(self.data_per_node[node]) for node in range(1,self.parameters['Number_of_nodes'])}
        self.summary_statistics['Mean_Waiting_Times_per_Class'] = {cls:self.mean_waits(self.data_per_class[cls]) for cls in range(1,self.parameters['Number_of_classes'])}
        self.summary_statistics['Mean_Waiting_Times_per_Node_per_Class'] = {node: {cls:self.mean_waits(self.data_per_node_per_class[node][cls]) for cls in range(self.parameters['Number_of_classes'])} for node in range(1,self.parameters['Number_of_nodes'])}
        self.summary_statistics['Mean_Visits_per_Node'] = {node:self.mean_visits(self.data_per_node[node]) for node in range(1,self.parameters['Number_of_nodes'])}
        self.summary_statistics['Mean_Customers_per_Node'] = {node:self.mean_customers(self.data_per_node[node]) for node in range(1,self.parameters['Number_of_nodes'])}
        self.summary_statistics['Mean_Customers_Overall'] = self.mean_customers(self.data)

    def write_results_to_file(self):
        """
        Takes the summary statistics and writes them into a .yml file
        """
        # Bug fix: previously tested the module-level name `sffx`, which only
        # exists when the module is run as a script; use the instance
        # attribute so library use does not raise NameError.
        if self.sffx:
            results_file = open('%sresults_' % self.directory + self.sffx + '.yml', 'w')
        else:
            results_file = open('%sresults.yml' % self.directory, 'w')
        results_file.write(yaml.dump(self.summary_statistics, default_flow_style=False))
        results_file.close()
if __name__ == '__main__':
    # Parse command-line arguments as described in the module docstring.
    arguments = docopt.docopt(__doc__)
    dirname = arguments['<dir_name>']
    sffx = arguments['<sffx>']
    # Load the run, compute statistics and write results.yml to the directory.
    d = Data(dirname, sffx)
    d.find_summary_statistics()
    d.write_results_to_file()
| {
"content_hash": "60ef36254e438bc0765e78a63ece73a6",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 533,
"avg_line_length": 42.02512562814071,
"alnum_prop": 0.6643548965682171,
"repo_name": "geraintpalmer/SimulatingAQingNetwork",
"id": "6726dcd060ad4095c42fdf176b2c3a43b5742277",
"size": "8363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simulatingaqingnetwork/analyse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "77647"
}
],
"symlink_target": ""
} |
import json
from wtforms import ValidationError
from userver.object.application import Application
from . import api, root
from flask import request, Response
from userver.object.group import Group
from binascii import hexlify
from utils.errors import KeyDuplicateError, PatchError
from .decorators import group_belong_to_user, group_filter_valid, require_basic_or_oauth
from .forms import get_formdata_from_json_or_form
from .forms.form_group import AddGroupForm, PatchGroup, device_operate
@api.route(root + 'groups', methods=['GET', 'DELETE', 'POST'])
@require_basic_or_oauth
@group_filter_valid
def group_list(user, app=None):
    """
    List groups (GET) or create a new group (POST).

    GET: when an app filter is supplied, returns that app's groups; otherwise
    returns groups for every application owned by the user.
    POST: validates AddGroupForm data and creates the group (appskey is only
    passed when non-empty).

    NOTE(review): 'DELETE' is accepted by the route but has no handler branch
    below, so a DELETE request falls through and the view returns None —
    confirm whether it should be removed from `methods` or implemented.
    """
    if request.method == 'GET':
        if app is not None:
            groups = Group.objects.all_dict(app_eui=app.app_eui)
        else:
            groups = []
            apps = Application.query.filter_by(user_id=user.id)
            for app in apps:
                groups.append({'app': hexlify(app.app_eui).decode(), 'groups': Group.objects.all_dict(app.app_eui)})
        groups_json = json.dumps(groups)
        return Response(status=200, response=groups_json)
    elif request.method == 'POST':
        formdata = get_formdata_from_json_or_form(request)
        add_group = AddGroupForm(formdata)
        try:
            if add_group.validate():
                # Only pass appskey when one was actually provided.
                if len(add_group['appskey'].data) != 0:
                    group = Group(add_group['app_eui'].data, add_group['name'].data, add_group['addr'].data, add_group['nwkskey'].data, appskey=add_group['appskey'].data)
                else:
                    group = Group(add_group['app_eui'].data, add_group['name'].data, add_group['addr'].data, add_group['nwkskey'].data)
                group.save()
                return Response(status=201, response=json.dumps(group.obj_to_dict()))
            else:
                return Response(status=406, response=json.dumps({'errors': add_group.errors,
                                                                 'succeed': False}))
        except KeyDuplicateError as error:
            return Response(status=403, response=json.dumps({"error": str(error),
                                                             "succeed": False}))
@api.route(root + 'groups/<id>', methods=['GET', 'PATCH', 'DELETE', 'POST'])
@require_basic_or_oauth
@group_belong_to_user
def group_index(user, app, group):
    """
    Retrieve, modify, delete or operate on a single group.

    GET    - return the group as JSON.
    PATCH  - apply field updates from the request body via PatchGroup.
    DELETE - remove the group.
    POST   - run a device operation (device_operate) on the group.
    """
    if request.method == 'GET':
        group_json = json.dumps(group.obj_to_dict())
        return group_json, 200
    elif request.method == 'PATCH':
        try:
            formdata = get_formdata_from_json_or_form(request)
            PatchGroup.patch(group, formdata)
            return Response(status=200, response=json.dumps(group.obj_to_dict()))
        except (AssertionError, ValidationError, PatchError) as e:
            return json.dumps({"error": str(e)}), 406
    elif request.method == 'DELETE':
        try:
            group.delete()
            # Bug fix: a successful delete previously reported
            # 'succeed': False; report success correctly.
            return json.dumps({'errors': "Group: %s deleted." % hexlify(group.id).decode(),
                               'succeed': True}), 200
        except Exception as e:
            return json.dumps({'errors': "Fail to delete group: %s.\n%s" % (hexlify(group.id).decode(), str(e)),
                               'succeed': False}), 400
    elif request.method == 'POST':
        formdata = get_formdata_from_json_or_form(request)
        error = device_operate(group, formdata)
        if error is None or len(error) == 0:
            return json.dumps({'success': True}), 200
        else:
            return json.dumps({'error': str(error)}), 406
| {
"content_hash": "80a7ed3e6f7a239bc80d9210c04ac3cb",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 170,
"avg_line_length": 45.5625,
"alnum_prop": 0.5901234567901235,
"repo_name": "soybean217/lora-python",
"id": "858d9c712b50be292785ee9d63260dc6687fbf7c",
"size": "3645",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UServer/http_api_oauth/api/api_group.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "721"
},
{
"name": "JavaScript",
"bytes": "27647"
},
{
"name": "Python",
"bytes": "808327"
}
],
"symlink_target": ""
} |
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import tables
from openstack_dashboard import api
class CreateAlarm(tables.LinkAction):
    # Table action rendering a link that opens the "create alarm" modal
    # (url is an in-page anchor; data-toggle wires up the Bootstrap modal).
    name = "create"
    verbose_name = _("Create Alarm")
    iconfont = "iconfont icon-alarm media-object"
    card = "card card-red"
    attrs={"data-toggle": "modal"}
    url = "#modalConfirm"
    classes = ("ajax-modal", "btn-create")
class FilterAlarm(tables.LinkAction):
    # Table action rendering a link that opens the "filter alarm" modal
    # (same modal anchor and Bootstrap wiring as CreateAlarm).
    name = "filter"
    verbose_name = _("Filter Alarm")
    iconfont = "iconfont icon-hourglass media-object"
    card = "card card-blue"
    attrs={"data-toggle": "modal"}
    url = "#modalConfirm"
    classes = ("ajax-modal", "btn-create")
class MonitorIndicatorsTable(tables.DataTable):
    # Horizon data table listing monitor indicators with name/description
    # columns and the create/filter alarm actions.
    name = tables.Column("name", verbose_name=_("Name"))
    description = tables.Column("description", verbose_name=_("Description"))
    def sanitize_id(self, obj_id):
        # NOTE(review): get_int_or_uuid is not defined or imported in this
        # module as shown — calling this would raise NameError; confirm the
        # intended import (it usually comes from a horizon utils module).
        return get_int_or_uuid(obj_id)
    class Meta:
        # NOTE(review): "monitorin_dicators" looks like a typo for
        # "monitor_indicators", but the slug may be referenced by templates
        # or URLs — confirm before renaming.
        name = "monitorin_dicators"
        verbose_name = _("Monitor Indicators")
        table_actions = (FilterAlarm, CreateAlarm)
"content_hash": "2515eaaf920ee16550918b3c15a22097",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 77,
"avg_line_length": 31.44736842105263,
"alnum_prop": 0.6502092050209205,
"repo_name": "MKTCloud/MKTCloud",
"id": "5d65067a15e2d7c32578f6f83b3d729cadcfaa1f",
"size": "1195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/monitor/monitor/monitor_indicators/tables.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "47811"
},
{
"name": "HTML",
"bytes": "210637"
},
{
"name": "JavaScript",
"bytes": "379783"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "1855886"
},
{
"name": "Shell",
"bytes": "14710"
}
],
"symlink_target": ""
} |
"""Re-segment raw read and re-label the data"""
########################################################################
# File: event_detection.py
# executable: event_detection.py
#
# Author: Andrew Bailey
# History: 10/6/17 Created
########################################################################
import sys
import os
import collections
import re
import numpy as np
import traceback
from collections import defaultdict
from timeit import default_timer as timer
from PyPore.parsers import SpeedyStatSplit
from nanotensor.fast5 import Fast5
from nanonet.eventdetection.filters import minknow_event_detect
from nanonet.segment import segment
from nanonet.features import make_basecall_input_multi
from py3helpers.utils import check_numpy_table, list_dir, TimeStamp, change_np_field_type, merge_dicts
from py3helpers.seq_tools import create_fastq_line, check_fastq_line, ReverseComplement, pairwise_alignment_accuracy
def create_speedy_event_table(signal, sampling_freq, start_time, min_width=5, max_width=80, min_gain_per_sample=0.008,
                              window_width=800):
    """Create new event table using SpeedyStatSplit Event detection

    :param signal: list or array of signal in pA for finding events
    :param sampling_freq: sampling frequency of ADC in Hz
    :param start_time: start time from fast5 file (time in seconds * sampling frequency)
    :param min_width: param for SpeedyStatSplit
    :param max_width: param for SpeedyStatSplit
    :param min_gain_per_sample: param for SpeedyStatSplit
    :param window_width: param for SpeedyStatSplit
    :return: structured array of events; the 'model_state', 'move' and
        'p_model_state' columns are zero-initialized placeholders to be
        filled in by later labelling steps
    """
    assert np.sign(start_time) == 1, "Start time has to be positive: {}".format(start_time)
    assert type(signal[0]) is np.float64, "Signal needs to be in pA. Not ADC counts"
    # define speedy stat split
    parser = SpeedyStatSplit(min_width=min_width, max_width=max_width,
                             min_gain_per_sample=min_gain_per_sample,
                             window_width=window_width, sampling_freq=sampling_freq)
    # parse events
    events = parser.parse(np.asarray(signal, dtype=np.float64))
    num_events = len(events)
    # Bug fix: use np.zeros rather than np.empty so the columns that are not
    # assigned below (model_state, move, p_model_state) hold deterministic
    # values instead of uninitialized memory.
    event_table = np.zeros(num_events, dtype=[('start', float), ('length', float),
                                              ('mean', float), ('stdv', float),
                                              ('model_state', 'S5'), ('move', '<i4'),
                                              ('raw_start', int), ('raw_length', int),
                                              ('p_model_state', float)])
    # set events into event table
    for i, event in enumerate(events):
        # convert raw sample indices to seconds, offset by the run start time
        event_table['start'][i] = event.start / sampling_freq + (start_time / sampling_freq)
        event_table['raw_start'][i] = event.start
        event_table['length'][i] = event.duration / sampling_freq
        event_table['raw_length'][i] = event.duration
        event_table['mean'][i] = event.mean
        event_table['stdv'][i] = event.std
    return event_table
def create_minknow_event_table(signal, sampling_freq, start_time,
                               window_lengths=(16, 40), thresholds=(8.0, 4.0), peak_height=1.0):
    """Create new event table using minknow_event_detect event detection

    :param signal: list or array of signal in pA for finding events
    :param sampling_freq: sampling frequency of ADC in Hz
    :param start_time: start time from fast5 file (time in seconds * sampling frequency)
    :param window_lengths: t-test windows for minknow_event_detect
    :param thresholds: t-test thresholds for minknow_event_detect
    :param peak_height: peak height param for minknow_event_detect
    :return: structured array of events; the 'model_state', 'move' and
        'p_model_state' columns are zero-initialized placeholders to be
        filled in by later labelling steps
    """
    assert np.sign(start_time) == 1, "Start time has to be positive: {}".format(start_time)
    assert type(signal[0]) is np.float64, "Signal needs to be in pA. Not ADC counts"
    events = minknow_event_detect(np.asarray(signal, dtype=float), sample_rate=sampling_freq,
                                  get_peaks=False, window_lengths=window_lengths,
                                  thresholds=thresholds, peak_height=peak_height)
    num_events = len(events)
    # Bug fix: use np.zeros rather than np.empty so the columns that are not
    # assigned below (model_state, move, p_model_state) hold deterministic
    # values instead of uninitialized memory.
    event_table = np.zeros(num_events, dtype=[('start', float), ('length', float),
                                              ('mean', float), ('stdv', float),
                                              ('model_state', 'S5'), ('move', '<i4'),
                                              ('raw_start', int), ('raw_length', int),
                                              ('p_model_state', float)])
    for i, event in enumerate(events):
        # events come back in seconds; also derive raw sample offsets
        event_table['start'][i] = event["start"] + (start_time / sampling_freq)
        event_table['length'][i] = event["length"]
        event_table['mean'][i] = event["mean"]
        event_table['stdv'][i] = event["stdv"]
        event_table['raw_start'][i] = np.round(event["start"] * sampling_freq)
        event_table['raw_length'][i] = np.round(event["length"] * sampling_freq)
    return event_table
def create_anchor_kmers(new_events, old_events):
    """
    Create anchor kmers for new event table.
    Basically, grab kmer and move information from previous event table and
    pull events covering the same time span into new event table.
    Both tables must use the same units (seconds) in 'start'/'length'.
    :param new_events: new event table
    :param old_events: event table from Fast5 file
    :return: new_events trimmed to the span covered by old_events, with
        model_state/move/p_model_state filled in from the best-matching kmers
    """
    num_old_events = len(old_events)
    check_numpy_table(new_events, req_fields=('start', 'length', 'mean', 'stdv', 'model_state', 'move', 'p_model_state'))
    check_numpy_table(old_events, req_fields=('start', 'length', 'mean', 'stdv', 'model_state', 'move', 'p_model_state'))
    # index of old events
    old_indx = 0
    # start index to trim new_events for those with data from old_events
    start_index = 0
    end_index = len(new_events)
    # personal tracker for dealing with how the segmentation algorithm is working
    most_moves = 0
    # tracking overlaped events
    selected_overlap = False
    check_overlap = False
    homopolymer = False
    # keep track of events passed
    last_left_over = 0
    for i, event in enumerate(new_events):
        # skip events that occur before labels from old events
        if old_events[0]["start"] <= event["start"]:
            # time of old event in new event for a given kmer
            time = []
            probs = []
            moves = []
            kmers = []
            # new event's start and end
            # rounding to 7 decimals suppresses float noise in time comparisons
            current_event_start = round(event["start"], 7)
            current_event_end = round(current_event_start + event["length"], 7)
            # if first event or event start is after current old_event start.
            if old_indx != num_old_events:
                prev_kmer = str()
                num_loops = 0
                # print(round(old_events[old_indx]["start"], 7), old_events[old_indx]["length"], current_event_end, round(old_events[old_indx]["start"], 7) < current_event_end)
                # collect every old event that overlaps the current new event
                # NOTE(review): old_indx is indexed before the bounds check in
                # this condition; staying in range relies on the break at the
                # bottom of the loop — confirm
                while round(old_events[old_indx]["start"], 7) < current_event_end and old_indx != num_old_events:
                    # print("INSIDE LOOP", round(old_events[old_indx]["start"], 7), old_events[old_indx]["length"], current_event_end, round(old_events[old_indx]["start"], 7) < current_event_end)
                    # deal with bad event files and final event
                    if old_indx == num_old_events-1:
                        old_event_end = round(old_events[old_indx]["start"] + old_events[old_indx]["length"], 7)
                    else:
                        old_event_end = round(old_events[old_indx+1]["start"], 7)
                    old_event_start = round(old_events[old_indx]["start"], 7)
                    old_kmer = bytes.decode(old_events[old_indx]["model_state"])
                    # homopolymers or stays should be tracked together
                    if old_kmer == prev_kmer:
                        if len(set(old_kmer)) == 1:
                            # NOTE(review): `index` here still holds its value
                            # from the previous loop iteration — confirm that
                            # clearing that earlier kmer's move is intended
                            if not homopolymer and selected_overlap and num_loops <= 1:
                                moves[index] = 0
                            homopolymer = True
                        else:
                            homopolymer = False
                        index = kmers.index(old_kmer)
                        probs[index] = max(probs[index], old_events[old_indx]["p_model_state"])
                        moves[index] += old_events[old_indx]["move"]
                    else:
                        # add new kmer
                        index = len(time)
                        kmers.append(old_kmer)
                        probs.append(old_events[old_indx]["p_model_state"])
                        moves.append(old_events[old_indx]["move"])
                        time.append(0)
                        homopolymer = False
                        prev_kmer = old_kmer
                    # if old event passes through current event calculate correct time in current event
                    # deal with old events ending after the new event end
                    if old_event_end > current_event_end:
                        time[index] += current_event_end - old_event_start
                        new_check_overlap = True
                        break
                    # check if entire old event is within the new event or not
                    else:
                        if old_event_start < current_event_start:
                            time[index] += old_event_end - current_event_start
                        else:
                            time[index] += old_event_end - old_event_start
                        # if old_event_end != current_event_end:
                        old_indx += 1
                        new_check_overlap = False
                    num_loops += 1
                    # break loop at end of old events
                    if old_indx == num_old_events:
                        break
            else:
                # old events exhausted: mark where to trim the new table
                end_index = i
            num_kmers = len(kmers)
            # select index of best kmer to assign
            if num_kmers == 1:
                best_index = 0
                left_over = 0
            elif num_kmers > 1:
                # select on time in new event only
                best_index = time.index(max(time))
                # if there are several old events in a new event, track how many
                if new_check_overlap:
                    left_over = sum(moves[best_index+1:-1])
                else:
                    left_over = sum(moves[best_index+1:])
            else:
                # end of possible alignments
                end_index = i
                break
            # if previous old event overlapped into current new event
            # check if old event is going to be assigned twice
            if selected_overlap and best_index == 0 and check_overlap:
                if homopolymer:
                    move = moves[best_index]
                else:
                    move = 0
            elif selected_overlap and best_index != 0 and check_overlap:
                move = min(5, moves[best_index] + last_left_over)
            else:
                # moves are capped at 5 (maximum skip the model supports here)
                move = min(5, moves[best_index]+sum(moves[:best_index])+last_left_over)
            if most_moves < moves[best_index]+sum(moves[:best_index])+last_left_over:
                most_moves = moves[best_index]+sum(moves[:best_index])+last_left_over
            # print(kmers, moves, left_over, moves[best_index], sum(moves[:best_index]), last_left_over, move)
            # if new overlap
            if new_check_overlap:
                # new overlapped event will be tracked on next new_event so we drop a left_over count
                left_over = max(0, left_over-1)
                if most_moves < left_over-1:
                    most_moves = left_over-1
                # check if we currently selected an overlapping old event
                if best_index == num_kmers-1:
                    selected_overlap = True
                else:
                    selected_overlap = False
            else:
                selected_overlap = False
            kmer = kmers[best_index]
            prob = probs[best_index]
            # assign event probs, move and model state
            event["p_model_state"] = prob
            event["move"] = move
            event["model_state"] = kmer
            check_overlap = new_check_overlap
            last_left_over = left_over
            new_check_overlap = False
            homopolymer = False
        else:
            # skip events that start before the first old label
            start_index = i + 1
    # print(most_moves)
    return new_events[start_index:end_index]
def check_event_table_time(event_table):
    """Verify that each event starts exactly where the previous one ended.

    :param event_table: event table with "start" and "length" columns
    :return: True when start/length arithmetic is consistent, False otherwise
    """
    check_numpy_table(event_table, req_fields=('start', 'length'))
    starts = event_table["start"]
    lengths = event_table["length"]
    for idx in range(1, len(event_table)):
        expected_start = starts[idx - 1] + lengths[idx - 1]
        # exact comparison on purpose: any drift means the table is broken
        if expected_start != starts[idx]:
            return False
    return True
def resegment_reads(fast5_path, params, speedy=False, overwrite=False):
    """Re-segment and create anchor alignment from previously base-called fast5 file
    :param fast5_path: path to fast5 file
    :param params: event detection parameters
    :param speedy: boolean option for speedyStatSplit or minknow
    :param overwrite: overwrite a previous event re-segmented event table
    :return: Fast5 handle with the new event table and fastq written
    """
    assert os.path.isfile(fast5_path), "File does not exist: {}".format(fast5_path)
    # key under Analyses/ where the new event table and fastq are stored
    # NOTE(review): the trailing "{}" placeholder is never formatted — confirm
    # whether an index number was meant to be filled in here
    name = "ReSegmentBasecall_00{}"
    # create Fast5 object
    f5fh = Fast5(fast5_path, read='r+')
    # gather previous event detection
    old_event_table = f5fh.get_basecall_data()
    # assert check_event_table_time(old_event_table), "Old event is not consistent"
    read_id = bytes.decode(f5fh.raw_attributes['read_id'])
    sampling_freq = f5fh.sample_rate
    start_time = f5fh.raw_attributes['start_time']
    # pick event detection algorithm
    signal = f5fh.get_read(raw=True, scale=True)
    if speedy:
        event_table = create_speedy_event_table(signal, sampling_freq, start_time, **params)
        params = merge_dicts([params, {"event_detection": "speedy_stat_split"}])
    else:
        event_table = create_minknow_event_table(signal, sampling_freq, start_time, **params)
        params = merge_dicts([params, {"event_detection": "minknow_event_detect"}])
    keys = ["nanotensor version", "time_stamp"]
    values = ["0.2.0", TimeStamp().posix_date()]
    attributes = merge_dicts([params, dict(zip(keys, values)), f5fh.raw_attributes])
    if f5fh.is_read_rna():
        # RNA basecall tables store raw indexes; convert to seconds so they
        # can be aligned with the freshly detected events
        old_event_table = index_to_time(old_event_table, sampling_freq=sampling_freq, start_time=start_time)
    # set event table
    new_event_table = create_anchor_kmers(new_events=event_table, old_events=old_event_table)
    f5fh.set_new_event_table(name, new_event_table, attributes, overwrite=overwrite)
    # gather new sequence
    sequence = sequence_from_events(new_event_table)
    if f5fh.is_read_rna():
        # RNA reads are sequenced 3'->5'; reverse and swap T for U
        sequence = ReverseComplement().reverse(sequence)
        sequence = sequence.replace("T", "U")
    # placeholder qualities: '!' encodes the lowest phred score
    quality_scores = '!'*len(sequence)
    fastq = create_fastq_line(read_id+" :", sequence, quality_scores)
    # set fastq
    f5fh.set_fastq(name, fastq)
    return f5fh
def index_to_time(basecall_events, sampling_freq=0, start_time=0):
    """Convert RNA basecall read start and length from raw-signal indexes to seconds.

    :param basecall_events: basecall events from albacore/metrichor basecalled event table
    :param sampling_freq: sampling frequency of experiment in Hz; must be non-zero
    :param start_time: start time of experiment from the fast5 file; must be non-zero
    :return: event table with float 'start'/'length' columns expressed in seconds
    """
    check_numpy_table(basecall_events, req_fields=('start', 'length'))
    # bug fix: the failure message previously claimed "np.int32" although the
    # check requires np.uint64
    assert basecall_events["start"].dtype is np.dtype('uint64'), "Event start should be np.uint64 type: {}"\
        .format(basecall_events["start"].dtype)
    assert sampling_freq != 0, "Must set sampling frequency"
    assert start_time != 0, "Must set start time"
    event_table = change_np_field_type(basecall_events, 'start', float)
    event_table = change_np_field_type(event_table, 'length', float)
    # index -> seconds, shifted by the experiment start time (also in seconds)
    event_table["start"] = (event_table["start"] / sampling_freq) + (start_time / sampling_freq)
    event_table["length"] = event_table["length"] / float(sampling_freq)
    return event_table
def time_to_index(event_table, sampling_freq=0, start_time=0):
    """Convert event start and length from seconds back to raw signal indexes.

    :param event_table: basecall events from albacore/metrichor basecalled event table
    :param sampling_freq: sampling frequency of experiment in Hz; must be non-zero
    :param start_time: start time of experiment from the fast5 file; must be non-zero
    :return: event table with integer 'start'/'length' columns in raw indexes
    """
    check_numpy_table(event_table, req_fields=('start', 'length'))
    # bug fix: the failure message previously claimed "np.int32" although the
    # check rejects np.uint64
    assert event_table["start"].dtype is not np.dtype('uint64'), "Event start should not be np.uint64 type: {}" \
        .format(event_table["start"].dtype)
    assert sampling_freq != 0, "Must set sampling frequency"
    assert start_time != 0, "Must set start time"
    # seconds -> raw index, removing the experiment start-time offset first
    event_table["start"] = np.round((event_table["start"] - (start_time / float(sampling_freq))) * sampling_freq)
    event_table["length"] = np.round(event_table["length"] * sampling_freq)
    event_table = change_np_field_type(event_table, 'start', int)
    event_table = change_np_field_type(event_table, 'length', int)
    return event_table
def sequence_from_events(events):
    """Reconstruct the called sequence from an event table.

    :param events: event table with 'model_state' and 'move' fields
    :return: sequence string
    """
    check_numpy_table(events, req_fields=("model_state", "move"))
    pieces = []
    for idx, event in enumerate(events):
        if idx == 0:
            # seed with the entire first kmer
            pieces.extend(chr(base) for base in event['model_state'])
        elif event['move'] > 0:
            # append only the bases the model moved past
            pieces.append(bytes.decode(event['model_state'][-event['move']:]))
    return ''.join(pieces)
def get_resegment_accuracy(fast5handle, section="template"):
    """Get accuracy comparison between original sequence and resegmented generated sequence
    :param fast5handle: Fast5 object with re-segmented read
    :param section: basecall section to compare (default "template")
    """
    assert isinstance(fast5handle, Fast5), "fast5handle needs to be a Fast5 instance"
    # get fastqs
    # NOTE(review): the re-segmented fastq is used as-is while the original is
    # bytes-decoded and stripped of its trailing newline — confirm get_fastq
    # returns str for the ReSegmentBasecall analysis
    resegment_fastq = fast5handle.get_fastq(analysis="ReSegmentBasecall", section=section)
    original_fastq = bytes.decode(fast5handle.get_fastq(analysis="Basecall_1D", section=section))[:-1]
    # make sure the underlying assumption that we can split on newline is ok
    check_fastq_line(resegment_fastq)
    check_fastq_line(original_fastq)
    # get sequence (second line of a fastq record)
    resegment_seq = resegment_fastq.split('\n')[1]
    original_seq = original_fastq.split('\n')[1]
    return pairwise_alignment_accuracy(original_seq, resegment_seq, soft_clip=True)
def main():
    """Re-segment sample DNA and RNA reads with both event-detection
    algorithms and print the alignment accuracy for each read."""
    start = timer()
    dna_reads = "/Users/andrewbailey/CLionProjects/nanopore-RNN/test_files/minion-reads/canonical/"
    rna_reads = "/Users/andrewbailey/CLionProjects/nanopore-RNN/test_files/minion-reads/rna_reads"
    # bug fix: each of these dicts used to be assigned twice; the earlier
    # assignments were dead code, so only the values that actually took
    # effect are kept
    rna_minknow_params = dict(window_lengths=(5, 10), thresholds=(1.9, 1.0), peak_height=1.2)
    rna_speedy_params = dict(min_width=5, max_width=40, min_gain_per_sample=0.008, window_width=800)
    dna_minknow_params = dict(window_lengths=(5, 10), thresholds=(2.0, 1.1), peak_height=1.2)
    dna_speedy_params = dict(min_width=5, max_width=80, min_gain_per_sample=0.008, window_width=800)
    rna_files = list_dir(rna_reads, ext='fast5')
    dna_files = list_dir(dna_reads, ext='fast5')
    print("MAX RNA SKIPS: Speedy")
    for fast5_path in rna_files:
        print(fast5_path)
        f5fh = resegment_reads(fast5_path, rna_speedy_params, speedy=True, overwrite=True)
        print(get_resegment_accuracy(f5fh))
    print("MAX RNA SKIPS: Minknow")
    for fast5_path in rna_files:
        f5fh = resegment_reads(fast5_path, rna_minknow_params, speedy=False, overwrite=True)
        print(get_resegment_accuracy(f5fh))
    print("MAX DNA SKIPS: speedy")
    for fast5_path in dna_files:
        print(fast5_path)
        f5fh = resegment_reads(fast5_path, dna_speedy_params, speedy=True, overwrite=True)
        print(get_resegment_accuracy(f5fh))
    print("MAX DNA SKIPS:Minknow")
    for fast5_path in dna_files:
        f5fh = resegment_reads(fast5_path, dna_minknow_params, speedy=False, overwrite=True)
        print(get_resegment_accuracy(f5fh))
    stop = timer()
    print("Running Time = {} seconds".format(stop - start), file=sys.stderr)
if __name__ == "__main__":
    main()
    # redundant with falling off the end of the script, but makes the intent
    # to terminate explicit
    raise SystemExit
| {
"content_hash": "7766eb61cbf08fcae479a63aed734068",
"timestamp": "",
"source": "github",
"line_count": 452,
"max_line_length": 195,
"avg_line_length": 47.743362831858406,
"alnum_prop": 0.6018535681186283,
"repo_name": "UCSC-nanopore-cgl/nanopore-RNN",
"id": "27e988fa056f0809d4866855ae52c3c5ddbfe104",
"size": "21602",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nanotensor/event_detection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "765"
},
{
"name": "Python",
"bytes": "440680"
},
{
"name": "Shell",
"bytes": "4625"
}
],
"symlink_target": ""
} |
import logging
import os
import paramiko
import signal
import socket
import sys
import time
import urllib.parse
import urllib.request
# seconds to keep probing a restarted app before giving up (see probe_service)
PROBE_TIME_OUT_SECONDS = 300
# account used for SSH connections to the app servers
SSH_USERNAME = "tds"
# TCP port this deploy service listens on
TCP_PORT = 15621
class CaptainException(Exception):
    """Raised when a deploy step fails; handle_update reports it as HTTP 400."""
    pass
def setup_logger():
    """Create (or fetch) the shared "TDSLogger" logging INFO to the console.

    ``logging.getLogger`` returns the same logger object on every call, so
    the handler is only attached when none exists yet — the previous
    unconditional ``addHandler`` duplicated every log line if this function
    ran more than once.

    :return: the configured ``logging.Logger`` instance
    """
    logger = logging.getLogger("TDSLogger")
    logger.setLevel(logging.INFO)
    if not logger.handlers:
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.INFO)
        formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)
    return logger
def start():
    """Run the blocking deploy server: accept one TCP connection at a time,
    route it through handle_request and write a minimal HTTP/JSON reply."""
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # bind on all interfaces
    server.bind(("", TCP_PORT))
    server.listen(1)
    logger.info("Listening for client...")
    while True:
        client, addr = server.accept()
        logger.info("Connection from: %s", repr(addr))
        # single recv: assumes the whole request fits in 4096 bytes
        request = client.recv(4096)
        response = handle_request(request)
        # hand-built HTTP response; NOTE(review): response['message'] is not
        # JSON-escaped, so quotes in an error message would break the body
        response_message = "HTTP/1.1 " + response['status'] + "\nContent-Type: application/json\n\n{\"status\": \"" + \
                           response['status'] + "\", \"message\": \"" + response['message'] + "\"}\n"
        client.send(response_message.encode("UTF-8"))
        client.close()
def signal_handler(sig, frame):
    """Terminate cleanly on SIGINT/SIGTERM by raising SystemExit(0)."""
    raise SystemExit(0)
def handle_request(request):
    """Route a raw HTTP request to the matching handler.

    :param request: raw request bytes as read from the socket
    :return: dict with 'status' and 'message' keys
    """
    # the request target is the second token of the request line
    request_target = request.decode("UTF-8").split(" ")[1]
    parsed = urllib.parse.urlparse(request_target)
    options = urllib.parse.parse_qs(parsed.query)
    if parsed.path == "/update/":
        return handle_update(options)
    return {
        "message": "Don't know how to handle request: " + request_target,
        "status": "500"
    }
def handle_update(options):
    """Deploy a new docker image of an app across its environment's servers.

    Required options (lists, as produced by ``urllib.parse.parse_qs``):
    ``app``, ``env``, ``docker_image_name``, ``docker_image_tag``.
    Optional: ``gracefully_kill_workers``, ``probe_path``.

    The update runs in three passes over the environment's servers: find the
    ones actually running the app, pull the new image on each of them, then
    restart (and optionally probe) the service.

    :param options: parsed query-string options
    :return: dict with 'status' and 'message' describing the outcome
    """
    response = {
        "message": "OK",
        "status": "200"
    }
    try:
        if (options_correct(options)):
            app = options["app"][0]
            docker_image_name = options["docker_image_name"][0]
            docker_image_tag = options["docker_image_tag"][0]
            gracefully_kill_workers = options.get("gracefully_kill_workers", [""])[0]
            probe_path = options.get("probe_path", [""])[0]
            app_environment = options["env"][0].upper()
            # e.g. APP_SERVERS_PRODUCTION holds a space-separated host list
            app_servers_var = "APP_SERVERS_" + app_environment
            servers = os.environ[app_servers_var].split(" ")
            servers_to_update = []
            logger.info("Asking to update:")
            logger.info("- environment: %s", app_environment)
            logger.info("- servers: %s", servers)
            logger.info("- app: %s", app)
            logger.info("- docker_image_name: %s", docker_image_name)
            logger.info("- docker_image_tag: %s", docker_image_tag)
            logger.info("- gracefully_kill_workers: %s", gracefully_kill_workers)
            logger.info("- probe_path: %s", probe_path)
            # pass 1: find the servers that actually run the app
            for server in servers:
                logger.info("Connecting to %s...", server)
                client = paramiko.client.SSHClient()
                client.load_system_host_keys()
                client.set_missing_host_key_policy(paramiko.client.AutoAddPolicy)
                client.connect(server, "22", SSH_USERNAME)
                service_present = app_service_present(client, app)
                if (service_present):
                    logger.info("App present on %s", server)
                    servers_to_update.append(server)
                else:
                    logger.info("App not present on %s", server)
                client.close()
            # pass 2: pull the new image on every affected server
            for server in servers_to_update:
                logger.info("Updating %s:%s on %s", docker_image_name, docker_image_tag, server)
                client = paramiko.client.SSHClient()
                client.load_system_host_keys()
                client.set_missing_host_key_policy(paramiko.client.AutoAddPolicy)
                client.connect(server, "22", SSH_USERNAME)
                logger.info("Start with removing old docker images")
                remove_old_docker_images(client)
                update_docker_image_response = update_docker_image(client, docker_image_name, docker_image_tag)
                if (update_docker_image_response != 0):
                    # bug fix: Logger.warn is a deprecated alias of warning
                    logger.warning("Updating of the docker image on %s failed, the deploy process was stopped.", server)
                    client.close()
                    raise CaptainException(
                        "Updating of the docker image on " + server + " failed, the deploy process was stopped.")
                else:
                    logger.info("Updating %s:%s on %s successful", docker_image_name, docker_image_tag, server)
                client.close()
            # pass 3: restart each server, optionally draining workers and
            # probing the app before moving to the next server
            for server in servers_to_update:
                client = paramiko.client.SSHClient()
                client.load_system_host_keys()
                client.set_missing_host_key_policy(paramiko.client.AutoAddPolicy)
                client.connect(server, "22", SSH_USERNAME)
                if (gracefully_kill_workers != ""):
                    logger.info("Waiting for the workers for %s to stop working on %s...", app, server)
                    gracefully_quit_workers(client, app)
                logger.info("Restarting %s on %s", app, server)
                restart_service_response = restart_service(client, app)
                if (restart_service_response != 0):
                    logger.warning("Restarting %s on %s failed, the deploy process was stopped.", app, server)
                    client.close()
                    raise CaptainException(
                        "Restarting " + app + " on " + server + " failed, the deploy process was stopped.")
                else:
                    logger.info("Restarting %s on %s successful", app, server)
                    if (probe_path != ""):
                        logger.info("Probing %s", probe_path)
                        logger.info("Waiting at most %s seconds for %s to come back on %s...", PROBE_TIME_OUT_SECONDS, app, server)
                        probe_success = probe_service(client, app, probe_path)
                        if not (probe_success):
                            logger.warning("It seems that %s on %s did not come back up, the deploy process was stopped.", app,
                                           server)
                            client.close()
                            raise CaptainException(
                                "It seems that " + app + " on " + server + " did not come back up, the deploy process was stopped.")
                        else:
                            logger.info("App %s on %s is accepting http requests again.", app, server)
                client.close()
            logger.info("%s was deployed on all servers", app)
        else:
            # bug fix: the old call passed ``options`` as an extra logging arg
            # with no format placeholder, which broke log formatting at runtime
            logger.warning("Options are not correct: %s", options)
            response = {
                "message": "missing options",
                "status": "400"
            }
    except Exception as e:
        response = {
            "message": format(e),
            "status": "400"
        }
    # bug fix: the old ``finally: return response`` swallowed any in-flight
    # exception (including SystemExit from the signal handler); return normally
    return response
def options_correct(options):
    """Return True when every required deploy option is present.

    :param options: parsed query-string dict
    """
    required = ("app", "env", "docker_image_name", "docker_image_tag")
    return all(key in options for key in required)
def app_service_present(client, app):
    """Return True when a systemd unit matching *app* exists on the remote host.

    :param client: connected SSH client
    :param app: service/app name to look for
    """
    _, stdout, _ = client.exec_command("systemctl list-units | grep -w " + app)
    # grep prints nothing when the unit is absent
    return stdout.read().decode() != ""
def remove_old_docker_images(client):
    """Delete dangling docker images on the remote host.

    :param client: connected SSH client
    :return: True when the remote command produced any stdout output
    """
    cleanup_command = "docker rmi `docker images -q -f 'dangling=true'`"
    _, stdout, _ = client.exec_command(cleanup_command)
    return stdout.read().decode() != ""
def update_docker_image(client, docker_image_name, docker_image_tag):
    """Pull the requested image from the private registry on the remote host.

    Registry location comes from the DOCKER_REGISTRY_HOST / DOCKER_REGISTRY_PORT
    environment variables.

    :return: exit status of the remote ``docker pull`` command
    """
    registry = os.environ["DOCKER_REGISTRY_HOST"] + ":" + os.environ["DOCKER_REGISTRY_PORT"]
    docker_image = registry + "/thedutchselection/" + docker_image_name + ":" + docker_image_tag
    stdin, stdout, stderr = client.exec_command("docker pull " + docker_image)
    return stdout.channel.recv_exit_status()
def restart_service(client, app):
    """Restart the app's systemd service on the remote host.

    :return: exit status of the remote restart command
    """
    _, stdout, _ = client.exec_command("sudo systemctl restart " + app)
    return stdout.channel.recv_exit_status()
def gracefully_quit_workers(client, app):
    """Ask the app container to drain its workers before a restart.

    :return: exit status of the in-container drain script
    """
    _, stdout, _ = client.exec_command("docker exec " + app + " /usr/local/bin/gracefully_quit_workers.sh")
    return stdout.channel.recv_exit_status()
def probe_service(client, app, probe_path, timeout_seconds=300):
    """Poll the app's HTTP endpoint via its docker gateway until it returns 200.

    :param client: connected SSH client for the remote host
    :param app: docker container name
    :param probe_path: path to request, e.g. "/status"
    :param timeout_seconds: how long to keep probing; the default mirrors
        PROBE_TIME_OUT_SECONDS
    :return: True when the app answered 200 within the timeout, else False
    """
    deadline = time.time() + timeout_seconds
    while True:
        # resolve the container's gateway IP and published host port
        stdin, stdout, stderr = client.exec_command(
            "docker inspect --format '{{ .NetworkSettings.Gateway }}' " + app)
        docker_gateway = stdout.read().decode().rstrip()
        stdin, stdout, stderr = client.exec_command("docker port " + app)
        docker_port_result = stdout.read().decode().rstrip()
        # bug fix: the port used to be sliced to exactly four characters,
        # which truncated five-digit host ports (e.g. 49153 -> "4915");
        # take the full token after the first colon instead
        docker_port = docker_port_result[docker_port_result.index(':') + 1:].split()[0]
        url = "http://" + docker_gateway + ":" + docker_port + probe_path
        stdin, stdout, stderr = client.exec_command(
            "curl -s -o /dev/null -w \"%{http_code}\" " + url)
        status = stdout.read().decode()
        if (status == "200"):
            return True
        if (time.time() > deadline):
            return False
        # probe roughly once per second; sleeping only between attempts avoids
        # the old one-second delay before the very first probe
        time.sleep(1)
# the server starts on import; NOTE(review): consider wrapping these two
# calls in an ``if __name__ == "__main__":`` guard so the module can be
# imported without binding the port
logger = setup_logger()
start()
| {
"content_hash": "83e996a27df58367b6ee114b0146ee25",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 128,
"avg_line_length": 35.825278810408925,
"alnum_prop": 0.5716509287122549,
"repo_name": "TheDutchSelection/captain",
"id": "7af03c0cc73eff84f84aaee056a75ade94bf6088",
"size": "9637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "captain.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1539"
},
{
"name": "Python",
"bytes": "8753"
}
],
"symlink_target": ""
} |
"""
Django settings for mechanical_checklist project.
Generated by 'django-admin startproject' using Django 1.8.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; load it from the
# environment and rotate it before any production deployment
SECRET_KEY = '-cs+182p_@2veke^$z9m9@vnbd@$nat&^chw5td%mklzsp@$8m'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # project app
    'mechanical_checklist.events',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'mechanical_checklist.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, "templates"),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mechanical_checklist.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
# NOTE(review): the BCrypt hashers require the third-party `bcrypt` package —
# verify it is listed in the project requirements
PASSWORD_HASHERS = [
    "django.contrib.auth.hashers.BCryptSHA256PasswordHasher",
]
| {
"content_hash": "3633fe64dc049db2d6c0f5e683cf29df",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 71,
"avg_line_length": 26.229357798165136,
"alnum_prop": 0.6918502973067506,
"repo_name": "matthewjweaver/mechanical-checklist",
"id": "76ff93da000e87721416ceac83388ec92a25bf68",
"size": "2859",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mechanical_checklist/settings.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "422"
},
{
"name": "Python",
"bytes": "9676"
}
],
"symlink_target": ""
} |
import os
from lxml import etree
from django.test import TestCase
from corehq.apps.app_manager.models import Application
from corehq.apps.app_manager.tests.util import SuiteMixin
from corehq.apps.app_manager.translations import escape_output_value
import commcare_translations
class AppManagerTranslationsTest(TestCase, SuiteMixin):
    """Tests for app-manager translation helpers: XML escaping of output
    values and the display names generated for app languages."""
    # configuration consumed via SuiteMixin — presumably where get_json()
    # loads its fixtures from; verify against SuiteMixin
    root = os.path.dirname(__file__)
    file_path = ('data', 'suite')
    def test_escape_output_value(self):
        # pairs of (raw translation text, expected serialized <value> XML);
        # markup-like input must be entity-escaped while <output .../> refs
        # pass through intact
        test_cases = [
            ('hello', '<value>hello</value>'),
            ('abc < def > abc', '<value>abc < def > abc</value>'),
            ("bee's knees", "<value>bee's knees</value>"),
            ('unfortunate <xml expression', '<value>unfortunate <xml expression</value>'),
            (u'क्लिक', '<value>क्लिक</value>'),
            (''', '<value>&#39</value>'),
            ('question1 is <output value="/data/question1" vellum:value="#form/question1"/> !',
             '<value>question1 is <output value="/data/question1" vellum:value="#form/question1"/> !</value>'),
            ('Here is a ref <output value="/data/no_media"/> with some "trailing" text & that\'s some bad < xml.',
             '<value>Here is a ref <output value="/data/no_media"/> with some "trailing" text & that\'s some bad < xml.</value>')
        ]
        for input, expected_output in test_cases:
            self.assertEqual(expected_output, etree.tostring(escape_output_value(input)))
    def test_language_names(self):
        # each language code in the app should map to its display name in the
        # generated default app strings
        app_json = self.get_json('app')
        app_json['langs'] = ['en', 'fra', 'hin', 'pol']
        app = Application.wrap(app_json)
        app.create_suite()
        app_strings = app.create_app_strings('default')
        app_strings_dict = commcare_translations.loads(app_strings)
        self.assertEqual(app_strings_dict['en'], 'English')
        self.assertEqual(app_strings_dict['fra'], u'Français')
        self.assertEqual(app_strings_dict['hin'], u'हिंदी')
        self.assertEqual(app_strings_dict['pol'], 'polski')
| {
"content_hash": "20bd6e932d872b6c6ec62734b21776a4",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 142,
"avg_line_length": 46.88636363636363,
"alnum_prop": 0.619001454192923,
"repo_name": "qedsoftware/commcare-hq",
"id": "10301a0ffbb6e75b04bf601dfe4d82c7afad87b3",
"size": "2099",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/app_manager/tests/test_translations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "508392"
},
{
"name": "HTML",
"bytes": "2869325"
},
{
"name": "JavaScript",
"bytes": "2395360"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "125298"
},
{
"name": "Python",
"bytes": "14670713"
},
{
"name": "Shell",
"bytes": "37514"
}
],
"symlink_target": ""
} |
from datetime import datetime
# Third-party libraries
from flask.ext.login import UserMixin, AnonymousUserMixin
# Other modules
from lemur import db, login_manager
# Association tables for Many-To-Many relationships between various tables
# join table for the Class <-> User many-to-many relationship
association_table_class_user = db.Table('association_class_user',
                                        db.Column('class_id', db.String(128),
                                                  db.ForeignKey('Class.id')),
                                        db.Column('user_id', db.String(64),
                                                  db.ForeignKey('User.id')))
# join table for the User <-> Lab many-to-many relationship
association_table_user_lab = db.Table('association_user_lab',
                                      db.Column('user_id', db.String(64),
                                                db.ForeignKey('User.id')),
                                      db.Column('lab_id', db.String(128),
                                                db.ForeignKey(
                                                    'Lab.id')))
# join table for the Role <-> Power many-to-many relationship
association_table_role_power = db.Table('association_role_power',
                                        db.Column('role_name', db.String(64),
                                                  db.ForeignKey('Role.name')),
                                        db.Column('power_id', db.String(64),
                                                  db.ForeignKey(
                                                      'Power.id')))
# set up class to track the date and time information for an object
class DateTimeInfo(object):
    """Mixin adding creation/update timestamp columns to a model."""
    # set once when the row is inserted
    created_on = db.Column(db.DateTime, default=datetime.now)
    # refreshed automatically on every update
    updated_on = db.Column(db.DateTime, default=datetime.now,
                           onupdate=datetime.now)
# A Lab is in reality a form which consists a list of questions(Experiment)
class Lab(db.Model):
    """A lab is in reality a form: a named, described collection of questions
    (Experiment rows) belonging to one class and shared with many users."""
    __tablename__ = 'Lab'
    id = db.Column(db.String(128), nullable=False, unique=True,
                   primary_key=True)
    name = db.Column(db.String(64), nullable=False)
    # lab description contained in description variable
    description = db.Column(db.String(4096))
    status = db.Column(db.Enum('Activated', 'Downloadable', 'Unactivated',
                               name='status'))
    # one-to-many: a lab can have multiple experiments
    experiments = db.relationship('Experiment', back_populates='lab',
                                  cascade="all, delete, delete-orphan")
    # Many-to-One: a class can have multiple labs
    class_id = db.Column(db.String(128), db.ForeignKey('Class.id'))
    the_class = db.relationship("Class", back_populates="labs")
    # Many-to-Many: a user can have multiple labs and a lab can have multiple
    # users
    users = db.relationship("User", secondary=association_table_user_lab,
                            back_populates="labs")

    def __repr__(self):
        """Readable representation; related rows are reduced to names/ids."""
        # bug fix: the template used to end the class_id fragment with a comma
        # while the next fragment also started with one, rendering ",,"
        tpl = ('Lab<id: {id}, name: {name}'
               ', class_id: {class_id}'
               ', description: {description}, status: {status}'
               ', experiments: {experiments}, the_class: {the_class}'
               ', users: {users}>')
        formatted = tpl.format(id=self.id, name=self.name,
                               class_id=self.class_id,
                               description=self.description,
                               status=self.status,
                               experiments=[e.name for e in self.experiments],
                               the_class=self.the_class.id,
                               users=[u.id for u in self.users])
        return formatted
# An experiment represents a question in a lab form
class Experiment(db.Model):
    """An experiment represents one question in a lab form, with the expected
    answer type and the observations collected for it."""
    __tablename__ = 'Experiment'
    id = db.Column(db.String(192), nullable=False, unique=True,
                   primary_key=True)
    name = db.Column(db.String(64), nullable=False)
    description = db.Column(db.String(1024))
    # position of the question within its lab form
    order = db.Column(db.Integer, nullable=False)
    # Type of value expected for this experiment
    value_type = db.Column(db.Enum('Number', 'Text', name='value_type'))
    # Value range only applies when the type is a number
    value_range = db.Column(db.String(64))
    # Value candidates only applies when the type is text
    value_candidates = db.Column(db.String(512))
    # Many-to-One: a lab can have multiple experiments
    lab_id = db.Column(db.String(128), db.ForeignKey('Lab.id'))
    lab = db.relationship("Lab", back_populates="experiments")
    # One-to-Many: a experiment can have multiple data
    observations = db.relationship('Observation', back_populates='experiment',
                                   cascade="all, delete, delete-orphan")

    def __repr__(self):
        """Readable representation; related rows are reduced to names/ids."""
        # bug fix: the template previously labelled the object "Lab<...>"
        tpl = ('Experiment<id: {id}, name: {name}, description: {description}'
               ', order: {order}, value_type: {value_type}'
               ', value_range: {value_range}'
               ', value_candidates: {value_candidates}'
               ', lab_id: {lab_id}, lab:{lab}'
               ', observations: {observations}>')
        formatted = tpl.format(id=self.id, name=self.name,
                               description=self.description,
                               order=self.order,
                               value_type=self.value_type,
                               value_range=self.value_range,
                               value_candidates=self.value_candidates,
                               lab_id=self.lab_id,
                               lab=self.lab.name,
                               observations=[ob.id for ob in self.observations])
        return formatted
# An Observation a group of students' response towards a question(Experiment)
# in a lab
class Observation(db.Model, DateTimeInfo):
    """A group of students' response towards a question (Experiment) in a lab."""
    __tablename__ = 'Observation'
    id = db.Column(db.String(320), nullable=False, unique=True,
                   primary_key=True)
    student_name = db.Column(db.String(128), nullable=False)
    # The recorded response value, stored as a string regardless of type
    datum = db.Column(db.String(512), nullable=False)
    # Many-to-One: an experiment can have multiple datasets inputted by
    # different students
    experiment_id = db.Column(db.String(192), db.ForeignKey('Experiment.id'))
    experiment = db.relationship("Experiment", back_populates="observations")

    def __repr__(self):
        tpl = ('Observation<experiment_id: {experiment_id}, id: {id},'
               'datum: {datum}>')
        formatted = tpl.format(experiment_id=self.experiment_id, id=self.id,
                               datum=self.datum)
        return formatted
class Class(db.Model):
    """A course section linking its users (professors and students) and labs."""
    __tablename__ = 'Class'
    id = db.Column(db.String(128), nullable=False, unique=True,
                   primary_key=True)
    name = db.Column(db.String(128), nullable=False)
    # Meeting time, stored as a free-form string
    time = db.Column(db.String(128), nullable=False)
    # Many-to-Many: A Class can have multiple users(both professors and
    # students)
    users = db.relationship("User", secondary=association_table_class_user,
                            back_populates="classes")
    # One-to-Many: A Class can have multiple labs
    labs = db.relationship("Lab", back_populates='the_class')

    def __repr__(self):
        tpl = ('Class<id: {id}, time: {time}, name: {name}, users: {users},'
               'labs: {labs}>')
        formatted = tpl.format(id=self.id, time=self.time,
                               name=self.name,
                               users=[u.name for u in self.users],
                               labs=[lab.name for lab in self.labs])
        return formatted
class User(UserMixin, db.Model):
    """An application user; permissions are derived from the assigned Role."""
    __tablename__ = 'User'
    id = db.Column(db.String(64), nullable=False, unique=True, primary_key=True)
    name = db.Column(db.String(128))
    # Many-to-One: A role can have multiple users
    role_name = db.Column(db.String(64), db.ForeignKey('Role.name'))
    role = db.relationship("Role", back_populates="users")
    # Many-to-Many: A User can have multiple classes
    classes = db.relationship("Class", secondary=association_table_class_user,
                              back_populates="users")
    # Many-to-Many: A User can have multiple labs
    labs = db.relationship("Lab", secondary=association_table_user_lab,
                           back_populates="users")

    # Return the ids of all the powers a user has
    def get_power(self):
        return [p.id for p in self.role.powers]

    # can function checks if user is allowed to peform an operation
    def can(self, power):
        # True only when a role is assigned and that role grants `power`.
        return self.role is not None and power in self.get_power()

    def __repr__(self):
        tpl = ('User<id: {id},'
               ' role_name: {role_name}, classes: {classes}, labs: {labs}>')
        formatted = tpl.format(id=self.id,
                               role_name=self.role_name,
                               classes=[c.id for c in self.classes],
                               labs=[l.id for l in self.labs])
        return formatted
# Function for use when user is not logged in
class AnonymousUser(AnonymousUserMixin):
    """Stand-in user for requests with no login; grants no permissions."""
    def can(self, permissions):
        return False
# Associate the LoginManager with the anonymous user class
login_manager.anonymous_user = AnonymousUser
# Function accepts a user id and returns the user object of that id
@login_manager.user_loader
def load_user(id):
    # Flask-Login callback: look the user up by primary key on each request.
    return User.query.get(id)
class Role(db.Model):
    """A named role that grants a set of Powers to its Users."""
    __tablename__ = 'Role'
    name = db.Column(db.String(64), nullable=False, unique=True,
                     primary_key=True)
    # Many-to-Many: A Role can have multiple power; a power can belong to
    # roles
    powers = db.relationship("Power", secondary=association_table_role_power,
                             back_populates="roles")
    # One-to-Many: A Role can have multiple users
    users = db.relationship('User', back_populates='role', lazy='dynamic')

    # For database initialization, no object needed to use this method
    # Load all the Powers and Roles into the database
    @staticmethod
    def insert_roles():
        # Ensure every known permission id exists as a Power row.
        for p in Permission.all_permissions():
            if db.session.query(Power).filter(Power.id == p).count() == 0:
                db.session.add(Power(id=p))
        # Role name -> list of power ids granted to that role.
        roles = {
            'Student': [Permission.DATA_ENTRY],
            'Admin': [Permission.DATA_ENTRY,
                      Permission.DATA_EDIT,
                      Permission.LAB_SETUP,
                      Permission.ADMIN],
            'SuperAdmin': [Permission.DATA_ENTRY,
                           Permission.DATA_EDIT,
                           Permission.LAB_SETUP,
                           Permission.ADMIN,
                           Permission.LAB_MANAGE,
                           Permission.USER_MANAGE,
                           Permission.SUPERADMIN]
        }
        for r in roles:
            role = Role.query.filter_by(name=r).first()
            if role is None:
                role = Role(name=r)
            # NOTE(review): powers are appended even when the role already
            # existed, so re-running this may add duplicate associations —
            # confirm whether insert_roles is meant to be idempotent.
            for p in roles[r]:
                role.powers.append(db.session.query(Power).filter(
                    Power.id == p).one())
            db.session.add(role)
        db.session.commit()

    def __repr__(self):
        tpl = ('Role<name: {name},'
               ' powers: {powers}>')
        formatted = tpl.format(name=self.name,
                               powers=self.powers)
        return formatted
class Power(db.Model):
    """A single permission id that can be granted to Roles."""
    __tablename__ = 'Power'
    id = db.Column(db.String(64), nullable=False, unique=True,
                   primary_key=True)
    # Many-to-Many: A Role can have multiple power; a power can belong to
    # roles
    roles = db.relationship("Role", secondary=association_table_role_power,
                            back_populates="powers")

    def __repr__(self):
        # Bug fix: the template was missing the closing '>' that every other
        # model's repr in this module has.
        tpl = ('Power<id: {id}>')
        formatted = tpl.format(id=self.id)
        return formatted
# A class which consists of all the ids of Power
class Permission:
    """Namespace of all Power ids used for role-based access control."""
    DATA_ENTRY = 'DATA_ENTRY'
    DATA_EDIT = 'DATA_EDIT'
    LAB_SETUP = 'LAB_SETUP'
    ADMIN = 'ADMIN'
    LAB_ACCESS = 'LAB_ACCESS'
    LAB_MANAGE = 'LAB_MANAGE'
    USER_MANAGE = 'USER_MANAGE'
    SUPERADMIN = 'SUPERADMIN'

    # Return all the permissions as a list
    @staticmethod
    def all_permissions():
        """Return every permission id, in declaration order."""
        return [Permission.DATA_ENTRY,
                Permission.DATA_EDIT,
                Permission.LAB_SETUP,
                Permission.ADMIN,
                Permission.LAB_ACCESS,
                Permission.LAB_MANAGE,
                Permission.USER_MANAGE,
                Permission.SUPERADMIN]
| {
"content_hash": "952a3a6bbc38644a21b0ae8c41eb83cc",
"timestamp": "",
"source": "github",
"line_count": 295,
"max_line_length": 80,
"avg_line_length": 42.813559322033896,
"alnum_prop": 0.5519398258115598,
"repo_name": "reed-college/lemur",
"id": "f4fb0a67b6c71575593d525091730c14e39c0392",
"size": "12749",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lemur/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16478"
},
{
"name": "HTML",
"bytes": "90096"
},
{
"name": "JavaScript",
"bytes": "198165"
},
{
"name": "Python",
"bytes": "208840"
},
{
"name": "Shell",
"bytes": "3200"
}
],
"symlink_target": ""
} |
import socket
import sys
from colors import bcolors
# Server endpoint; an empty host string is resolved by sendto()
# (presumably the local machine — TODO confirm against the server setup).
ADDR = ''
PORT = 10000
BUFF_SIZE = 4096  # max bytes read per UDP datagram
device_id = None
server_address = (ADDR, PORT)
# Create a UDP socket
client_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def send_command(sock, message):
    """Send *message* to the server over UDP and block for a single reply.

    Returns the raw response bytes (up to BUFF_SIZE).
    """
    sock.sendto(message.encode(), server_address)
    # Receive response
    print('Waiting for response.....')
    response = sock.recv(BUFF_SIZE)
    return response
def make_message(device_id, action, data=''):
    """Build the JSON-formatted command string sent to the server.

    The optional *data* payload is included only when it is non-empty.
    """
    if not data:
        return '{{ "device" : "{}", "action":"{}" }}'.format(device_id, action)
    return '{{ "device" : "{}", "action":"{}", "data" : "{}" }}'.format(
        device_id, action, data)
def run_action(device_id, action, data=''):
    """Send one command for *device_id* and print the server's response."""
    message = make_message(device_id, action, data)
    # Nothing to send for an empty message.
    if not message:
        return
    print('Send message: {}'.format(message))
    event_response = send_command(client_sock, message).decode('utf-8')
    print('Received response: {}'.format(event_response))
def main():
    """Bring the device online and render LED on/off state until interrupted.

    Expects the device id as the first command-line argument.
    """
    # Bug fix: guard the argument count before indexing sys.argv — the
    # original raised IndexError instead of the intended exit message when
    # no argument was given.
    if len(sys.argv) < 2 or not sys.argv[1]:
        sys.exit('The device id must be specified.')
    device_id = sys.argv[1]
    print('Bringing up device {}'.format(device_id))
    try:
        run_action(device_id, 'detach')
        run_action(device_id, 'attach')
        run_action(device_id, 'event', 'LED is online')
        run_action(device_id, 'subscribe')
        # Render the LED state reported by the server until interrupted.
        while True:
            response = client_sock.recv(BUFF_SIZE)
            message = response.decode('utf-8')
            if message.find("ON") != -1:
                sys.stdout.write(
                    '\r>> ' + bcolors.CGREEN + bcolors.CBLINK +
                    " LED is ON " + bcolors.ENDC + ' <<')
                sys.stdout.flush()
            elif message.find("OFF") != -1:
                # NOTE(review): the '\r >>' prefix differs from the ON branch's
                # '\r>> ' — looks like a typo, left as-is to preserve output.
                sys.stdout.write(
                    '\r >>' + bcolors.CRED + bcolors.BOLD +
                    " LED is OFF " + bcolors.ENDC + ' <<')
                sys.stdout.flush()
    finally:
        print('closing socket')
        client_sock.close()
if __name__ == '__main__':
main()
| {
"content_hash": "2901edd28c92559468476ff55d66fbcd",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 79,
"avg_line_length": 27.493333333333332,
"alnum_prop": 0.553831231813773,
"repo_name": "GoogleCloudPlatform/python-docs-samples",
"id": "ecbf765278e8503989db01fad5b10c6bc22f5158",
"size": "2642",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "iot/api-client/codelabs/ledlight.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8008"
},
{
"name": "Dockerfile",
"bytes": "62031"
},
{
"name": "HTML",
"bytes": "69878"
},
{
"name": "JavaScript",
"bytes": "26494"
},
{
"name": "Jinja",
"bytes": "1892"
},
{
"name": "Jupyter Notebook",
"bytes": "47951698"
},
{
"name": "Makefile",
"bytes": "932"
},
{
"name": "Procfile",
"bytes": "138"
},
{
"name": "PureBasic",
"bytes": "11115"
},
{
"name": "Python",
"bytes": "5323502"
},
{
"name": "Shell",
"bytes": "78261"
}
],
"symlink_target": ""
} |
from collections import defaultdict
import itertools
import time
class FailuresHistory(object):
  """ Stores the history of recent build failures.

  The failures are identified by their unique ID
  (e.g. failed test, memory suppression hash, etc)
  This class is similar to LRU but it also stores counts.
  We don't need very precise data for "old" builds.

  NOTE(review): Python 2 code (iteritems/xrange).
  """
  def __init__(self, expiration_time, size_limit):
    """ expiration_time: don't count failures older than that (in seconds)
    size_limit: drop some old builds when we reach this size.
    It's not a hard limit but rather a recommendation.
    """
    self.expiration_time = expiration_time
    assert size_limit > 1
    self.size_limit = size_limit
    # Countdown that throttles the expensive full cleanup (see below).
    self.full_cleanup_delay = 0
    self.failures = {} # Key: failure_id, Value: list of failure times.
    # Total number of timestamps stored across all failure ids.
    self.failures_count = 0

  def Put(self, failure_id):
    """ Records one failure occurrence at the current time. """
    self.failures.setdefault(failure_id, []).append(time.time())
    self.failures_count += 1
    self._Cleanup(failure_id)
    self._MaybeCleanupFull()

  def GetCount(self, failure_id):
    """ Returns the number of non-expired occurrences of failure_id. """
    if failure_id in self.failures:
      self._Cleanup(failure_id)
    return len(self.failures.get(failure_id, []))

  def _MaybeCleanupFull(self):
    """ Checks the size vs size_limit and maybe aggressively
    cleanup all the queues. The slow path is executed at most once of
    self.size_limit invocations to avoid O(N^2) perf problems.
    """
    if self.failures_count <= self.size_limit:
      return # no cleanup needed yet.
    # We delay full cleanups to avoid doing them on each Put() when we have many
    # singular failures. Otherwise, we'd end up with a O(N^2) algorithm.
    if self.full_cleanup_delay > 0:
      self.full_cleanup_delay -= 1
      return
    self.full_cleanup_delay = self.size_limit
    # If we're lucky, dropping the expired failures is enough.
    for f_id in self.failures.keys():
      self._Cleanup(f_id)
    if self.failures_count <= self.size_limit:
      return
    # Slow path - flatten the dictionary of failures, sort by timestamp,
    # trim the oldest ones. The complexity is O(N*log N) where N is the number
    # of failures recorded.
    all_items = itertools.chain.from_iterable(
        ((f_id, t) for t in timestamps)
        for f_id, timestamps in self.failures.iteritems())
    all_items = sorted(all_items, key=lambda x: x[1])
    drop_items_counts = defaultdict(int)
    for f_id, _ in all_items[:-self.size_limit]:
      # There's a tiny chance we'll count the 'recent' failure to remove
      # but we don't bother.
      drop_items_counts[f_id] += 1
    # Trim the oldest entries per failure id; lists are time-ordered, so
    # dropping a prefix removes exactly the oldest drop_count timestamps.
    for f_id, drop_count in drop_items_counts.iteritems():
      self.failures[f_id] = self.failures[f_id][drop_count:]
      self.failures_count -= drop_count
      if not self.failures[f_id]:
        del self.failures[f_id]
    assert self.failures_count <= self.size_limit

  def _Cleanup(self, failure_id):
    """ Drops old builds for a given failure ID. """
    drop_older_than = time.time() - self.expiration_time
    assert failure_id in self.failures
    # Fast path: the oldest entry is still inside the window.
    if self.failures[failure_id][0] >= drop_older_than:
      return
    old = self.failures[failure_id]
    # Make sure the list of failure times is sorted.
    assert all(old[i] <= old[i+1] for i in xrange(len(old) - 1))
    self.failures[failure_id] = [x for x in old if x > drop_older_than]
    self.failures_count += len(self.failures[failure_id]) - len(old)
    if not self.failures[failure_id]:
      del self.failures[failure_id]
| {
"content_hash": "372350bda34d9228e2e80e21e814f995",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 80,
"avg_line_length": 37.659574468085104,
"alnum_prop": 0.6621468926553672,
"repo_name": "eunchong/build",
"id": "60fea569e84ce07150f63d938cd164243330a798",
"size": "3707",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scripts/master/failures_history.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3128"
},
{
"name": "CSS",
"bytes": "211818"
},
{
"name": "HTML",
"bytes": "429981"
},
{
"name": "JavaScript",
"bytes": "75624"
},
{
"name": "Makefile",
"bytes": "21204"
},
{
"name": "Python",
"bytes": "6143109"
},
{
"name": "Shell",
"bytes": "23512"
}
],
"symlink_target": ""
} |
from app.model.post.service import PostService
from app.util.handler_util import BasicHandler
class IframeHandler(BasicHandler):
    """Render an arbitrary URL inside the iframe wrapper page."""

    def get(self, url):
        return self.page("iframe.html", url=url)
class IndexHandler(BasicHandler):
    """Render the home page with its pagination count."""

    def get(self):
        return self.page("index.html",
                         total_page=PostService.get_total_page())
class AchiveHandler(BasicHandler):
    """Render the archive page listing all posts."""

    def get(self):
        self.page("achive.html", posts=PostService.get_posts())
class ProjectHandler(BasicHandler):
    """Render the static projects page."""
    def get(self):
        self.page("project.html")
class MovieHandler(BasicHandler):
    """Render the static movies page."""
    def get(self):
        self.page("movie.html")
class AboutHandler(BasicHandler):
    """Render the static about page."""
    def get(self):
        self.page("about.html")
| {
"content_hash": "908016d55a3488af149b10d446c129ec",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 54,
"avg_line_length": 23.736842105263158,
"alnum_prop": 0.6529933481152993,
"repo_name": "Jackeriss/Typora-Blog",
"id": "ffc947d4a35a0f7312429f129b49dd3cc38d12d6",
"size": "902",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/model/static_page/handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "38138"
},
{
"name": "JavaScript",
"bytes": "6300"
},
{
"name": "Python",
"bytes": "38750"
},
{
"name": "Shell",
"bytes": "709"
}
],
"symlink_target": ""
} |
import unittest
import os
import coverage
from flask.ext.script import Manager
from flask.ext.migrate import Migrate, MigrateCommand
from project import app, db, models
# Load the config object named by the APP_SETTINGS env var
# (presumably a dotted path like "project.config.DevConfig" — confirm).
app.config.from_object(os.environ['APP_SETTINGS'])
migrate = Migrate(app, db)
manager = Manager(app)
# Expose Flask-Migrate commands as `python manage.py db ...`
manager.add_command('db', MigrateCommand)
@manager.command
def test():
    """Runs the unit tests without coverage."""
    # Discover test modules from the project root.
    tests = unittest.TestLoader().discover('.')
    unittest.TextTestRunner(verbosity=2).run(tests)
@manager.command
def cov():
    """Runs the unit tests with coverage."""
    # Measure branch coverage for the application package only.
    cov = coverage.coverage(branch=True, include='project/*')
    cov.start()
    tests = unittest.TestLoader().discover('.')
    unittest.TextTestRunner(verbosity=2).run(tests)
    cov.stop()
    cov.save()
    # NOTE: Python 2 print statement — this file targets Python 2.
    print 'Coverage Summary:'
    cov.report()
    # Write the HTML report next to this file under ./coverage, then reset.
    basedir = os.path.abspath(os.path.dirname(__file__))
    covdir = os.path.join(basedir, 'coverage')
    cov.html_report(directory=covdir)
    cov.erase()
if __name__ == '__main__':
manager.run()
| {
"content_hash": "348ebf0bc1eec4a01de590b1fb8094a3",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 61,
"avg_line_length": 23.454545454545453,
"alnum_prop": 0.687015503875969,
"repo_name": "Kirza/sv-blog",
"id": "4b02aee4bb7a981063e97edbef66eec83061bdba",
"size": "1032",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15035"
},
{
"name": "HTML",
"bytes": "31947"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "23135"
}
],
"symlink_target": ""
} |
"""Test that all GOATOOLS package dirs are in the setup.py file"""
# pylint: disable=wrong-import-position
from os import walk
from os.path import join
from os.path import abspath
import sys
sys.argv = [abspath(__file__), '--help']
from setup import NAME # goatools
from setup import PACKAGES # modules in goatools
from tests.utils import REPO
def test_setup_dirs():
    """Test that all GOATOOLS package dirs are in the setup.py file"""
    # Test packages are ignored on the setup side, mirroring _get_pkgmods.
    pkgs_setup = set(m for m in PACKAGES if 'test_' not in m)
    pkgs_dirs = _get_pkgmods()
    assert pkgs_dirs.issubset(pkgs_setup), _errmsg(pkgs_setup, pkgs_dirs)
    print('**NOTE: TEST PASSED')
def _errmsg(pkgs_setup, pkgs_dirs):
    """Print the packages which are not found in setup.py"""
    # Strip the leading "<NAME>." prefix so the output can be pasted into
    # setup.py's PACKAGES list verbatim.
    len_name = len(NAME) + 1
    missing = set(m[len_name:] for m in pkgs_dirs.difference(pkgs_setup))
    return '**FATAL: MISSING PACKAGES in setup.py:\n    NAME + ".{P}",'.format(
        P='",\n    NAME + ".'.join(sorted(missing)))
def _get_pkgmods():
    """Get the GOATOOLS package modules by walking the package dirs"""
    prefix_len = len(REPO) + 1
    pkgs = set()
    for dirpath, _, _ in walk(join(REPO, NAME)):
        # Skip bytecode cache directories and test packages.
        if dirpath[-11:] == '__pycache__':
            continue
        pkg = dirpath[prefix_len:].replace('/', '.')
        if 'test_' not in pkg:
            pkgs.add(pkg)
    return pkgs
if __name__ == '__main__':
test_setup_dirs()
| {
"content_hash": "65d0c34270e7b38873255712d3b6bcba",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 79,
"avg_line_length": 33.095238095238095,
"alnum_prop": 0.6215827338129496,
"repo_name": "tanghaibao/goatools",
"id": "1930a0429ef090475bac6acdc218dd59b7a730d9",
"size": "1413",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_setup_dirs.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "316670"
},
{
"name": "Makefile",
"bytes": "25213"
},
{
"name": "Python",
"bytes": "146769147"
},
{
"name": "Shell",
"bytes": "1107"
}
],
"symlink_target": ""
} |
from pygaps.iast.pgiast import iast_point
from pygaps.iast.pgiast import iast_point_fraction
from pygaps.iast.pgiast import iast_binary_svp
from pygaps.iast.pgiast import iast_binary_vle
from pygaps.iast.pgiast import reverse_iast
| {
"content_hash": "d56d7f70431b3b03b65a003b034bd904",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 50,
"avg_line_length": 46.2,
"alnum_prop": 0.8354978354978355,
"repo_name": "pauliacomi/pyGAPS",
"id": "bbeeaa3bca87798aa69fec071fc383b189982857",
"size": "300",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pygaps/iast/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3600"
},
{
"name": "PowerShell",
"bytes": "2995"
},
{
"name": "Python",
"bytes": "800102"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2019 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
import os
import subprocess
import tempfile
from flexmock import flexmock
import pytest
import json
import tarfile
import re
from atomic_reactor.constants import PLUGIN_FETCH_SOURCES_KEY
from atomic_reactor.inner import DockerBuildWorkflow
from atomic_reactor.constants import EXPORTED_SQUASHED_IMAGE_NAME
from atomic_reactor.core import DockerTasker
from atomic_reactor.plugin import BuildStepPluginsRunner, PluginFailedException
from atomic_reactor.plugins.build_source_container import SourceContainerPlugin
from atomic_reactor.plugins.pre_reactor_config import (
ReactorConfigPlugin,
)
from tests.docker_mock import mock_docker
from tests.constants import MOCK_SOURCE
class MockSource(object):
    """Minimal stand-in for a build source rooted at a temp directory."""
    def __init__(self, tmpdir):
        tmpdir = str(tmpdir)
        self.dockerfile_path = os.path.join(tmpdir, 'Dockerfile')
        self.path = tmpdir
        # Only image_build_method is ever read from the config here.
        self.config = flexmock(image_build_method=None)

    def get_build_file_path(self):
        # Mirrors the real source API: (path to Dockerfile, source root).
        return self.dockerfile_path, self.path
class MockInsideBuilder(object):
    """Fake InsideBuilder: mocks docker and leaves image fields unset."""
    def __init__(self):
        mock_docker()
        self.tasker = DockerTasker()
        self.base_image = None
        self.image_id = None
        self.image = None
        self.df_path = None
        self.df_dir = None
        self.parent_images_digests = {}

    def ensure_not_built(self):
        # No-op: these tests never need the "already built" guard.
        pass
def mock_workflow(tmpdir, sources_dir='', remote_dir=''):
    """Build a DockerBuildWorkflow wired with mock source and builder.

    The fetch_sources plugin result is pre-populated so the source-container
    build step finds the (possibly nonexistent) SRPM and remote-source
    directories under *tmpdir*.
    """
    workflow = DockerBuildWorkflow(source=MOCK_SOURCE)
    builder = MockInsideBuilder()
    source = MockSource(tmpdir)
    setattr(builder, 'source', source)
    setattr(workflow, 'source', source)
    setattr(workflow, 'builder', builder)
    workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
    workflow.prebuild_results[PLUGIN_FETCH_SOURCES_KEY] = {
        'image_sources_dir': os.path.join(tmpdir.strpath, sources_dir),
        'remote_sources_dir': os.path.join(tmpdir.strpath, remote_dir),
    }
    return workflow
@pytest.mark.parametrize('sources_dir, sources_dir_exists, sources_dir_empty', [
    ('sources_dir', False, True),
    ('sources_dir', True, True),
    ('sources_dir', True, False)])
@pytest.mark.parametrize('remote_dir, remote_dir_exists, remote_dir_empty', [
    ('remote_sources_dir', False, True),
    ('remote_sources_dir', True, True),
    ('remote_sources_dir', True, False)])
@pytest.mark.parametrize('export_failed', (True, False))
def test_running_build(tmpdir, caplog, user_params,
                       sources_dir, sources_dir_exists, sources_dir_empty,
                       remote_dir, remote_dir_exists, remote_dir_empty,
                       export_failed):
    """
    Test if proper result is returned and if plugin works
    """
    # Lay out the (optionally existing / optionally empty) input directories.
    sources_dir_path = os.path.join(tmpdir.strpath, sources_dir)
    if sources_dir_exists:
        os.mkdir(sources_dir_path)
        if not sources_dir_empty:
            os.mknod(os.path.join(sources_dir_path, 'stub.srpm'))
    remote_dir_path = os.path.join(tmpdir.strpath, remote_dir)
    if remote_dir_exists:
        os.mkdir(remote_dir_path)
        if not remote_dir_empty:
            os.mknod(os.path.join(remote_dir_path, 'remote-sources.tar.gz'))
    workflow = mock_workflow(tmpdir, sources_dir, remote_dir)
    mocked_tasker = flexmock(workflow.builder.tasker)
    mocked_tasker.should_receive('wait').and_return(0)
    runner = BuildStepPluginsRunner(
        mocked_tasker,
        workflow,
        [{
            'name': SourceContainerPlugin.key,
            'args': {},
        }]
    )
    # The plugin calls tempfile.mkdtemp twice: first for the OCI output,
    # then for the docker-archive export.
    temp_image_output_dir = os.path.join(str(tmpdir), 'image_output_dir')
    temp_image_export_dir = os.path.join(str(tmpdir), 'image_export_dir')
    tempfile_chain = flexmock(tempfile).should_receive("mkdtemp").and_return(temp_image_output_dir)
    tempfile_chain.and_return(temp_image_export_dir)
    os.mkdir(temp_image_export_dir)
    os.makedirs(os.path.join(temp_image_output_dir, 'blobs', 'sha256'))

    # Stand-in for subprocess.check_output: validates the exact skopeo and
    # bsi command lines the plugin is expected to run.
    def check_check_output(args, **kwargs):
        if args[0] == 'skopeo':
            assert args[0] == 'skopeo'
            assert args[1] == 'copy'
            assert args[2] == 'oci:%s' % temp_image_output_dir
            assert args[3] == 'docker-archive:%s' % os.path.join(temp_image_export_dir,
                                                                 EXPORTED_SQUASHED_IMAGE_NAME)
            if export_failed:
                raise subprocess.CalledProcessError(returncode=1, cmd=args, output="Failed")
            return ''
        else:
            args_expect = ['bsi', '-d']
            drivers = []
            if sources_dir and sources_dir_exists:
                drivers.append('sourcedriver_rpm_dir')
            if remote_dir and remote_dir_exists:
                drivers.append('sourcedriver_extra_src_dir')
            args_expect.append(','.join(drivers))
            if sources_dir and sources_dir_exists:
                args_expect.append('-s')
                args_expect.append(sources_dir_path)
            if remote_dir and remote_dir_exists:
                args_expect.append('-e')
                args_expect.append(remote_dir_path)
            args_expect.append('-o')
            args_expect.append(temp_image_output_dir)
            assert args == args_expect
            return 'stub stdout'

    # Neither bsi nor skopeo runs when there is nothing to build from.
    check_output_times = 2
    if not sources_dir_exists and not remote_dir_exists:
        check_output_times = 0
    (flexmock(subprocess)
     .should_receive("check_output")
     .times(check_output_times)
     .replace_with(check_check_output))

    # Fabricate a minimal OCI layout (index + one manifest blob).
    blob_sha = "f568c411849e21aa3917973f1c5b120f6b52fe69b1944dfb977bc11bed6fbb6d"
    index_json = {"schemaVersion": 2,
                  "manifests":
                      [{"mediaType": "application/vnd.oci.image.manifest.v1+json",
                        "digest": "sha256:%s" % blob_sha,
                        "size": 645,
                        "annotations": {"org.opencontainers.image.ref.name": "latest-source"},
                        "platform": {"architecture": "amd64", "os": "linux"}}]}
    blob_json = {"schemaVersion": 2, "layers": []}
    with open(os.path.join(temp_image_output_dir, 'index.json'), 'w') as fp:
        fp.write(json.dumps(index_json))
    with open(os.path.join(temp_image_output_dir, 'blobs', 'sha256', blob_sha), 'w') as fp:
        fp.write(json.dumps(blob_json))
    if not export_failed:
        # Simulate a successful skopeo export by tarring up the OCI layout.
        export_tar = os.path.join(temp_image_export_dir, EXPORTED_SQUASHED_IMAGE_NAME)
        with open(export_tar, "wb") as f:
            with tarfile.TarFile(mode="w", fileobj=f) as tf:
                for f in os.listdir(temp_image_output_dir):
                    tf.add(os.path.join(temp_image_output_dir, f), f)
    if not sources_dir_exists and not remote_dir_exists:
        build_result = runner.run()
        err_msg = "No SRPMs directory '{}' available".format(sources_dir_path)
        err_msg += "\nNo Remote source directory '{}' available".format(remote_dir_path)
        # Since Python 3.7 logger adds additional whitespaces by default -> checking without them
        assert re.sub(r'\s+', " ", err_msg) in re.sub(r'\s+', " ", caplog.text)
        assert build_result.is_failed()
    elif export_failed:
        with pytest.raises(PluginFailedException):
            runner.run()
    else:
        build_result = runner.run()
        assert not build_result.is_failed()
        assert build_result.oci_image_path
        assert 'stub stdout' in caplog.text
        empty_srpm_msg = "SRPMs directory '{}' is empty".format(sources_dir_path)
        empty_remote_msg = "Remote source directory '{}' is empty".format(remote_dir_path)
        if sources_dir_exists and sources_dir_empty:
            assert empty_srpm_msg in caplog.text
        else:
            assert empty_srpm_msg not in caplog.text
        if remote_dir_exists and remote_dir_empty:
            assert empty_remote_msg in caplog.text
        else:
            assert empty_remote_msg not in caplog.text
def test_failed_build(tmpdir, caplog, user_params):
    """
    Test if proper error state is returned when build inside build
    container failed
    """
    # Make every external command fail (bsi), carrying its stdout for logs.
    (flexmock(subprocess).should_receive('check_output')
     .and_raise(subprocess.CalledProcessError(1, 'cmd', output='stub stdout')))
    workflow = mock_workflow(tmpdir)
    mocked_tasker = flexmock(workflow.builder.tasker)
    mocked_tasker.should_receive('wait').and_return(1)
    runner = BuildStepPluginsRunner(
        mocked_tasker,
        workflow,
        [{
            'name': SourceContainerPlugin.key,
            'args': {},
        }]
    )

    build_result = runner.run()
    # The failure must be reported, not raised, and the output logged.
    assert build_result.is_failed()
    assert 'BSI failed with output:' in caplog.text
    assert 'stub stdout' in caplog.text
| {
"content_hash": "86a85da4810b9c186a77c83625933296",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 99,
"avg_line_length": 37.51898734177215,
"alnum_prop": 0.627642825011246,
"repo_name": "DBuildService/atomic-reactor",
"id": "fbe09d9e39992e71b86e08ccefc8f8a16941db37",
"size": "8892",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/plugins/test_build_source_container.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "506236"
},
{
"name": "Shell",
"bytes": "3589"
}
],
"symlink_target": ""
} |
import ninjag
import os
from read_all import read_all
def test():
    """Run ninjag on in3.yaml and compare its output with the reference file."""
    f_inputs = [
        "input/in3.yaml",
    ]
    f_answer = "output/build.ninja"
    f_solution = "solution/sol3.ninja"
    # Generate build.ninja in the current directory via the CLI.
    cmd = " ".join(["ninjag", *f_inputs])
    os.system(cmd)
    # Move the fresh output into place.
    # NOTE(review): os.remove assumes a previous answer file exists — confirm.
    os.remove(f_answer)
    os.rename('build.ninja', f_answer)
    answer = read_all(f_answer)
    solution = read_all(f_solution)
    assert answer == solution
| {
"content_hash": "33793f737c149c6298c7ff9e368a9c20",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 41,
"avg_line_length": 23.22222222222222,
"alnum_prop": 0.6076555023923444,
"repo_name": "yuhangwang/ninjag-python",
"id": "7f0386234dcf966d09de973047e1f3bd52b3e91b",
"size": "418",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/frontend/c/test_5.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24739"
}
],
"symlink_target": ""
} |
"""Custom topology example
Two directly connected switches plus a host for each switch:
host --- switch --- switch --- host
Adding the 'topos' dict with a key/value pair to generate our newly defined
topology enables one to pass in '--topo=mytopo' from the command line.
"""
from mininet.topo import Topo
class MyTopo( Topo ):
    "Simple topology example."

    def __init__( self ):
        "Create custom topo."

        # Initialize topology
        Topo.__init__( self )

        # Add hosts and switches
        # NOTE(review): only a single switch is created here; the hosts and
        # links described in the module docstring are not added — confirm
        # whether this topology is intentionally switch-only.
        s1 = self.addSwitch('s1')

        # Add links

topos = { 'mytopo': ( lambda: MyTopo() ) }
| {
"content_hash": "3118e5601d31a2e4d63629a4ff7a4657",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 75,
"avg_line_length": 22.85185185185185,
"alnum_prop": 0.6320907617504052,
"repo_name": "hchunhui/pop",
"id": "70e36c1fda433afcfd7f5ca08dad043c78759a40",
"size": "617",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/topo5.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "370013"
},
{
"name": "C++",
"bytes": "1335"
},
{
"name": "HTML",
"bytes": "15643"
},
{
"name": "Makefile",
"bytes": "3479"
},
{
"name": "Python",
"bytes": "6887"
},
{
"name": "Ruby",
"bytes": "1718"
},
{
"name": "Shell",
"bytes": "4293"
}
],
"symlink_target": ""
} |
from numpy import *
from numpy.linalg import pinv
from numpy.linalg import svd
from numpy.random import permutation
from numpy.random import random_sample
def _onehot_encode(y):
C = unique(y)
N = len(y)
Y = zeros((N, len(C)))
Y[arange(N), y] = 1.0
return Y
def _train_test_split(X, T, test_size=0.25):
N = len(T)
S = floor(N * test_size)
idx = permutation(N)
idx_test = zeros(N, dtype=bool)
idx_test[idx[:S]] = True
return X[idx[S:]], X[idx[:S]], T[idx[S:]], T[idx[:S]], idx_test
class ShuffleSplit(object):
    """Yield `n_iter` independent random train/test index partitions.

    Each iteration draws a fresh permutation of range(n) and splits it into
    `n_test` test indices and `n_train` training indices.
    """
    def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
                 random_state=None):
        self.n = n
        self.n_iter = n_iter
        self.test_size = test_size
        self.train_size = train_size
        # NOTE(review): random_state is stored but never used to seed the
        # permutations below — confirm whether reproducibility is expected.
        self.random_state = random_state
        self.n_test = int(floor(n * test_size))
        self.n_train = n - self.n_test

    def __iter__(self):
        for i in range(self.n_iter):
            # random partition
            # Bug fix: the original wrote `permutation = permutation(self.n)`,
            # which makes `permutation` a local name and raises
            # UnboundLocalError before the function can be called.
            perm = permutation(self.n)
            ind_test = perm[:self.n_test]
            ind_train = perm[self.n_test:self.n_test + self.n_train]
            yield ind_train, ind_test
class PCAData(object):
    """Fit PCA statistics (mean, principal axes, variances) to a data matrix."""

    def __init__(self, data):
        centered = data.copy()
        self.mean_ = centered.mean(axis=0)
        centered -= self.mean_
        # Thin SVD: rows of V are the principal axes.
        _, singular_values, V = svd(centered, full_matrices=False)
        self.V = V
        self.explained_variance_ = (singular_values ** 2) / centered.shape[0]

    @property
    def explained_variance(self):
        """Per-component variance, normalized to sum to 1."""
        total = self.explained_variance_.sum()
        return self.explained_variance_ / total
class PCATransformer(object):
    """Project data onto (and back from) the leading PCA components."""

    def __init__(self, pca_data, pca_components):
        self.mean_ = pca_data.mean_
        self.V = pca_data.V[:pca_components]
        self.explained_variance_ = pca_data.explained_variance_[:pca_components]

    def transform(self, X):
        """Center, project onto the kept axes, and whiten by component variance."""
        scale = sqrt(self.explained_variance_)
        return dot(X - self.mean_, self.V.T) / scale

    def inverse_transform(self, Xt):
        """Undo transform(): un-whiten, project back, and re-add the mean."""
        scale = sqrt(self.explained_variance_)
        return dot(Xt * scale, self.V) + self.mean_

    def reconstruct(self, X):
        """Round-trip a single image through PCA space, preserving its shape."""
        flat = self.transform(X.flatten())
        return self.inverse_transform(flat).reshape(X.shape)
class NeuralNetwork(object):
    def __init__(self, app):
        # Keep a reference to the owning application; all hyper-parameters
        # (pca_components, num_hidden_units, learning rates, epochs, ...)
        # are read from it in reset_training().
        self.app = app
    def reset_training(self):
        """Initialize the network for training."""
        app = self.app
        # Dataset labels are 1-based; shift to 0-based class indices.
        self.targets = app.dataset[app.target_name] - 1
        target_code_name = app.target_name + '_Code'
        # NOTE: dict.has_key — this file targets Python 2.
        if app.dataset.has_key(target_code_name):
            self.target_names = app.dataset[target_code_name]
        else:
            # No code table: fall back to stringified 1-based labels.
            self.target_names = dict([(t + 1, str(t + 1)) for t in unique(self.targets)])
        self.n_output = len(unique(self.targets))
        # Momentum (V_*) and weight (W_*) matrices; the +1 row is the bias unit.
        self.V_hidden = zeros((app.pca_components + 1, app.num_hidden_units))
        self.W_hidden = random_sample(self.V_hidden.shape)
        self.V_output = zeros((app.num_hidden_units + 1, self.n_output))
        self.W_output = random_sample(self.V_output.shape)
        self.hidden_units_learning_rate = app.hidden_units_learning_rate
        self.output_units_learning_rate = app.output_units_learning_rate
        self.X = X = app.dataset['data']
        # Split into training and test
        X_train, X_test, self.y_train, self.y_test, self.idx_test = \
            _train_test_split(X, self.targets, test_size=app.num_test_data)
        # Preprocess the data using PCA
        self.pca_transformer = PCATransformer(PCAData(X_train), app.pca_components)
        self.X_train = self.pca_transformer.transform(X_train)
        self.X_test = self.pca_transformer.transform(X_test)
        # Epochs
        self.epoch = 0
        self.minimum_rmse = app.minimum_rmse
        if self.minimum_rmse > 0:
            # Use Training RMSE to stop
            # rmse/cerr grow implicitly; the lambda reads the current epoch row.
            self.should_keep_training = lambda: self.rmse[self.epoch, 1] > app.minimum_rmse
            self.rmse = zeros((0, 2))
            self.cerr = zeros((0, 2))
        else:
            # Use epochs to stop
            self.should_keep_training = lambda: self.epoch < app.epochs
            self.rmse = zeros((app.epochs, 2))
            self.cerr = zeros((app.epochs, 2))
def resume_training(self):
"""Resume training the network"""
while self.should_keep_training():
# Test then Train, since we'll use the training errors
for i, (inputs, y) in enumerate([[self.X_test, self.y_test], [self.X_train, self.y_train]]):
outputs, hidden = self.feed_forward(inputs)
target = ones(outputs.shape) * (-1.0)
target[arange(target.shape[0]), y] = 1.0
errors = target - outputs
self.rmse[self.epoch, i] = sqrt((errors ** 2).mean()) # RMSE
self.cerr[self.epoch, i] = (y != argmax(outputs, axis=1)).mean()
# Yield the results to outside
yield self.epoch, self.rmse.shape[0], self.rmse[:self.epoch], self.cerr[:self.epoch], False
# Update weights using backpropagation
self.back_propagate(inputs, hidden, outputs, errors)
self.epoch += 1
# Do once more for the very last epoch
yield self.epoch, self.rmse.shape[0], self.rmse[:self.epoch], self.cerr[:self.epoch], True
def _activation(self, x):
""" Funny tanh function. """
z = x * 2 / 3
y = (exp(z) - exp(-z)) / (exp(z) + exp(-z))
return 1.7159 * y
def _da(self, x):
return (1.7159 - multiply(x, x) / 1.7159) * 2 / 3
def _inverse_activation(self, x):
z = x / 1.7159
return z
# z[z<-.999] = -.999; z[z>.999] = .999
# return arctanh(z)*3/2
def feed_forward(self, X):
"""From the input X, calculate the activations at the hidden layer and the output layer."""
Z = self._activation(dot(c_[X, ones((X.shape[0], 1))], self.W_hidden))
return self._activation(dot(c_[Z, ones((X.shape[0], 1))], self.W_output)), Z
def back_propagate(self, inputs, hidden, output, errors):
"""Back-propagate the errors and update the weights."""
d_output = self._da(output) * errors
d_hidden = self._da(hidden) * dot(d_output, self.W_output[:-1].T)
n_samples = inputs.shape[0]
bias = ones((n_samples, 1))
# Update momentum and weights
self.V_output = self.output_units_learning_rate * dot(c_[hidden, bias].T, d_output) / n_samples
self.W_output += self.V_output
self.V_hidden = self.hidden_units_learning_rate * dot(c_[inputs, bias].T, d_hidden) / n_samples
self.W_hidden += self.V_hidden
def predict_all(self):
"""Returns the predictions and the reconstructions for all training / test data."""
outputs, hidden = self.feed_forward(self.pca_transformer.transform(self.X))
hidden_expected = dot(self._inverse_activation(outputs), pinv(self.W_output))[:, :-1]
hidden_reconstruction = self.pca_transformer.inverse_transform(
dot(self._inverse_activation(hidden_expected), pinv(self.W_hidden))[:, :-1])
return outputs.argmax(axis=1), hidden_reconstruction.reshape(self.app.dataset['images'].shape)
| {
"content_hash": "ce9264e2f497674438886b877d042d7f",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 104,
"avg_line_length": 38.56613756613756,
"alnum_prop": 0.5929482782274661,
"repo_name": "ttsuchi/neural-network-demo",
"id": "c665c72d31b5a70da0e324b27310e67d1aa13196",
"size": "7289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/neural_network.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "160"
},
{
"name": "Matlab",
"bytes": "17232"
},
{
"name": "Python",
"bytes": "27583"
}
],
"symlink_target": ""
} |
"""
Stack-In-A-WSGI: StackInAWsgiAdmin
"""
import json
import logging
import re
from stackinabox.services.service import StackInABoxService
from stackinawsgi.exceptions import InvalidSessionId
from stackinawsgi.session.service import (
global_sessions,
session_regex
)
logger = logging.getLogger(__name__)
class StackInAWsgiAdmin(StackInABoxService):
    """
    Stack-In-A-WSGI RESTful Admin API

    :ivar :obj:`StackInAWsgiSessionManager` manager: session manager instance
    :ivar text_type base_uri: base URI for accessing the session to which the
        session uuid will be appended, http://localhost/stackinabox/ which
        would result in http://localhost/stackinabox/<session-id>/
    """

    def __init__(self, session_manager, base_uri):
        """
        Initialize the Admin Interface
        """
        super(StackInAWsgiAdmin, self).__init__('admin')
        self.manager = session_manager
        self.base_uri = base_uri

        # Route table: session info is addressed by session-id; every other
        # administrative operation lives on the service root.
        self.register(
            StackInABoxService.GET,
            re.compile('^{0}$'.format(session_regex)),
            StackInAWsgiAdmin.get_session_info
        )
        self.register(
            StackInABoxService.DELETE, '/', StackInAWsgiAdmin.remove_session
        )
        self.register(
            StackInABoxService.POST, '/', StackInAWsgiAdmin.create_session
        )
        self.register(
            StackInABoxService.PUT, '/', StackInAWsgiAdmin.reset_session
        )
        self.register(
            StackInABoxService.GET, '/', StackInAWsgiAdmin.get_sessions
        )

    @property
    def base_uri(self):
        """
        Base URI of the WSGI App
        """
        return self.__base_uri

    @base_uri.setter
    def base_uri(self, value):
        """
        Update the Base URI of the WSGI App
        """
        # Normalize by stripping a single leading and trailing slash.
        if value.startswith('/'):
            value = value[1:]
        if value.endswith('/'):
            value = value[:-1]
        self.__base_uri = value
        logger.debug(
            'Received Base URI: {0}'.format(
                self.__base_uri
            )
        )

    def helper_get_session_id(self, headers):
        """
        Helper to retrieve the session id or build a new one

        :param dict headers: case insensitive header dictionary
        :returns: text_type with the UUID of the session
        """
        session_id = None
        if 'x-session-id' in headers:
            session_id = headers['x-session-id']
        else:
            logger.debug('x-session-id not in headers')
        logger.debug('Found Session Id: {0}'.format(session_id))
        return session_id

    def helper_get_session_id_from_uri(self, uri):
        """
        Helper to retrieve the Session-ID FROM a URI

        :param text_type uri: complete URI
        :returns: text_type with the session-id, or None if the URI does
            not match the session pattern
        """
        matcher = re.compile(session_regex)
        try:
            matched_groups = matcher.match(uri)
            # group(0) keeps the leading slash from the pattern; drop it.
            session_id = matched_groups.group(0)[1:]
            logger.debug(
                'Helper Get Session From URI - URI: "{0}", '
                'Session ID: "{1}"'.format(
                    uri,
                    session_id
                )
            )
        except Exception:
            logger.exception('Failed to find session-id')
            session_id = None
        return session_id

    def helper_get_uri(self, session_id):
        """
        Helper to build the session URL

        :param text_type session_id: session-id for URL is for
        :returns: text_type, the URL for the session-id
        """
        logger.debug(
            'Helper Get URI - Base URI: "{0}", Session ID: "{1}"'.format(
                self.base_uri,
                session_id
            )
        )
        return '{0}/{1}/'.format(
            self.base_uri,
            session_id
        )

    def create_session(self, request, uri, headers):
        """
        Create a new session

        :param :obj:`Request` request: object containing the HTTP Request
        :param text_type uri: the URI for the request per StackInABox
        :param dict headers: case insensitive header dictionary
        :returns: tuple for StackInABox HTTP Response

        HTTP Request:
            POST /admin/
            X-Session-ID: (Optional) Session-ID to use when creating the
                new session

        HTTP Responses:
            201 - Session Created
                X-Session-ID header contains the session-id
                Location header contains the URL for the session
        """
        requested_session_id = self.helper_get_session_id(
            headers
        )
        # Use the module-level logger (not the root logging module) so
        # these messages honour this package's logging configuration,
        # consistent with every other method in this class.
        logger.debug(
            'Requested Session Id: {0}'.format(requested_session_id)
        )
        session_id = self.manager.create_session(
            requested_session_id
        )
        logger.debug(
            'Created Session Id: {0}'.format(session_id)
        )
        headers['x-session-id'] = session_id
        headers['location'] = self.helper_get_uri(
            session_id
        )
        return (201, headers, '')

    def remove_session(self, request, uri, headers):
        """
        Remove an existing session

        :param :obj:`Request` request: object containing the HTTP Request
        :param text_type uri: the URI for the request per StackInABox
        :param dict headers: case insensitive header dictionary
        :returns: tuple for StackInABox HTTP Response

        HTTP Request:
            DELETE /admin/
            X-Session-ID: (Required) Session-ID to destroy

        HTTP Responses:
            204 - Session Destroyed
            404 - Session-ID Not Found
        """
        try:
            self.manager.remove_session(
                self.helper_get_session_id(
                    headers
                )
            )
        except InvalidSessionId as ex:
            return (404, headers, str(ex))
        else:
            return (204, headers, '')

    def reset_session(self, request, uri, headers):
        """
        Reset the session; shortcut for removing and creating the session
        while preserving the session-id.

        :param :obj:`Request` request: object containing the HTTP Request
        :param text_type uri: the URI for the request per StackInABox
        :param dict headers: case insensitive header dictionary
        :returns: tuple for StackInABox HTTP Response

        HTTP Request:
            PUT /admin/
            X-Session-ID: (Required) Session-ID to reset

        HTTP Responses:
            205 - Session Reset
            404 - Session-ID Not Found
        """
        try:
            self.manager.reset_session(
                self.helper_get_session_id(
                    headers
                )
            )
        except InvalidSessionId as ex:
            return (404, headers, str(ex))
        else:
            return (205, headers, '')

    def get_session_info(self, request, uri, headers):
        """
        Get Session Information - TBD

        :param :obj:`Request` request: object containing the HTTP Request
        :param text_type uri: the URI for the request per StackInABox
        :param dict headers: case insensitive header dictionary
        :returns: tuple for StackInABox HTTP Response

        HTTP Request:
            GET /admin/{X-Session-ID}
            X-Session-ID: (Required) Session-ID to reset

        HTTP Responses:
            200 - Session Data in JSON format
        """
        requested_session_id = self.helper_get_session_id_from_uri(
            uri
        )
        # Defaults cover the case of an unknown/invalid session-id.
        session_info = {
            'session_valid': requested_session_id in global_sessions,
            'created-time': None,
            'accessed-time': None,
            'accessed-count': 0,
            'http-status': {}
        }
        if session_info['session_valid']:
            session = global_sessions[requested_session_id]
            session_info['created-time'] = session.created_at.isoformat()
            session_info['accessed-time'] = (
                session.last_accessed_at.isoformat()
            )
            session_info['accessed-count'] = session.access_count
            session_info['http-status'] = session.status_tracker
        data = {
            'base_url': self.base_uri,
            'services': {
                svc().name: svc.__name__
                for svc in self.manager.services
            },
            'trackers': {
                'created-time': session_info['created-time'],
                'accessed': {
                    'time': session_info['accessed-time'],
                    'count': session_info['accessed-count']
                },
                'status': session_info['http-status']
            },
            'session_valid': session_info['session_valid']
        }
        return (200, headers, json.dumps(data))

    def get_sessions(self, request, uri, headers):
        """
        Get Session List - TBD

        :param :obj:`Request` request: object containing the HTTP Request
        :param text_type uri: the URI for the request per StackInABox
        :param dict headers: case insensitive header dictionary
        :returns: tuple for StackInABox HTTP Response

        HTTP Request:
            GET /admin/

        HTTP Responses:
            200 - Session List in JSON format
        """
        data = {
            'base_url': self.base_uri,
            'services': {
                svc().name: svc.__name__
                for svc in self.manager.services
            },
            'sessions': [
                requested_session_id
                for requested_session_id in global_sessions
            ]
        }
        return (200, headers, json.dumps(data))
| {
"content_hash": "a4d57bcba27f0e626997ced6eb506403",
"timestamp": "",
"source": "github",
"line_count": 327,
"max_line_length": 77,
"avg_line_length": 29.91131498470948,
"alnum_prop": 0.5471833145895103,
"repo_name": "TestInABox/stackInAWSGI",
"id": "5933dd5df4a23ff4f18dc316fb98d653598f5cbe",
"size": "9781",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stackinawsgi/admin/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "93089"
}
],
"symlink_target": ""
} |
"""
sentry.scripts.data_faker
~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from sentry import VERSION, app, capture
from sentry.interfaces import Http
import os.path
import random
import sys
from optparse import OptionParser
def main():
    """Generate ``--num`` fake events and send each through ``capture``.

    Events rotate randomly between exceptions, plain messages, and SQL
    queries; roughly half also carry a fake HTTP interface.  Exits the
    process with status 0 when finished.
    """
    parser = OptionParser(version="%%prog %s" % VERSION)
    parser.add_option('--config', metavar='CONFIG')
    # BUGFIX: declare the option as an int -- optparse delivers option
    # values as strings by default, and xrange() rejects a string, so
    # any command-line '--num N' previously raised TypeError.
    parser.add_option('--num', default=100, type='int')
    (options, args) = parser.parse_args()

    if options.config:
        app.config.from_pyfile(options.config)
    else:
        # Fall back to the per-user Sentry configuration when present.
        config_path = os.path.expanduser(os.path.join('~', '.sentry', 'sentry.conf.py'))
        if os.path.exists(config_path):
            app.config.from_pyfile(config_path)

    # Pools of canned data the fake events are drawn from.
    exceptions = [ValueError, SyntaxError, KeyError, IndexError, OSError]
    messages = [
        'Old Man, sorry. What knight live in that castle over there?',
        'You fight with the strength of many men, Sir knight.',
        'A witch! A witch! A witch! We\'ve got a witch! A witch!',
        'Does wood sink in water?',
        'The wise Sir Bedemir was the first to join King Arthur\'s knights, but other illustrious names were soon to follow',
    ]
    urls = [
        'http://example.com',
        'http://example.com/foo/bar/',
        'http://example.com/foo/bar/?baz=biz',
    ]
    sql_queries = ['SELECT * FROM table', 'INSERT INTO FOO (a, b, c) VALUES (1, 2, 3)', 'TRUNCATE TABLE baz']
    sql_engines = ['psycopg2', 'mysqldb', 'oracle']
    http_methods = Http.METHODS

    for n in xrange(options.num):
        # Pick one of the three event flavours at random.
        x = random.randint(0, 2)
        if x == 0:
            event = 'Exception'
            kwargs = {}
            exc_class = exceptions[n % len(exceptions)]
            exc_value = messages[n % len(messages)]
            try:
                raise exc_class(exc_value)
            except:
                # Bare except is deliberate: we just raised the exception
                # ourselves and only need its exc_info for the fake event.
                kwargs = {'exc_info': sys.exc_info()}
        elif x == 1:
            event = 'Message'
            kwargs = {'message': messages[n % len(messages)]}
        elif x == 2:
            event = 'Query'
            kwargs = {'query': sql_queries[n % len(sql_queries)], 'engine': sql_engines[n % len(sql_engines)]}
        # Attach a fake HTTP interface to roughly half of the events.
        if random.randint(0, 1) == 1:
            kwargs['data'] = {
                'sentry.interfaces.Http': {
                    'url': urls[n % len(urls)],
                    'method': http_methods[n % len(http_methods)],
                }
            }
        capture(event, **kwargs)
    sys.exit(0)
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
"content_hash": "c86aaa6f5bfc24f0750fbe8e99e09fff",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 125,
"avg_line_length": 33.37179487179487,
"alnum_prop": 0.5566653860929697,
"repo_name": "dcramer/sentry-old",
"id": "6cb876fadf144770606377c877bcf4a00e447417",
"size": "2625",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sentry/web/scripts/data_faker.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "10085"
},
{
"name": "Python",
"bytes": "183975"
},
{
"name": "Shell",
"bytes": "4106"
}
],
"symlink_target": ""
} |
from iptest.assert_util import *

# Load an assembly that defines class C inside the NAMESPACE NamespaceOrType.
add_clr_assemblies("loadorder_1a")

# namespace NamespaceOrType {
#     public class C {
#         public static string Flag = typeof(C).FullName;
#     }
# }

import NamespaceOrType

# Now load an assembly that defines a GENERIC TYPE with the same name.
add_clr_assemblies("loadorder_1c")

# public class NamespaceOrType<T> {
#     public static string Flag = typeof(NamespaceOrType<>).FullName;
# }

# The binding made before the second load still refers to the namespace,
# so the namespace member C is reachable.
AreEqual(NamespaceOrType.C.Flag, "NamespaceOrType.C")

# Re-importing rebinds NamespaceOrType to the generic type, which shadows
# the namespace: the namespace member C is no longer reachable, while the
# generic type can be instantiated by index.
import NamespaceOrType

AssertError(AttributeError, lambda: NamespaceOrType.C)
AreEqual(NamespaceOrType[int].Flag, "NamespaceOrType`1")
"content_hash": "b68530c25ccd6b0dcaadde000af7a158",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 69,
"avg_line_length": 22.4,
"alnum_prop": 0.7321428571428571,
"repo_name": "IronLanguages/ironpython3",
"id": "47c7f0c90a691398428b9c1a8fdda18b9fe8d8e4",
"size": "767",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Tests/interop/net/loadorder/t1b.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6855"
},
{
"name": "C",
"bytes": "239473"
},
{
"name": "C#",
"bytes": "12619304"
},
{
"name": "C++",
"bytes": "28403"
},
{
"name": "CSS",
"bytes": "96"
},
{
"name": "HTML",
"bytes": "13157428"
},
{
"name": "Makefile",
"bytes": "332"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "PowerShell",
"bytes": "84504"
},
{
"name": "Python",
"bytes": "29490541"
},
{
"name": "Roff",
"bytes": "21080"
},
{
"name": "Shell",
"bytes": "4872"
},
{
"name": "VBScript",
"bytes": "481"
}
],
"symlink_target": ""
} |
def cost(matrix1, matrix2):
    """Scalar multiplications needed to multiply two matrices.

    Each matrix is given as a (rows, cols) pair; multiplying an
    (a, b) matrix by a (b, c) matrix costs a * b * c operations.
    """
    rows, shared_dim = matrix1[0], matrix1[1]
    return rows * shared_dim * matrix2[1]
def single_to_double(elements):
    """Return the list of consecutive (overlapping) pairs of *elements*.

    For fewer than two elements the result is empty.
    """
    return list(zip(elements, elements[1:]))
def minimum_cost(i, j, matrix_array, cache=None):
    """Minimum scalar multiplications to multiply the matrix chain
    matrix_array[i..j] (both ends inclusive).

    Each entry of matrix_array is a (rows, cols) dimension pair; adjacent
    entries are assumed compatible.  Results are memoized in *cache*,
    keyed by (i, j).
    """
    # BUGFIX: test the sentinel with `is None` rather than truthiness, so
    # an empty dict passed by the caller is still used (and filled) as the
    # shared memo table instead of being silently replaced.
    if cache is None:
        cache = {}
    if (i, j) not in cache:
        if i == j:
            result = 0  # a single matrix needs no multiplication
        elif j == i + 1:
            # Two matrices (a, b) x (b, c): cost is a*b*c.
            result = matrix_array[i][0] * matrix_array[i][1] * matrix_array[j][1]
        else:
            # Try every split point k and keep the cheapest combination;
            # joining the two halves costs rows(i) * cols(k) * cols(j).
            result = min(
                minimum_cost(i, k, matrix_array, cache)
                + minimum_cost(k + 1, j, matrix_array, cache)
                + matrix_array[i][0] * matrix_array[k][1] * matrix_array[j][1]
                for k in range(i, j))
        cache[(i, j)] = result
    return cache[(i, j)]
def minimum_multiplication(i, j, matrix_array, cache=None):
    """Like minimum_cost, but also returns the chosen split points.

    Returns a (cost, splits) tuple for the chain matrix_array[i..j],
    memoized in *cache* keyed by (i, j).
    """
    # BUGFIX: `is None` sentinel test -- a caller-supplied empty dict must
    # be used (and filled) as the shared memo table, not discarded.
    if cache is None:
        cache = {}
    if (i, j) not in cache:
        if i == j:
            result = (0, [])
        elif j == i + 1:
            # Two matrices (a, b) x (b, c): cost is a*b*c.
            result = (matrix_array[i][0] * matrix_array[i][1] * matrix_array[j][1], [i])
        else:
            result = None
            for k in range(i, j):
                left_cost, left_path = minimum_multiplication(i, k, matrix_array, cache)
                right_cost, right_path = minimum_multiplication(k + 1, j, matrix_array, cache)
                extra_residue = matrix_array[i][0] * matrix_array[k][1] * matrix_array[j][1]
                # NOTE(review): the path records i, not the split index k,
                # at each combination step -- preserved from the original;
                # confirm whether k was intended.
                current = (left_cost + right_cost + extra_residue,
                           left_path + right_path + [i])
                # Tuple comparison: lower cost wins; ties are broken by
                # path ordering, matching the original selection behavior.
                if result is None or current < result:
                    result = current
        cache[(i, j)] = result
    return cache[(i, j)]
| {
"content_hash": "4d78fff19a73f72157db9008cabc435b",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 187,
"avg_line_length": 37.97560975609756,
"alnum_prop": 0.5433526011560693,
"repo_name": "Bolt64/my_code",
"id": "08089bc5cb636cea926d7a07563478d2daa84ec8",
"size": "1581",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "algorithms/minimum_matrix.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "57094"
},
{
"name": "C++",
"bytes": "12255"
},
{
"name": "Haskell",
"bytes": "27215"
},
{
"name": "Jupyter Notebook",
"bytes": "18418"
},
{
"name": "Python",
"bytes": "308871"
},
{
"name": "Racket",
"bytes": "3888"
},
{
"name": "Rust",
"bytes": "22856"
},
{
"name": "Scala",
"bytes": "51026"
},
{
"name": "Shell",
"bytes": "514"
},
{
"name": "Vim script",
"bytes": "341"
}
],
"symlink_target": ""
} |
"""
sentry.conf.urls
~~~~~~~~~~~~~~~~
These are additional urls used by the Sentry-provided web server
:copyright: (c) 2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import os
from sentry.web.urls import *
from sentry.web.frontend import generic
from django.conf.urls.defaults import *
from django.contrib import admin
from django.views.defaults import page_not_found
# Register all installed apps' admin modules with the default admin site.
admin.autodiscover()

# Filesystem location of Django's bundled admin static assets, served by
# the '_admin_media' URL pattern below.
admin_media_dir = os.path.join(os.path.dirname(admin.__file__), 'media')

# Render 404s with Sentry's own template instead of Django's default.
handler404 = lambda x: page_not_found(x, template_name='sentry/404.html')
def handler500(request):
    """
    500 error handler.

    Renders Sentry's `sentry/500.html` template with the request in the
    context and returns it as an HTTP 500 response.
    """
    from django.template import Context, loader
    from django.http import HttpResponseServerError
    template = loader.get_template('sentry/500.html')
    body = template.render(Context({'request': request}))
    return HttpResponseServerError(body)
# Extra URLs served by Sentry's built-in web server, prepended to the
# patterns imported from sentry.web.urls.
urlpatterns = patterns('',
    url(r'^i18n/', include('django.conf.urls.i18n')),
    url(r'^admin/', include(admin.site.urls)),
    # Serve Django's bundled admin static files via Sentry's generic
    # static-media view, rooted at admin_media_dir (defined above).
    url(r'^_admin_media/(?P<path>.*)$', generic.static_media,
        kwargs={'root': admin_media_dir},
        name='admin-media'),
) + urlpatterns
| {
"content_hash": "19c2a8d7b3ff4f93dc7a61daab80a04d",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 73,
"avg_line_length": 26.29787234042553,
"alnum_prop": 0.6901294498381877,
"repo_name": "chayapan/django-sentry",
"id": "0042b8638758ca58989f578a8b10ea58d4a8979b",
"size": "1236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/conf/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "126130"
},
{
"name": "HTML",
"bytes": "174367"
},
{
"name": "JavaScript",
"bytes": "54696"
},
{
"name": "Makefile",
"bytes": "1867"
},
{
"name": "Python",
"bytes": "1330807"
}
],
"symlink_target": ""
} |
import mock
import pytest
from hpe_test_utils import ImageStreamerBaseTest
from oneview_module_loader import GoldenImageModule
FAKE_MSG_ERROR = 'Fake message error'
@pytest.mark.resource(TestGoldenImageModule='golden_images')
class TestGoldenImageModule(ImageStreamerBaseTest):
    """
    ImageStreamerBaseTest has common test for main function,
    also provides the mocks used in this test case
    """

    @pytest.fixture(autouse=True)
    def specific_set_up(self):
        # Load scenarios from module examples
        # Each dict below mirrors the parameters Ansible would pass to the
        # module for one scenario (state plus the resource data).
        self.GOLDEN_IMAGE_UPLOAD = dict(
            config='config.json',
            state='present',
            data=dict(
                name='Demo Golden Image upload',
                description='Test',
                localImageFilePath='~/image_file.zip'
            )
        )

        self.GOLDEN_IMAGE_CREATE = dict(
            config='config.json',
            state='present',
            data=dict(
                name='Demo Golden Image creation',
                description="Test Description",
                imageCapture="true",
                osVolumeName='OSVolume-20',
                buildPlanName='Buld Plan name'
            )
        )

        self.GOLDEN_IMAGE_UPDATE = dict(
            config='config.json',
            state='present',
            data=dict(
                name='Demo Golden Image update',
                description='Test',
                newName='Golden Image Renamed')
        )

        self.GOLDEN_IMAGE_DOWNLOAD = dict(
            config='config.json',
            state='downloaded',
            data=dict(
                name='Demo Golden Image',
                destination_file_path='~/downloaded_image.zip'
            )
        )

        self.GOLDEN_IMAGE_ARCHIVE_DOWNLOAD = dict(
            config='config.json',
            state='archive_downloaded',
            data=dict(
                name='Demo Golden Image',
                destination_file_path='~/archive.log'
            )
        )

        self.GOLDEN_IMAGE_DELETE = dict(
            config='config.json',
            state='absent',
            data=dict(
                name='Golden Image name'
            )
        )

    def test_create_new_golden_image(self):
        # No existing image -> module should create one from an OS volume.
        self.resource.get_by.return_value = []
        self.resource.create.return_value = {"name": "name"}
        self.mock_ov_client.os_volumes.get_by_name.return_value = {'uri': '/rest/os-volumes/1'}
        self.mock_ov_client.build_plans.get_by.return_value = [{'uri': '/rest/build-plans/1'}]

        self.mock_ansible_module.params = self.GOLDEN_IMAGE_CREATE

        GoldenImageModule().run()

        # Names must have been resolved into URIs before the create call.
        self.resource.create.assert_called_once_with(
            {'osVolumeURI': '/rest/os-volumes/1',
             'description': 'Test Description',
             'buildPlanUri': '/rest/build-plans/1',
             'name': 'Demo Golden Image creation',
             'imageCapture': 'true'})

        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=True,
            msg=GoldenImageModule.MSG_CREATED,
            ansible_facts=dict(golden_image={"name": "name"})
        )

    def test_upload_a_golden_image(self):
        # A localImageFilePath with no existing image triggers an upload.
        self.resource.get_by.return_value = []
        self.resource.upload.return_value = {"name": "name"}

        self.mock_ansible_module.params = self.GOLDEN_IMAGE_UPLOAD

        file_path = self.GOLDEN_IMAGE_UPLOAD['data']['localImageFilePath']

        GoldenImageModule().run()

        self.resource.upload.assert_called_once_with(
            file_path,
            self.GOLDEN_IMAGE_UPLOAD['data'])

        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=True,
            msg=GoldenImageModule.MSG_UPLOADED,
            ansible_facts=dict(golden_image={"name": "name"})
        )

    def test_update_golden_image(self):
        # Existing image with differing data -> update path.
        self.resource.get_by.return_value = [self.GOLDEN_IMAGE_CREATE['data']]
        self.resource.update.return_value = {"name": "name"}

        self.mock_ansible_module.params = self.GOLDEN_IMAGE_UPDATE

        GoldenImageModule().run()

        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=True,
            msg=GoldenImageModule.MSG_UPDATED,
            ansible_facts=dict(golden_image={"name": "name"})
        )

    def test_golden_image_download(self):
        golden_image = self.GOLDEN_IMAGE_CREATE['data']
        golden_image['uri'] = '/rest/golden-images/1'

        self.resource.get_by.return_value = [golden_image]
        self.mock_ansible_module.params = self.GOLDEN_IMAGE_DOWNLOAD

        GoldenImageModule().run()

        download_file = self.GOLDEN_IMAGE_DOWNLOAD['data']['destination_file_path']
        self.resource.download.assert_called_once_with('/rest/golden-images/1', download_file)

        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=True,
            msg=GoldenImageModule.MSG_DOWNLOADED,
            ansible_facts={})

    def test_golden_image_download_nonexistent(self):
        # Downloading an image that does not exist must fail the module.
        self.resource.get_by.return_value = []
        self.mock_ansible_module.params = self.GOLDEN_IMAGE_DOWNLOAD

        GoldenImageModule().run()

        self.mock_ansible_module.fail_json.assert_called_once_with(exception=mock.ANY, msg=GoldenImageModule.MSG_WAS_NOT_FOUND,)

    def test_golden_image_archive_download(self):
        golden_image = self.GOLDEN_IMAGE_CREATE['data']
        golden_image['uri'] = '/rest/golden-images/1'

        self.resource.get_by.return_value = [golden_image]
        self.mock_ansible_module.params = self.GOLDEN_IMAGE_ARCHIVE_DOWNLOAD

        GoldenImageModule().run()

        download_file = self.GOLDEN_IMAGE_ARCHIVE_DOWNLOAD['data']['destination_file_path']
        self.resource.download_archive.assert_called_once_with('/rest/golden-images/1', download_file)

        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=True,
            msg=GoldenImageModule.MSG_ARCHIVE_DOWNLOADED,
            ansible_facts={})

    def test_golden_image_archive_download_nonexistent(self):
        self.resource.get_by.return_value = []
        self.mock_ansible_module.params = self.GOLDEN_IMAGE_ARCHIVE_DOWNLOAD

        GoldenImageModule().run()

        self.mock_ansible_module.fail_json.assert_called_once_with(exception=mock.ANY, msg=GoldenImageModule.MSG_WAS_NOT_FOUND)

    def test_should_not_update_when_data_is_equals(self):
        # Identical data -> idempotent no-op with changed=False.
        self.resource.get_by.return_value = [self.GOLDEN_IMAGE_UPDATE['data']]

        del self.GOLDEN_IMAGE_UPDATE['data']['newName']

        self.mock_ansible_module.params = self.GOLDEN_IMAGE_UPDATE

        GoldenImageModule().run()

        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            msg=GoldenImageModule.MSG_ALREADY_PRESENT,
            ansible_facts=dict(golden_image=self.GOLDEN_IMAGE_UPDATE['data'])
        )

    def test_delete_golden_image(self):
        self.resource.get_by.return_value = [self.GOLDEN_IMAGE_CREATE['data']]

        self.mock_ansible_module.params = self.GOLDEN_IMAGE_DELETE

        GoldenImageModule().run()

        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=True,
            msg=GoldenImageModule.MSG_DELETED
        )

    def test_should_do_nothing_when_deleting_a_non_existent_golden_image(self):
        self.resource.get_by.return_value = []

        self.mock_ansible_module.params = self.GOLDEN_IMAGE_DELETE

        GoldenImageModule().run()

        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            msg=GoldenImageModule.MSG_ALREADY_ABSENT
        )

    def test_should_fail_when_present_is_incosistent(self):
        # Supplying both creation attributes and an upload path is invalid.
        self.resource.get_by.return_value = []
        self.mock_ov_client.os_volumes.get_by_name.return_value = {'uri': '/rest/os-volumes/1'}

        self.GOLDEN_IMAGE_CREATE['data']['localImageFilePath'] = 'filename'

        self.mock_ansible_module.params = self.GOLDEN_IMAGE_CREATE

        GoldenImageModule().run()

        self.mock_ansible_module.fail_json.assert_called_once_with(exception=mock.ANY, msg=GoldenImageModule.MSG_CANT_CREATE_AND_UPLOAD)

    def test_should_fail_when_mandatory_attributes_are_missing(self):
        self.resource.get_by.return_value = []

        del self.GOLDEN_IMAGE_CREATE['data']['osVolumeName']

        self.mock_ansible_module.params = self.GOLDEN_IMAGE_CREATE

        GoldenImageModule().run()

        self.mock_ansible_module.fail_json.assert_called_once_with(exception=mock.ANY, msg=GoldenImageModule.MSG_MISSING_MANDATORY_ATTRIBUTES)

    def test_should_fail_when_os_volume_not_found(self):
        self.resource.get_by.return_value = []
        self.mock_ov_client.os_volumes.get_by_name.return_value = None

        self.mock_ansible_module.params = self.GOLDEN_IMAGE_CREATE

        GoldenImageModule().run()

        self.mock_ansible_module.fail_json.assert_called_once_with(exception=mock.ANY, msg=GoldenImageModule.MSG_OS_VOLUME_WAS_NOT_FOUND)

    def test_should_fail_when_build_plan_not_found(self):
        self.resource.get_by.return_value = []
        self.mock_ov_client.build_plans.get_by.return_value = None

        self.mock_ansible_module.params = self.GOLDEN_IMAGE_CREATE

        GoldenImageModule().run()

        self.mock_ansible_module.fail_json.assert_called_once_with(exception=mock.ANY, msg=GoldenImageModule.MSG_BUILD_PLAN_WAS_NOT_FOUND)
# Allow running this test module directly through pytest.
if __name__ == '__main__':
    pytest.main([__file__])
| {
"content_hash": "c7bcbf6136ac3b9644c04c7954b0bc1f",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 142,
"avg_line_length": 35.39033457249071,
"alnum_prop": 0.6269957983193277,
"repo_name": "HewlettPackard/oneview-ansible",
"id": "098dd6b9d19dadefe0fac5783fd51eaf6fc56ba6",
"size": "10179",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_image_streamer_golden_image.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1716153"
},
{
"name": "Shell",
"bytes": "5675"
}
],
"symlink_target": ""
} |
from feedz.processors.content_filter import ContentFilterProcessor
| {
"content_hash": "a92cf56c7d809cc6b30eb3c443a7d33d",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 66,
"avg_line_length": 67,
"alnum_prop": 0.8955223880597015,
"repo_name": "indexofire/gork",
"id": "d9f85b63adbb3083071e4eba3a673c4a93f8de7c",
"size": "91",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/gork/application/feedz/processors/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "199039"
},
{
"name": "JavaScript",
"bytes": "89817"
},
{
"name": "Python",
"bytes": "1120919"
},
{
"name": "Shell",
"bytes": "6713"
}
],
"symlink_target": ""
} |
""" Test the signal module. """
import os.path, time, signal
import srllib.process as _process
from srllib import util
from _common import *
class TestError(_process.PickleableException):
pass
def _childfunc_writes(outstr, errstr):
for i in range(10):
sys.stdout.write("%s\n" % outstr)
sys.stderr.write("%s\n" % errstr)
def _childfunc_raises():
raise TestError("TestError")
def _childfunc_sleeps():
import time
while True:
time.sleep(0.1)
class ProcessTest(TestCase):
def test_child_exception(self):
""" Test catching an exception raised in the child. """
proc = _process.Process(_childfunc_raises)
try:
try: proc.wait()
except _process.ChildError, err:
self.assert_(isinstance(err.orig_exception, TestError))
else:
print proc.stderr.read()
raise AssertionError, "Exception not raised"
# Polling should raise the same error
self.assertRaises(_process.ChildError, proc.poll)
finally:
proc.close()
def test_terminate(self):
""" Test terminating the child process. """
if util.get_os_name() == util.Os_Windows:
# This method is only usable if pywin32 is installed
try: import win32process
except ImportError: return
proc = _process.Process(_childfunc_sleeps)
self.assertEqual(proc.terminate(), -signal.SIGTERM)
# Make sure that it is safe to call this after the process has exited
self.assertEqual(proc.terminate(), -signal.SIGTERM)
proc.close()
'''
def test_run_in_terminal(self):
""" Test running code in virtual terminal. """
if get_os_name() == Os_Linux:
# Execute child function under supervision and observe outputs
def slot_stdout(txt):
self.__stdout += txt
def slot_stderr(txt):
self.__stderr += txt
self.__stdout, self.__stderr = "", ""
procmon = _process.ThreadedProcessMonitor(use_pty=True)
self._connect_to(procmon.sig_stdout, slot_stdout)
self._connect_to(procmon.sig_stderr, slot_stderr)
procmon(_childfunc_writes, ["Test out", "Test err"])
procmon.wait()
for l in self.__stdout.splitlines():
self.assertEqual(l, "Test out")
for l in self.__stderr.splitlines():
self.assertEqual(l, "Test err")
'''
def _childfunc_succeeds():
pass
def _childfunc_fails():
raise Exception("Failure")
class ThreadedProcessMonitorTest(TestCase):
    """ Test the threaded process monitor. """
    # NOTE(review): the triple-quoted block below is a *disabled* test kept
    # as a plain string literal -- it is never executed.
    '''
    def test_capture_output(self):
        """ Test capturing textual output from child process. """
        def slot_stdout(text):
            self.__stdout += text
        def slot_stderr(text):
            self.__stderr += text
        def childfunc(process):
            import sys
            sys.stdout.write("Test stdout")
            sys.stderr.write("Test stderr" )
        procmon = _process.ThreadedProcessMonitor()
        self.__stdout, self.__stderr = "", ""
        self._connect_to(procmon.sig_stdout, slot_stdout)
        self._connect_to(procmon.sig_stderr, slot_stderr)
        procmon(childfunc)
        procmon.wait()
        self.assertEqual(self.__stdout, "Test stdout")
        self.assertEqual(self.__stderr, "Test stderr")
    '''

    def test_success(self):
        """ Verify that sig_finished is received when the process finishes
        successfully. """
        def slot_finished():
            # Records that the success signal fired
            self.__finished = True

        def slot_failed(err):
            # Records that the failure signal fired
            self.__failed = True

        procmon = _process.ThreadedProcessMonitor()
        self.__finished = self.__failed = False
        # _connect_to, assert_ and assertNot come from the project's custom
        # TestCase base class (not stdlib unittest) -- presumably it tracks
        # signal connections for cleanup; confirm in the base class.
        self._connect_to(procmon.sig_finished, slot_finished)
        self._connect_to(procmon.sig_failed, slot_failed)
        procmon(_childfunc_succeeds)
        procmon.wait()
        self.assert_(self.__finished)
        self.assertNot(self.__failed)

    def test_failure(self):
        """ Verify that sig_failed is received when the process fails. """
        def slot_finished():
            self.__finished = True

        def slot_failed(err):
            # Keep the forwarded error for inspection after wait()
            self.__error = err

        procmon = _process.ThreadedProcessMonitor()
        self.__finished = False
        self.__error = None
        self._connect_to(procmon.sig_finished, slot_finished)
        self._connect_to(procmon.sig_failed, slot_failed)
        procmon(_childfunc_fails)
        procmon.wait()
        # The monitor is expected to wrap the child's exception in ChildError
        self.assert_(isinstance(self.__error, _process.ChildError), self.__error)
        self.assertNot(self.__finished)

    def test_command(self):
        """ Test monitoring a command (invoke a command instead of a Python
        callable).
        """
        procmon = _process.ThreadedProcessMonitor()
        # Use realpath in case we get a path with symlink(s)
        cwd = os.path.realpath(self._get_tempdir())
        prcs = procmon.monitor_command(["python", "-c", "import os; print "
                                       "os.getcwd()"], cwd=cwd)
        self.assertEqual(prcs.stdout.read().strip(), cwd)
| {
"content_hash": "3296fd722d2938f635eea1c174740bcb",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 81,
"avg_line_length": 35.10457516339869,
"alnum_prop": 0.5758704151927015,
"repo_name": "aknuds1/srl-python-lib",
"id": "1d1fe4d3f6cab88bdbae0060c6d2ebe2d346735b",
"size": "5371",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Tests/testprocess.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "262264"
}
],
"symlink_target": ""
} |
__author__ = 'Administrator'
class ListNode:
    """Singly linked list node: a payload value plus a next pointer."""

    def __init__(self, x):
        # The next pointer is filled in later by whoever builds the chain.
        self.val = x
        self.next = None
def merge_list(head1, head2):
    """Merge two sorted singly linked lists in place and return the head.

    Stable: on equal values the node from ``head1`` comes first.  Runs in
    O(len(head1) + len(head2)) time and O(1) extra space.  Unlike the
    original, no ``ListNode(0)`` dummy head is allocated, so the function
    works with any node type exposing ``val``/``next``.
    """
    # Guard clauses handle an empty list on either side.
    if head1 is None:
        return head2
    if head2 is None:
        return head1
    # Pick the smaller first node as the merged head (ties favour head1).
    if head1.val <= head2.val:
        head, head1 = head1, head1.next
    else:
        head, head2 = head2, head2.next
    tail = head
    while head1 is not None and head2 is not None:
        if head1.val <= head2.val:
            tail.next = head1
            tail = head1
            head1 = head1.next
        else:
            tail.next = head2
            tail = head2
            head2 = head2.next
    # Splice in whichever list still has nodes left.
    tail.next = head1 if head1 is not None else head2
    return head
def print_list(head):
    """Print each node's value on its own line, walking to the end.

    Uses the function form of ``print`` so the code runs under both
    Python 2 and Python 3 -- the original ``print head.val`` statement is a
    syntax error on Python 3.  Also uses ``is not None`` per PEP 8.
    """
    while head is not None:
        print(head.val)
        head = head.next
array1 = [13]
array2 = [1, 12, 22, 25]


def _build_chain(values):
    """Return a dummy head node whose .next starts a chain of *values*.

    Replaces the two duplicated hand-rolled building loops; callers keep
    using ``.next`` to reach the real head, as before.
    """
    dummy = ListNode(0)
    tail = dummy
    for value in values:
        tail.next = ListNode(value)
        tail = tail.next
    return dummy


# Keep the original module-level names: the merge call below relies on them.
fakehead1 = _build_chain(array1)
fakehead2 = _build_chain(array2)
print_list(fakehead1.next)
print_list(fakehead2.next)
# Function-form print works identically on Python 2 and Python 3.
print('=======================')
print_list(merge_list(fakehead1.next, fakehead2.next)) | {
"content_hash": "9fce84428d248ad1a08eb0a26ef67c49",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 54,
"avg_line_length": 24.41304347826087,
"alnum_prop": 0.5983971504897596,
"repo_name": "deepbluech/leetcode",
"id": "0afef05c2ec4b4e45a28cdedc4f82702da995ef3",
"size": "1123",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SwordOffer17_MergeList.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25521"
}
],
"symlink_target": ""
} |
import os
import logging
import traceback
import requests
import zipfile
from tempfile import NamedTemporaryFile
from django.conf import settings
from django.core.files.storage import get_storage_class, FileSystemStorage
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.core.mail import mail_admins
from django.utils.translation import gettext as t
SLASH = "/"
class MyError(Exception):
    """Generic application-specific error raised by viewer utilities."""
class EnketoError(Exception):
    """Raised when the Enketo API responds with an explicit error message."""
def get_path(path, suffix):
    """Insert *suffix* between a path's base name and its extension."""
    base, extension = os.path.splitext(path)
    return base + suffix + extension
def image_urls_dict(instance):
    """
    Returns a dict of attachments with keys as base filename
    and values link through `kobocat` redirector.
    Only exposes `suffix` version of it. It will be created on the fly by the
    redirector
    :param instance: Instance
    :return: dict
    """
    # Strip the leading dash from the configured thumbnail suffix.
    suffix = settings.THUMB_CONF['medium']['suffix'][1:]
    return {
        attachment.filename: attachment.secure_url(suffix=suffix)
        for attachment in instance.attachments.all()
    }
def report_exception(subject, info, exc_info=None):
    """Report an error to the admins, or dump it to stdout in DEBUG mode.

    When ``exc_info`` (a ``sys.exc_info()`` tuple) is supplied, the
    exception class, message and full traceback are appended to *info*
    before it is reported.
    """
    if exc_info:
        exc_class, exc_value = exc_info[:2]
        info += t("Exception in request: %(class)s: %(error)s") % {
            'class': exc_class.__name__,
            'error': exc_value,
        }
        info += "".join(traceback.format_exception(*exc_info))

    if settings.DEBUG:
        # Local development: print instead of mailing/logging.
        print(subject, flush=True)
        print(info, flush=True)
    else:
        mail_admins(subject=subject, message=info)
        logging.error(info, exc_info=exc_info)
def django_file(path, field_name, content_type):
    """Wrap the file at *path* in an ``InMemoryUploadedFile``.

    The handle is deliberately left open: the returned upload object reads
    from it lazily, so closing it here would break later reads.
    """
    # adapted from here: http://groups.google.com/group/django-users/browse_th\
    # read/thread/834f988876ff3c45/
    f = open(path, 'rb')
    return InMemoryUploadedFile(
        file=f,
        field_name=field_name,
        name=f.name,
        content_type=content_type,
        size=os.path.getsize(path),
        charset=None
    )
def export_def_from_filename(filename):
    """Derive ``(extension, mime_type)`` for an export from its file name.

    Propagates ``KeyError`` when the extension has no registered MIME type.
    """
    # TODO fix circular import and move to top
    from onadata.apps.viewer.models.export import Export

    extension = os.path.splitext(filename)[1][1:]
    return extension, Export.EXPORT_MIMES[extension]
def get_client_ip(request):
    """Return the originating client IP, honouring ``X-Forwarded-For``.

    When the request passed through proxies, the first entry of the
    forwarded chain is the original client; otherwise fall back to the
    socket peer address.
    """
    forwarded_chain = request.META.get('HTTP_X_FORWARDED_FOR')
    if forwarded_chain:
        return forwarded_chain.split(',')[0]
    return request.META.get('REMOTE_ADDR')
def enketo_url(
    form_url,
    id_string,
    instance_xml=None,
    instance_id=None,
    return_url=None,
    instance_attachments=None,
    action=None,
):
    """Request a survey or instance (edit/view) URL from the Enketo API.

    Returns the URL string on success, ``False`` when Enketo is not
    configured or no usable URL comes back, and raises ``EnketoError`` when
    the API responds with an explicit error message.
    """
    # Bail out early when Enketo is not configured at all.
    if (
        not hasattr(settings, 'ENKETO_URL')
        and not hasattr(settings, 'ENKETO_API_SURVEY_PATH')
    ):
        return False
    if instance_attachments is None:
        instance_attachments = {}
    # Default to the survey endpoint...
    url = settings.ENKETO_URL + settings.ENKETO_API_SURVEY_PATH
    values = {
        'form_id': id_string,
        'server_url': form_url
    }
    # ...but switch to the instance endpoint when a submission is supplied.
    if instance_id is not None and instance_xml is not None:
        url = settings.ENKETO_URL + settings.ENKETO_API_INSTANCE_PATH
        values.update({
            'instance': instance_xml,
            'instance_id': instance_id,
            'return_url': return_url
        })
        for key, value in instance_attachments.items():
            values.update({
                'instance_attachments[' + key + ']': value
            })
        # The Enketo view-only endpoint differs to the edit by the addition of /view
        # as shown in the docs: https://apidocs.enketo.org/v2#/post-instance-view
        if action == 'view':
            url = f'{url}/view'
    # NOTE(review): verify=False disables TLS certificate validation on the
    # Enketo request -- confirm this is intentional.
    req = requests.post(url, data=values,
                        auth=(settings.ENKETO_API_TOKEN, ''), verify=False)
    if req.status_code in [200, 201]:
        try:
            response = req.json()
        except ValueError:
            # Non-JSON body: fall through and return False below.
            pass
        else:
            # Prefer the most specific URL type the API returned.
            if 'edit_url' in response:
                return response['edit_url']
            elif 'view_url' in response:
                return response['view_url']
            if settings.ENKETO_OFFLINE_SURVEYS and ('offline_url' in response):
                return response['offline_url']
            if 'url' in response:
                return response['url']
    else:
        try:
            response = req.json()
        except ValueError:
            pass
        else:
            # Surface the API's own error message to the caller.
            if 'message' in response:
                raise EnketoError(response['message'])
    return False
def create_attachments_zipfile(attachments, output_file=None):
    """Bundle the attachments' media files into an uncompressed ZIP.

    *output_file* defaults to a ``NamedTemporaryFile``; when the caller
    supplies its own (e.g. a remote-storage stream), seeking on it is
    disabled first so ``zipfile`` uses its streaming code path.  Returns
    the file object containing the archive.
    """
    if not output_file:
        output_file = NamedTemporaryFile()
    else:
        # Disable seeking in a way understood by Python's zipfile module. See
        # https://github.com/python/cpython/blob/ca2009d72a52a98bf43aafa9ad270a4fcfabfc89/Lib/zipfile.py#L1270-L1274
        # This is a workaround for https://github.com/kobotoolbox/kobocat/issues/475
        # and https://github.com/jschneier/django-storages/issues/566
        def no_seeking(*a, **kw):
            raise AttributeError(
                'Seeking disabled! See '
                'https://github.com/kobotoolbox/kobocat/issues/475'
            )
        try:
            output_file.seek = no_seeking
        except AttributeError as e:
            # The default, file-system storage won't allow changing the `seek`
            # attribute, which is fine because seeking on local files works
            # perfectly anyway
            storage_class = get_storage_class()
            if not issubclass(storage_class, FileSystemStorage):
                logging.warning(
                    f'{storage_class} may not be a local storage class, but '
                    f'disabling seeking failed: {e}'
                )
    storage = get_storage_class()()
    with zipfile.ZipFile(output_file, 'w', zipfile.ZIP_STORED, allowZip64=True) as zip_file:
        for attachment in attachments:
            if storage.exists(attachment.media_file.name):
                try:
                    with storage.open(attachment.media_file.name, 'rb') as source_file:
                        zip_file.writestr(attachment.media_file.name, source_file.read())
                except Exception as e:
                    # Best-effort: a failed attachment is reported but does
                    # not abort the rest of the archive.
                    report_exception("Error adding file \"{}\" to archive.".format(attachment.media_file.name), e)
    return output_file
def _get_form_url(username):
    """Build the public form URL for *username*.

    In testing mode both the host and the username are overridden by test
    settings.  KOBOCAT_URL is used otherwise because a public URL is
    required to prevent Enketo SSRF protection from blocking the request;
    it already includes the protocol.
    """
    if settings.TESTING_MODE:
        http_host = 'http://{}'.format(settings.TEST_HTTP_HOST)
        username = settings.TEST_USERNAME
    else:
        http_host = settings.KOBOCAT_URL
    return '{http_host}/{username}'.format(
        http_host=http_host,
        username=username
    )
def get_enketo_submission_url(request, instance, return_url, action=None):
    """Build an Enketo edit/view URL for a single submission.

    ``request`` is accepted for interface compatibility but not used.
    """
    owner_form_url = _get_form_url(instance.xform.user.username)
    attachments = image_urls_dict(instance)
    return enketo_url(
        owner_form_url,
        instance.xform.id_string,
        instance_xml=instance.xml,
        instance_id=instance.uuid,
        return_url=return_url,
        instance_attachments=attachments,
        action=action,
    )
| {
"content_hash": "b6534098356f313d19bc471190055fa8",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 116,
"avg_line_length": 31.908695652173915,
"alnum_prop": 0.622428123722578,
"repo_name": "kobotoolbox/kobocat",
"id": "ccdf964482cf58f8b529c94f7d8994bdb4256b1c",
"size": "7355",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "onadata/libs/utils/viewer_tools.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "146326"
},
{
"name": "Dockerfile",
"bytes": "3965"
},
{
"name": "HTML",
"bytes": "136962"
},
{
"name": "JavaScript",
"bytes": "734122"
},
{
"name": "Less",
"bytes": "19821"
},
{
"name": "Makefile",
"bytes": "2286"
},
{
"name": "Python",
"bytes": "1264157"
},
{
"name": "Shell",
"bytes": "9858"
}
],
"symlink_target": ""
} |
"""
Copyright Theophile Rotoudjimaye <rotoudjimaye.theo@gmail.com>
All Rights Reserved
"""
from django.db import models
from django.utils.translation import ugettext as _
import datetime
def datetime2millis(timestamp):
    """Convert a naive datetime to milliseconds since the Unix epoch.

    The sub-second part of *timestamp* is deliberately discarded: only
    whole seconds are multiplied up to milliseconds.
    """
    delta = timestamp - datetime.datetime(1970, 1, 1)
    whole_seconds = delta.days * 24 * 60 * 60 + delta.seconds
    return whole_seconds * 1000
def millis2datetime(millisecs):
    """Convert milliseconds since the Unix epoch to a naive datetime.

    BUG FIX: divide by the float ``1000.0`` so the sub-second part survives
    under Python 2 / Jython, where ``millisecs / 1000`` is integer division
    and silently truncated to whole seconds.  Under Python 3 the result is
    unchanged (``/`` is already true division).
    """
    return datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=millisecs / 1000.0)
class OutboxSm(models.Model):
    """Outgoing SMS message.

    Exposes Java-bean style accessors because this module targets Jython
    and is consumed from JVM code; timestamps cross the boundary as
    ``java.sql.Timestamp`` objects.

    NOTE(review): ``java`` is never imported in this module -- under CPython
    every timestamp accessor would raise ``NameError``.  Presumably the
    Jython runtime resolves it; confirm.
    """
    source_addr = models.CharField(_(u"Source Address"), max_length=255)
    dest_addr = models.CharField(_(u"Destination Address"), max_length=255)
    message = models.TextField(_(u"Message Content"))
    status = models.CharField(_(u"Status"), max_length=255, blank=True, null=True)
    created_date = models.DateTimeField(_(u"Created At"), auto_now=True)
    submit_date = models.DateTimeField(_(u"Submited At"), blank=True, null=True)
    delivery_date = models.DateTimeField(_(u"Delivered At"), blank=True, null=True)
    deleted_date = models.IntegerField(_(u"Deleted At"), blank=True, null=True)
    # BUG FIX: the three fields below all carried the copy-pasted verbose
    # name _(u"Status"); each now has its own label.
    carrier_id = models.CharField(_(u"Carrier Id"), max_length=255, blank=True, null=True)
    gateway_ip = models.CharField(_(u"Gateway IP"), max_length=255, blank=True, null=True)
    message_id = models.CharField(_(u"Message Id"), max_length=255, blank=True, null=True)

    def getSourceAddr(self): return self.source_addr

    def setSourceAddr(self, sourceAddr): self.source_addr = sourceAddr

    def getDestAddr(self): return self.dest_addr

    def setDestAddr(self, destAddr): self.dest_addr = destAddr

    def getMessage(self): return self.message

    def setMessage(self, message): self.message = message

    def getCarrierId(self): return self.carrier_id

    def setCarrierId(self, carrierId): self.carrier_id = carrierId

    def getCreationDate(self):
        return java.sql.Timestamp(datetime2millis(self.created_date)) if self.created_date else None

    def setCreationDate(self, creationDate):
        self.created_date = millis2datetime(creationDate.getTime()) if creationDate is not None else None

    def getSubmitDate(self):
        return java.sql.Timestamp(datetime2millis(self.submit_date)) if self.submit_date else None

    def setSubmitDate(self, submitDate):
        self.submit_date = millis2datetime(submitDate.getTime()) if submitDate is not None else None

    def getDeliveryDate(self):
        return java.sql.Timestamp(datetime2millis(self.delivery_date)) if self.delivery_date else None

    def setDeliveryDate(self, deliveryDate):
        # BUG FIX: the original passed the *method object* ``getTime``
        # (missing call parentheses) to millis2datetime, which would raise
        # instead of storing the delivery time.
        self.delivery_date = millis2datetime(deliveryDate.getTime()) if deliveryDate is not None else None

    def getArchived(self):
        # NOTE(review): deleted_date is declared as an IntegerField, yet it
        # is fed to datetime2millis (which subtracts datetimes) here and
        # assigned a datetime in setArchived -- field type and accessors
        # disagree; verify the intended storage format.
        return java.sql.Timestamp(datetime2millis(self.deleted_date)) if self.deleted_date else None

    def setArchived(self, archived):
        self.deleted_date = millis2datetime(archived.getTime()) if archived is not None else None

    #def getId(self): return self.id
    def setMessageId(self, message_id): self.message_id = message_id

    def getMessageId(self): return self.message_id

    def setGateway(self, gateway): self.gateway_ip = gateway

    def getGateway(self): return self.gateway_ip

    def getLongMessageId(self):
        # Globally unique id: originating gateway IP + carrier message id.
        return self.gateway_ip + "-" + self.message_id

    def getStatus(self): return self.status

    def setStatus(self, status): self.status = status
#class InboxSm(models.Model, djangojy.jysms.IInboxSm): pass | {
"content_hash": "fd6a4c5c95586e30273ddb0bce3bbddd",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 105,
"avg_line_length": 39.07865168539326,
"alnum_prop": 0.7239792984473835,
"repo_name": "rotoudjimaye/django-jy",
"id": "02e9b5b32f5a31d943c33d8c92af694dbcb1e1ea",
"size": "3503",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/python/djangojy/jysms/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Java",
"bytes": "94302"
},
{
"name": "JavaScript",
"bytes": "423"
},
{
"name": "Python",
"bytes": "197142"
}
],
"symlink_target": ""
} |
"""
nmeta tc_identity.py Tests
Uses pytest, install with:
sudo apt-get install python-pytest
To run test, type in:
py.test -vs
Note that packets + metadata are imported from local packets_* modules
TBD: everything
"""
#*** Handle tests being in different directory branch to app code:
import sys
sys.path.insert(0, '../nmeta')
import logging
#*** JSON imports:
import json
from json import JSONEncoder
import binascii
#*** For timestamps:
import datetime
#*** Import dpkt for packet parsing:
import dpkt
#*** nmeta imports:
import nmeta
import config
import flows as flows_module
import identities as identities_module
import policy as policy_module
import tc_identity
#*** nmeta test packet imports:
import packets_ipv4_ARP as pkts_arp
import packets_ipv4_DHCP_firsttime as pkts_dhcp
import packets_lldp as pkts_lldp
import packets_ipv4_dns as pkts_dns
import packets_ipv4_dns_4 as pkts_dns4
import packets_ipv4_tcp_facebook as pkts_facebook
import packets_ipv4_http as pkts_http_pc1
import packets_ipv4_http_lg1 as pkts_http_lg1
#*** Instantiate Config class:
# NOTE(review): this rebinds the name `config` from the imported module to a
# Config instance, shadowing the module for the rest of the file.
config = config.Config()
logger = logging.getLogger(__name__)
#*** Test DPIDs and in ports:
DPID1 = 1
INPORT1 = 1
INPORT2 = 2
#======================== tc_identity.py Tests ================================
def test_LLDP_identity():
    """
    Test harvesting identity metadata from LLDP packets and then
    using this to validate an identity
    """
    #*** Instantiate class objects:
    flow = flows_module.Flow(config)
    policy = policy_module.Policy(config)
    identities = identities_module.Identities(config, policy)
    tc_ident = tc_identity.IdentityInspect(config)

    def _ident_match(attr, value):
        #*** Build a classifier result, run the check, return the match flag:
        clsf = policy_module.TCClassifierResult("", "")
        clsf.policy_attr = attr
        clsf.policy_value = value
        tc_ident.check_identity(clsf, flow.packet, identities)
        return clsf.match

    #*** Harvest LLDP packet 0 and verify the stored node identity:
    flow.ingest_packet(DPID1, INPORT1, pkts_lldp.RAW[0], datetime.datetime.now())
    identities.harvest(pkts_lldp.RAW[0], flow.packet)
    result_identity = identities.findbynode(pkts_lldp.LLDP_SYSTEM_NAME[0])
    assert result_identity['host_name'] == pkts_lldp.LLDP_SYSTEM_NAME[0]
    assert result_identity['host_desc'] == pkts_lldp.LLDP_SYSTEM_DESC[0]
    assert result_identity['dpid'] == DPID1
    assert result_identity['in_port'] == INPORT1
    assert result_identity['mac_address'] == pkts_lldp.ETH_SRC[0]
    assert result_identity['harvest_type'] == 'LLDP'
    #*** Unknown system name must not match:
    assert _ident_match('identity_lldp_systemname', 'foo') == False
    #*** Exact system name must match:
    assert _ident_match('identity_lldp_systemname', 'pc1.example.com') == True
    #*** Regular expression matches (*.example.com and pc1.* match,
    #*** *.example.org must not):
    assert _ident_match('identity_lldp_systemname_re', '^.*\.example\.com') == True
    assert _ident_match('identity_lldp_systemname_re', '^pc1\.*') == True
    assert _ident_match('identity_lldp_systemname_re', '^.*\.example\.org') == False
    #*** LLDP packet 1 ingested 125s in the past: stale (past LLDP TTL),
    #*** so sw1.example.com must not match:
    flow.ingest_packet(DPID1, INPORT1, pkts_lldp.RAW[1],
                       datetime.datetime.now() - datetime.timedelta(seconds=125))
    identities.harvest(pkts_lldp.RAW[1], flow.packet)
    assert _ident_match('identity_lldp_systemname', 'sw1.example.com') == False
    #*** Re-ingest with the current time; identity is fresh again:
    flow.ingest_packet(DPID1, INPORT1, pkts_lldp.RAW[1], datetime.datetime.now())
    identities.harvest(pkts_lldp.RAW[1], flow.packet)
    assert _ident_match('identity_lldp_systemname', 'sw1.example.com') == True
def test_DHCP_identity():
    """
    Test harvesting identity metadata from DHCP packets and then
    using this to validate an identities against the learnt DHCP hostname
    """
    #*** Instantiate class objects:
    flow = flows_module.Flow(config)
    policy = policy_module.Policy(config)
    identities = identities_module.Identities(config, policy)
    tc_ident = tc_identity.IdentityInspect(config)

    def _hostname_match(attr, value):
        #*** Run an identity check against the most recently ingested packet:
        clsf = policy_module.TCClassifierResult("", "")
        clsf.policy_attr = attr
        clsf.policy_value = value
        tc_ident.check_identity(clsf, flow.packet, identities)
        return clsf.match

    def _ingest(in_port, raw):
        flow.ingest_packet(DPID1, in_port, raw, datetime.datetime.now())

    #*** Before any DHCP harvesting, the hostname is unknown:
    _ingest(INPORT1, pkts_http_pc1.RAW[0])
    assert _hostname_match('identity_dhcp_hostname', 'pc1') == False
    #*** Client to Server DHCP Request (DHCP Option 12 host name is pc1):
    _ingest(INPORT1, pkts_dhcp.RAW[2])
    identities.harvest(pkts_dhcp.RAW[2], flow.packet)
    #*** Server to Client DHCP ACK:
    _ingest(INPORT2, pkts_dhcp.RAW[3])
    identities.harvest(pkts_dhcp.RAW[3], flow.packet)
    #*** Traffic from pc1 now matches its DHCP hostname:
    _ingest(INPORT1, pkts_http_pc1.RAW[0])
    assert _hostname_match('identity_dhcp_hostname', 'pc1') == True
    #*** Traffic *to* pc1 also matches:
    _ingest(INPORT2, pkts_http_pc1.RAW[1])
    assert _hostname_match('identity_dhcp_hostname', 'pc1') == True
    #*** Traffic from a different host (lg1) must not match pc1:
    _ingest(INPORT1, pkts_http_lg1.RAW[0])
    assert _hostname_match('identity_dhcp_hostname', 'pc1') == False
    #*** Regular expression variants (pc.* matches, ac.* must not):
    _ingest(INPORT1, pkts_http_pc1.RAW[0])
    assert _hostname_match('identity_dhcp_hostname_re', 'pc.*') == True
    assert _hostname_match('identity_dhcp_hostname_re', 'ac.*') == False
def test_DNS_identity():
    """
    Test harvesting identity metadata from DNS packets and then
    using this to validate an identity
    """
    #*** Instantiate class objects:
    flow = flows_module.Flow(config)
    policy = policy_module.Policy(config)
    identities = identities_module.Identities(config, policy)
    tc_ident = tc_identity.IdentityInspect(config)

    def _service_match(attr, value):
        #*** Run an identity check against the most recently ingested packet:
        clsf = policy_module.TCClassifierResult("", "")
        clsf.policy_attr = attr
        clsf.policy_value = value
        tc_ident.check_identity(clsf, flow.packet, identities)
        return clsf.match

    #*** DNS packet 1 (NAME to CNAME, then second answer with IP for CNAME):
    flow.ingest_packet(DPID1, INPORT1, pkts_dns.RAW[1], datetime.datetime.now())
    identities.harvest(pkts_dns.RAW[1], flow.packet)
    result_identity = identities.findbyservice(pkts_dns.DNS_NAME[1])
    assert result_identity['service_name'] == pkts_dns.DNS_NAME[1]
    assert result_identity['service_alias'] == pkts_dns.DNS_CNAME[1]
    result_identity = identities.findbyservice(pkts_dns.DNS_CNAME[1])
    assert result_identity['service_name'] == pkts_dns.DNS_CNAME[1]
    assert result_identity['ip_address'] == pkts_dns.DNS_IP[1]
    #*** TCP SYN to www.facebook.com (CNAME star-mini.c10r.facebook.com,
    #*** IP 179.60.193.36):
    flow.ingest_packet(DPID1, INPORT1, pkts_facebook.RAW[0], datetime.datetime.now())
    assert _service_match('identity_service_dns', 'foo') == False
    assert _service_match('identity_service_dns', 'www.facebook.com') == True
    #*** Harvest another DNS packet carrying a different A record for the
    #*** same CNAME (star-mini.c10r.facebook.com A 31.13.95.36):
    flow.ingest_packet(DPID1, INPORT1, pkts_dns4.RAW[1], datetime.datetime.now())
    identities.harvest(pkts_dns4.RAW[1], flow.packet)
    #*** Same TCP SYN again; must still match despite one-to-many A records:
    flow.ingest_packet(DPID1, INPORT1, pkts_facebook.RAW[0], datetime.datetime.now())
    assert _service_match('identity_service_dns', 'www.facebook.com') == True
    #*** Regular expression variants (*.facebook.com matches, *.facebook.org
    #*** must not):
    assert _service_match('identity_service_dns_re', '^.*\.facebook\.com') == True
    assert _service_match('identity_service_dns_re', '^.*\.facebook\.org') == False
    #*** TCP SYN+ACK *from* www.facebook.com: matching on source IP address:
    flow.ingest_packet(DPID1, INPORT1, pkts_facebook.RAW[1], datetime.datetime.now())
    assert _service_match('identity_service_dns', 'www.facebook.com') == True
#================= HELPER FUNCTIONS ===========================================
def mac_addr(address):
    """
    Convert a MAC address to a readable/printable string

    Accepts either a Python 2 ``str`` of raw characters (each needing
    ``ord``) or a Python 3 ``bytes`` object (iteration already yields
    ints) -- the original ``ord(b)`` failed on the latter.
    """
    return ':'.join('%02x' % (octet if isinstance(octet, int) else ord(octet))
                    for octet in address)
| {
"content_hash": "5c5b7068d098010edfba192d5e7ba146",
"timestamp": "",
"source": "github",
"line_count": 306,
"max_line_length": 115,
"avg_line_length": 43.04248366013072,
"alnum_prop": 0.706704122693797,
"repo_name": "mattjhayes/nmeta",
"id": "be1c0943b2ce6c0dd1e848428c9da63b26d9d6fe",
"size": "13171",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_tc_identity.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5608"
},
{
"name": "HTML",
"bytes": "15623"
},
{
"name": "JavaScript",
"bytes": "97890"
},
{
"name": "Python",
"bytes": "519273"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import creditor.models
def generate_tids(apps, schema_editor):
    """Backfill a fresh unique transaction id on every existing row."""
    Transaction = apps.get_model('creditor', 'Transaction')
    # iterator() keeps memory flat on large tables; each row is saved
    # immediately after its id is assigned.
    for transaction in Transaction.objects.all().iterator():
        transaction.unique_id = creditor.models.generate_transaction_id()
        transaction.save()
class Migration(migrations.Migration):
    # Data migration: populate `unique_id` on pre-existing rows *before*
    # tightening the column to unique.  Note: RunPython has no reverse_code,
    # so this migration is irreversible.

    dependencies = [
        ('creditor', '0003_auto_20151128_1923'),
    ]

    operations = [
        # First fill in ids for all existing transactions...
        migrations.RunPython(
            generate_tids,
        ),
        # ...then enforce uniqueness on the now-populated column.
        migrations.AlterField(
            model_name='transaction',
            name='unique_id',
            field=models.CharField(unique=True, verbose_name='Unique transaction id', max_length=32),
        ),
    ]
| {
"content_hash": "a5150a2b91197e2f1ff4afba5a1f6c5b",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 101,
"avg_line_length": 28.185185185185187,
"alnum_prop": 0.6294349540078844,
"repo_name": "ojousima/asylum",
"id": "2a96072b64922d6889c674c9569899ca528d7160",
"size": "785",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/creditor/migrations/0004_auto_20151128_1933.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "31215"
},
{
"name": "HTML",
"bytes": "6869"
},
{
"name": "JavaScript",
"bytes": "2487"
},
{
"name": "Python",
"bytes": "135691"
},
{
"name": "Shell",
"bytes": "2066"
}
],
"symlink_target": ""
} |
"""Daemon launched at startup to handle skill activities.
In this repo, you will not find an entry called mycroft-skills in the bin
directory. The executable gets added to the bin directory when installed
(see setup.py)
"""
import time
from threading import Event
import mycroft.lock
from msm.exceptions import MsmException
from mycroft import dialog
from mycroft.api import is_paired, BackendDown, DeviceApi
from mycroft.audio import wait_while_speaking
from mycroft.enclosure.api import EnclosureAPI
from mycroft.configuration import Configuration
from mycroft.messagebus.client import MessageBusClient
from mycroft.messagebus.message import Message
from mycroft.util import (
connected,
create_echo_function,
create_daemon,
reset_sigint_handler,
wait_for_exit_signal
)
from mycroft.util.lang import set_active_lang
from mycroft.util.log import LOG
from .core import FallbackSkill
from .event_scheduler import EventScheduler
from .intent_service import IntentService
from .padatious_service import PadatiousService
from .skill_manager import SkillManager
RASPBERRY_PI_PLATFORMS = ('mycroft_mark_1', 'picroft', 'mycroft_mark_2pi')
class DevicePrimer(object):
"""Container handling the device preparation.
Arguments:
message_bus_client: Bus client used to interact with the system
config (dict): Mycroft configuration
"""
def __init__(self, message_bus_client, config):
self.bus = message_bus_client
self.platform = config['enclosure'].get("platform", "unknown")
self.enclosure = EnclosureAPI(self.bus)
self.is_paired = False
self.backend_down = False
# Remember "now" at startup. Used to detect clock changes.
def prepare_device(self):
"""Internet dependent updates of various aspects of the device."""
self._get_pairing_status()
self._update_system_clock()
self._update_system()
# Above will block during update process and kill this instance if
# new software is installed
if self.backend_down:
self._notify_backend_down()
else:
self._display_skill_loading_notification()
self.bus.emit(Message('mycroft.internet.connected'))
self._ensure_device_is_paired()
self._update_device_attributes_on_backend()
def _get_pairing_status(self):
"""Set an instance attribute indicating the device's pairing status"""
try:
self.is_paired = is_paired(ignore_errors=False)
except BackendDown:
LOG.error('Cannot complete device updates due to backend issues.')
self.backend_down = True
if self.is_paired:
LOG.info('Device is paired')
def _update_system_clock(self):
"""Force a sync of the local clock with the Network Time Protocol.
The NTP sync is only forced on Raspberry Pi based devices. The
assumption being that these devices are only running Mycroft services.
We don't want to sync the time on a Linux desktop device, for example,
because it could have a negative impact on other software running on
that device.
"""
if self.platform in RASPBERRY_PI_PLATFORMS:
LOG.info('Updating the system clock via NTP...')
if self.is_paired:
# Only display time sync message when paired because the prompt
# to go to home.mycroft.ai will be displayed by the pairing
# skill when pairing
self.enclosure.mouth_text(dialog.get("message_synching.clock"))
self.bus.wait_for_response(
Message('system.ntp.sync'),
'system.ntp.sync.complete',
15
)
def _notify_backend_down(self):
"""Notify user of inability to communicate with the backend."""
self._speak_dialog(dialog_id="backend.down")
self.bus.emit(Message("backend.down"))
def _display_skill_loading_notification(self):
"""Indicate to the user that skills are being loaded."""
self.enclosure.eyes_color(189, 183, 107) # dark khaki
self.enclosure.mouth_text(dialog.get("message_loading.skills"))
def _ensure_device_is_paired(self):
"""Determine if device is paired, if not automatically start pairing.
Pairing cannot be performed if there is no connection to the back end.
So skip pairing if the backend is down.
"""
if not self.is_paired and not self.backend_down:
LOG.info('Device not paired, invoking the pairing skill')
payload = dict(utterances=["pair my device"], lang="en-us")
self.bus.emit(Message("recognizer_loop:utterance", payload))
def _update_device_attributes_on_backend(self):
"""Communicate version information to the backend.
The backend tracks core version, enclosure version, platform build
and platform name for each device, if it is known.
"""
if self.is_paired:
LOG.info('Sending updated device attributes to the backend...')
try:
api = DeviceApi()
api.update_version()
except Exception:
self._notify_backend_down()
    def _update_system(self):
        """Emit an update event that will be handled by the admin service.

        Only attempted on unpaired devices.  Sends 'system.update' and, if
        the admin service reports it is processing, waits (up to 1000 s)
        for 'system.update.complete'.
        """
        if not self.is_paired:
            LOG.info('Attempting system update...')
            # NOTE(review): 'system.update' is emitted here and again below
            # via wait_for_response -- confirm the duplicate emit is intended.
            self.bus.emit(Message('system.update'))
            msg = Message(
                'system.update',
                dict(paired=self.is_paired, platform=self.platform)
            )
            resp = self.bus.wait_for_response(msg, 'system.update.processing')
            # Missing 'processing' key defaults to True: assume an update is
            # underway unless the admin service explicitly says otherwise.
            if resp and (resp.data or {}).get('processing', True):
                self.bus.wait_for_response(
                    Message('system.update.waiting'),
                    'system.update.complete',
                    1000
                )
def _speak_dialog(self, dialog_id, wait=False):
data = {'utterance': dialog.get(dialog_id)}
self.bus.emit(Message("speak", data))
if wait:
wait_while_speaking()
def main():
    """Entry point for the Mycroft skills service.

    Wires up the message bus, intent services, event scheduler and skill
    manager, primes the device (pairing, clock sync, updates), then blocks
    until an exit signal arrives and shuts everything down.
    """
    reset_sigint_handler()
    # Create PID file, prevent multiple instances of this service
    mycroft.lock.Lock('skills')
    config = Configuration.get()
    # Set the active lang to match the configured one
    set_active_lang(config.get('lang', 'en-us'))
    # Connect this process to the Mycroft message bus
    bus = _start_message_bus_client()
    _register_intent_services(bus)
    event_scheduler = EventScheduler(bus)
    skill_manager = _initialize_skill_manager(bus)
    _wait_for_internet_connection()
    # MSM initialization needs the network; retry now that a connection
    # is available if the first attempt failed.
    if skill_manager is None:
        skill_manager = _initialize_skill_manager(bus)
    device_primer = DevicePrimer(bus, config)
    device_primer.prepare_device()
    skill_manager.start()
    wait_for_exit_signal()
    shutdown(skill_manager, event_scheduler)
def _start_message_bus_client():
    """Start the bus client daemon and wait for connection.

    Returns:
        MessageBusClient: client connected to the Mycroft message bus.
    """
    bus = MessageBusClient()
    Configuration.set_config_update_handlers(bus)
    bus_connected = Event()
    # Echo all bus traffic into the 'SKILLS' log for debugging.
    bus.on('message', create_echo_function('SKILLS'))
    # Set the bus connected event when connection is established
    bus.once('open', bus_connected.set)
    create_daemon(bus.run_forever)
    # Wait for connection
    bus_connected.wait()
    LOG.info('Connected to messagebus')
    return bus
def _register_intent_services(bus):
    """Start up the all intent services and connect them as needed.

    Arguments:
        bus: messagebus client to register the services on
    """
    service = IntentService(bus)
    # Padatious is optional: a failure to construct it is logged but does
    # not prevent the Adapt-based IntentService from running.
    try:
        PadatiousService(bus, service)
    except Exception as e:
        LOG.exception('Failed to create padatious handlers '
                      '({})'.format(repr(e)))
    # Register handler to trigger fallback system
    bus.on('intent_failure', FallbackSkill.make_intent_failure_handler(bus))
def _initialize_skill_manager(bus):
    """Create a thread that monitors the loaded skills, looking for updates

    Arguments:
        bus: messagebus client the skill manager should attach to.

    Returns:
        SkillManager instance or None if it couldn't be initialized
    """
    try:
        skill_manager = SkillManager(bus)
        skill_manager.load_priority()
    except MsmException:
        # skill manager couldn't be created, wait for network connection and
        # retry
        skill_manager = None
        LOG.info(
            'MSM is uninitialized and requires network connection to fetch '
            'skill information\nWill retry after internet connection is '
            'established.'
        )
    return skill_manager
def _wait_for_internet_connection():
    """Block until an internet connection is detected, polling once a second."""
    while True:
        if connected():
            return
        time.sleep(1)
def shutdown(skill_manager, event_scheduler):
    """Stop the event scheduler and the skill manager cleanly.

    Arguments:
        skill_manager: SkillManager instance, or None if never initialized.
        event_scheduler: EventScheduler instance, or None.
    """
    LOG.info('Shutting down skill service')
    if event_scheduler is not None:
        event_scheduler.shutdown()
    # Terminate all running threads that update skills
    if skill_manager is not None:
        skill_manager.stop()
        skill_manager.join()
    LOG.info('Skill service shutdown complete!')
# Run the skills service when executed as a script.
if __name__ == "__main__":
    main()
| {
"content_hash": "0e6a5b88590fc11a74232458f2d53e99",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 79,
"avg_line_length": 35.156488549618324,
"alnum_prop": 0.6486809249810009,
"repo_name": "Dark5ide/mycroft-core",
"id": "30a1d6d8d8794dfc067b50b6aa2c081bbdfd50f3",
"size": "9791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mycroft/skills/__main__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1846776"
},
{
"name": "QML",
"bytes": "9903"
},
{
"name": "Shell",
"bytes": "80311"
}
],
"symlink_target": ""
} |
import serial
import time
import rospy
import math
import thread
import tty
import sys
import termios
from std_msgs.msg import String
from geometry_msgs.msg import Twist
from tms_msg_rc.msg import arduino_serial
# Module-level shared state: the most recent velocity command received on
# 'mobile_base/commands/velocity' (written by `callback`, read by the
# serial-bridge thread in `advertise_data`).
global_cmd = Twist()
# Subscriber callback: store the latest Twist command in the module-level
# global so the serial-writer thread can pick it up.
def callback(cmd):
    global global_cmd
    global_cmd = cmd
# Publisher-thread body (Python 2 code: uses the `print` statement).
def advertise_data(p, ser_):
    """Forward velocity commands to the Arduino over serial at 100 Hz and
    publish the encoder readings it returns on the 'arduino_serial' topic.

    Arguments:
        p: the rospy module (passed in so the thread shares ROS state).
        ser_: an open serial.Serial connected to the Arduino.
    """
    # ROS publisher setup
    global global_cmd
    pub = p.Publisher("arduino_serial", arduino_serial , queue_size = 1000);
    plot = arduino_serial()
    rate = p.Rate(100)
    L_ser = []
    # Initial (zero) velocity values
    linear_x = 0.00 # up
    linear_y = 0.00 # right  NOTE(review): unused -- confirm intended
    angular_z = 0.00 # reverse rotation
    string_ser = "%.2f,%.2f,\0" %(linear_x, angular_z,)
    # Timestamp setup
    now = rospy.Time.now()
    while not p.is_shutdown():
        # Stamp the outgoing message
        now = rospy.Time.now()
        plot.header.stamp = now
        # Snapshot the shared global into a local
        send_ser = global_cmd
        # Send "linear_x,angular_z,\0" to the Arduino
        string_ser = "%.2f,%.2f,\0" %(send_ser.linear.x , send_ser.angular.z)
        ser_.write(string_ser)
        # Read back a comma-separated line of three encoder counts
        L_ser = ser_.readline().split(",")
        print L_ser[0], "," , L_ser[1] , "," , L_ser[2], ",\r"
        # Publish the encoder readings
        plot.encorder_front = long(L_ser[0])
        plot.encorder_left = long(L_ser[1])
        plot.encorder_back = long(L_ser[2])
        pub.publish(plot)
        rate.sleep()
# Node entry point
def subscribe_command():
    """Subscribe to velocity commands and start the serial bridge thread.

    Opens /dev/ttyUSB0 at 115200 baud; the 1.5 s sleep gives the Arduino
    time to reset after the serial port is opened.  Spins forever.
    """
    # ROS subscriber / node setup
    rospy.Subscriber("mobile_base/commands/velocity", Twist, callback)
    rospy.init_node('portable_driver', anonymous=True)
    # Serial port setup
    ser = serial.Serial()
    ser.port = '/dev/ttyUSB0'
    ser.baudrate = 115200
    ser.open()
    time.sleep(1.5) #necessary for arudino serial wait time
    # Spawn the publisher thread (Python 2 `thread` module)
    p = rospy
    thread.start_new_thread(advertise_data, (p, ser))
    # Block until shutdown
    rospy.spin()
# Script entry point.  (Fix: stray " | {" concatenation junk removed from the
# final line -- it broke the file's syntax.)
if __name__ == '__main__':
    try:
        subscribe_command()
    except rospy.ROSInterruptException:
        # Normal shutdown path when ROS interrupts the node.
        pass
"content_hash": "045435a75f5b704efe3b85e0f64cd6c8",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 82,
"avg_line_length": 23.674418604651162,
"alnum_prop": 0.5962671905697446,
"repo_name": "irvs/ros_tms",
"id": "11a041087f66612df0463a108c28d50e99499f4e",
"size": "2090",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tms_rc/tms_rc_ninebot/scripts/__arduino_serial.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "75"
},
{
"name": "C",
"bytes": "1105412"
},
{
"name": "C#",
"bytes": "8112"
},
{
"name": "C++",
"bytes": "5839153"
},
{
"name": "CMake",
"bytes": "94659"
},
{
"name": "CSS",
"bytes": "30956"
},
{
"name": "HTML",
"bytes": "982020"
},
{
"name": "Inno Setup",
"bytes": "3254"
},
{
"name": "Java",
"bytes": "419664"
},
{
"name": "JavaScript",
"bytes": "68803"
},
{
"name": "M4",
"bytes": "1180"
},
{
"name": "MATLAB",
"bytes": "6958"
},
{
"name": "Makefile",
"bytes": "102553"
},
{
"name": "Max",
"bytes": "49936"
},
{
"name": "Objective-C",
"bytes": "80085"
},
{
"name": "Python",
"bytes": "591691"
},
{
"name": "SWIG",
"bytes": "1336"
},
{
"name": "Shell",
"bytes": "2654"
},
{
"name": "TSQL",
"bytes": "15032971"
},
{
"name": "TeX",
"bytes": "5590"
}
],
"symlink_target": ""
} |
import os
import sys
# Maps a benchmark source name to the WORKLOAD kernel name exported to the
# tracer pass; only the 'triad' kernel is registered here.
kernels = {
    'triad' : 'triad',
}
def main (directory, source):
    """Instrument *source* with LLVM-Tracer, build it, and run the result.

    Requires the TRACER_HOME environment variable to point at the
    LLVM-Tracer checkout.  Works inside *directory*, producing
    '<source>.llvm', '<source>-opt.llvm' and '<source>-instrumented'.
    """
    if 'TRACER_HOME' not in os.environ:
        raise Exception('Set TRACER_HOME directory as an environment variable')
    os.chdir(directory)
    bitcode = source + '.llvm'
    traced_bitcode = source + '-opt.llvm'
    binary = source + '-instrumented'
    os.environ['WORKLOAD'] = kernels[source]
    c_file = source + '.c'
    tracer_home = os.getenv('TRACER_HOME')
    build_steps = [
        # Emit un-vectorized, un-inlined LLVM IR for stable tracing.
        'clang -static -g -O1 -S -fno-slp-vectorize -fno-vectorize ' +
        ' -fno-unroll-loops -fno-inline -fno-builtin -emit-llvm -o ' +
        bitcode + ' ' + c_file,
        # Run the full-trace instrumentation pass.
        'opt -disable-inlining -S -load=' + tracer_home +
        '/full-trace/full_trace.so -fulltrace ' + bitcode + ' -o ' + traced_bitcode,
        # Link in the trace-logger runtime.
        'llvm-link -o full.llvm ' + traced_bitcode + ' ' +
        tracer_home + '/profile-func/trace_logger.llvm',
        # Lower to assembly, then build and run the final executable.
        'llc -O0 -disable-fp-elim -filetype=asm -o full.s full.llvm',
        'gcc -static -O0 -fno-inline -o ' + binary + ' full.s -lm -lz',
        './' + binary,
    ]
    for command in build_steps:
        os.system(command)
# Command-line entry point: `python llvm_compile.py <directory> <source>`
# (Python 2 script: uses the `print` statement).
if __name__ == '__main__':
    directory = sys.argv[1]
    source = sys.argv[2]
    print directory, source
    main(directory, source)
| {
"content_hash": "fa4f6616cebf52e898973847e3170671",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 79,
"avg_line_length": 33.91428571428571,
"alnum_prop": 0.6141533277169334,
"repo_name": "amritamaz/LLVM-Tracer",
"id": "8f32a586cf147f39f67d7d8f0b58de38c7777a72",
"size": "1209",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/triad/llvm_compile.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3065"
},
{
"name": "C++",
"bytes": "41687"
},
{
"name": "CMake",
"bytes": "31704"
},
{
"name": "Makefile",
"bytes": "1490"
},
{
"name": "Python",
"bytes": "1209"
}
],
"symlink_target": ""
} |
"""Mutators for evolutionary algorithms."""
import random
from typing import List, Optional, Tuple
import pyglove.core as pg
from pyglove.ext.evolution import base
# We disable implicit str concat as it is commonly used class schema docstr.
# pylint: disable=implicit-str-concat
def where_fn_spec():
  """Returns the ValueSpec for a `where` predicate.

  The predicate receives a `pg.DNA` and returns a bool; None is also
  accepted (meaning all nodes are mutable).
  """
  dna_arg = pg.typing.Object(pg.DNA)
  predicate = pg.typing.Callable([dna_arg], returns=pg.typing.Bool())
  return predicate.noneable()
@pg.members([
    ('where', where_fn_spec(),
     'A callable to determine which nodes of the DNA are mutable. By default, '
     'all nodes are mutable. When writing a custom `where`, it can be '
     'assumed that the DNA arg has a `spec` field with its DNASpec.'),
    ('seed', pg.typing.Int().noneable(), 'Random seed for mutation.')
], init_arg_list=['where', 'seed'])
class Uniform(base.Mutator):
  """Mutates a DNA by randomizing a branch of the DNA.

  This is a minimal mutator. It acts as follows. PyGlove represents a DNA as a
  tree, with information at each node, where child nodes are conditional on the
  value of parent nodes. This mutator will pick a node uniformly at random and
  mutate the subtree rooted at that node (inclusive), respecting dependencies
  specified in the DNASpec.

  However, in general, we recommended that you write your own Mutator subclass
  so you can tailor it to your search space. This would allow you, for example:
  i) to modify a value drawing from a custom distribution: e.g. a
  gaussian-distributed additive change may be more appropriate in many cases.
  ii) to choose a node in the tree with a non-uniform distribution. E.g. you
  may want to modify some nodes more frequently if they encode areas of the
  space that should be explored more thoroughly.
  iii) perform mutations that implement a different type of locality than that
  represented by the tree structure. E.g. if two nodes at the same level need
  to be modified in a coordinated way.
  """

  def _on_bound(self):
    """Chooses the RNG: the shared module RNG, or a seeded private instance."""
    super()._on_bound()
    if self.seed is None:
      self._random = random
    else:
      self._random = random.Random(self.seed)

  def mutate(self, dna: pg.DNA, step: int = 0) -> pg.DNA:
    """Mutates the DNA at a given step."""
    del step
    dna = dna.clone(deep=True)  # Prevent overwriting argument.
    child_nodes, parent_nodes, child_indexes = self._get_relationships(dna)
    if not child_nodes:
      raise RuntimeError(f'Immutable DNA: {dna!r}')
    # Pick a mutable node uniformly at random, along with its parent and its
    # index within the parent's children.
    child_node, parent_node, child_index = self._random.choice(list(zip(
        child_nodes, parent_nodes, child_indexes)))
    if parent_node is None:
      # The node mutated ("child") is the root of the DNA tree.
      return pg.random_dna(
          child_node.spec, self._random, previous_dna=child_node)
    else:
      # The node mutated is not the root of the DNA tree.
      if _node_needs_distinct(child_node.spec):
        # The approach taken here is inefficient in the special case when there
        # are many choices. If a random choice is likely to succeed, that
        # scenario can be sped up by redrawing random choices until success.
        # Consider adding a branch to handle that case, depending on need.

        # Compute mutated node value, enforcing distinct constraint.
        distinct_candidates = (set(range(len(child_node.spec.candidates)))
                               - set([c.value for c in parent_node.children]))
        if distinct_candidates:
          new_child_value = self._random.choice(list(distinct_candidates))
          # Create a new sub-tree.
          new_child_node = pg.DNA(
              new_child_value,
              children=[pg.random_dna(
                  child_node.spec.candidates[new_child_value],
                  # Choice has changed for the new node,
                  # thus previous_dna does not apply.
                  self._random, previous_dna=None)])
          new_child_node.use_spec(child_node.spec)
        else:
          # No value satisfies the distinct constraint: leave DNA unchanged.
          new_child_node = None
      else:
        new_child_node = pg.random_dna(
            child_node.spec, self._random, previous_dna=child_node)
      if new_child_node is not None:
        # NOTE(daiyip): we update the children without invalidating the internal
        # states of the DNA for better performance.
        parent_node.children.rebind(
            {child_index: new_child_node}, skip_notification=True)
        if _node_needs_sorting(child_node.spec):
          parent_node.rebind(
              children=sorted(parent_node.children, key=lambda c: c.value),
              skip_notification=True)
    return dna

  def _get_relationships(self, dna: pg.DNA) -> Tuple[
      List[pg.DNA], List[Optional[pg.DNA]], List[Optional[int]]]:
    """Extracts the parent-child node relationships in a DNA.

    Note that PyGlove represents the nodes in a DNA instance as DNA instances
    themselves.

    Args:
      dna: the DNA that will be mutated.

    Returns:
      A tuple of 3 lists of the same length with corresponding elements:
        -child_nodes: a list of every node in the DNA.
        -parent_nodes: a list of the parent node of the corresponding node in
          `child_nodes`.
        -child_indexes: a list of indexes. For all j, child_nodes[j] is the i-th
          child of parent_nodes[j], where i = child_indexes[j].
      Note that the root is included as a "child" with a `None` parent.
    """
    # This method uses the word "child" and "parent" to refer to the node
    # relationships in the tree structure of a DNA. This should not be confused
    # with the standard use of "child" and "parent" as the genealogic
    # relationship of DNAs generated by an evolutionary algorithm.
    def is_mutable_node(obj):
      return self._is_mutable_node(obj)
    results = pg.query(dna, where=is_mutable_node, enter_selected=True)
    child_nodes = list(results.values())
    parent_nodes = [n.parent_dna for n in child_nodes]
    child_indexes = [
        n.sym_path.key if n.parent_dna else None for n in child_nodes]
    return child_nodes, parent_nodes, child_indexes

  def _is_mutable_node(self, obj: pg.Object) -> bool:
    """Returns whether the branch rooted at `obj` contains mutable values."""
    if not isinstance(obj, pg.DNA):
      return False
    if (obj.sym_parent is None and
        # `_immutable_root` is only set by unit tests.
        getattr(self, '_immutable_root', None)):
      return False
    if self.where and not self.where(obj):
      return False
    # Only decision points (choices, floats, custom) can be mutated.
    return isinstance(obj.spec, (pg.geno.Choices,
                                 pg.geno.Float,
                                 pg.geno.CustomDecisionPoint))
@pg.members([
    ('where', where_fn_spec(),
     'A callable to determine which nodes of the DNA are mutable. By default, '
     'all nodes are mutable. When writing a custom `where`, it can be '
     'assumed that the DNA arg has a `spec` field with its DNASpec.'),
    ('seed', pg.typing.Int().noneable(), 'Random seed for mutation.')
], init_arg_list=['where', 'seed'])
class Swap(base.Mutator):
  """Specialized mutator that swaps DNA branches rooted at sibling nodes."""

  def _on_bound(self):
    """Chooses the RNG: the shared module RNG, or a seeded private instance."""
    super()._on_bound()
    if self.seed is None:
      self._random = random
    else:
      self._random = random.Random(self.seed)

  def mutate(self, dna: pg.DNA, step: int = 0) -> pg.DNA:
    """Mutates the DNA. If impossible, returns a clone."""
    dna = dna.clone(deep=True)  # Prevent overwriting argument.
    parent_node_candidates = self._get_candidate_nodes(dna)
    # Visit candidate parents in random order until a swappable one is found.
    self._random.shuffle(parent_node_candidates)
    parent_node = None
    child_indexes = []
    for parent_node in parent_node_candidates:
      if not parent_node.spec.sorted:
        # If no sorting is required, any swap is valid.
        child_indexes = self._random.sample(range(len(parent_node.children)), 2)
        break  # Found a pair to swap.
    if child_indexes:
      # Swap the two indexes.
      assert len(child_indexes) == 2
      child0 = parent_node.children[child_indexes[0]]
      child1 = parent_node.children[child_indexes[1]]
      parent_node.children.rebind({child_indexes[0]: child1})
      parent_node.children.rebind({child_indexes[1]: child0})
    return dna

  def _get_candidate_nodes(self, dna: pg.DNA) -> List[pg.DNA]:
    """Returns a list of nodes with potentially swappable children."""
    def is_candidate_node(obj):
      if not isinstance(obj, pg.DNA):
        return False
      if self.where and not self.where(obj):
        return False
      # Only multi-choice nodes have two or more children to swap.
      return (isinstance(obj.spec, pg.geno.Choices) and
              obj.spec.num_choices > 1)
    selected_nodes = pg.query(
        dna, where=is_candidate_node, enter_selected=True)
    return list(selected_nodes.values())
def _node_needs_distinct(dna_spec: pg.DNASpec) -> bool:
  """Returns whether this node requires distinct children."""
  if not isinstance(dna_spec, pg.geno.Choices):
    return False
  return dna_spec.is_subchoice and dna_spec.distinct
def _node_needs_sorting(dna_spec: pg.DNASpec) -> bool:
  """Returns whether this node requires its children sorted by value."""
  # Fix: the docstring previously said "distinct children" -- a copy-paste
  # of `_node_needs_distinct`; this function checks the `sorted` constraint.
  return (isinstance(dna_spec, pg.geno.Choices)
          and dna_spec.is_subchoice and dna_spec.sorted)
| {
"content_hash": "51f7fcc5a86b93ddd59d1c35e3b6a9c0",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 80,
"avg_line_length": 42.07798165137615,
"alnum_prop": 0.6608525019077728,
"repo_name": "google/pyglove",
"id": "e56e7540edbe48ae67c58d55777578cf3da3c156",
"size": "9757",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pyglove/ext/evolution/mutators.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1796188"
}
],
"symlink_target": ""
} |
"""
Django settings for trello_reporter project.
Generated by 'django-admin startproject' using Django 1.9.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
from django.core.exceptions import ImproperlyConfigured
# Project root: two directory levels above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded in source; move it to an environment
# variable before deploying anywhere public.
SECRET_KEY = '5n_v*opwlu_u@i1nw=rdheam4#hr+1$x93_qj2go5jhb5u&q-7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Hosts/IPs allowed to see debug output and reach the site (local dev only).
INTERNAL_IPS = ['172.17.0.1', '127.0.0.1', '::1']
ALLOWED_HOSTS = ['172.17.0.1', '127.0.0.1']
DEBUG_TOOLBAR_PATCH_SETTINGS = False
# Custom user model backed by Trello OAuth.
AUTH_USER_MODEL = "authentication.TrelloUser"
AUTHENTICATION_BACKENDS = (
    "trello_reporter.authentication.backend.TrelloAuthBackend",
)
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.humanize',
    'trello_reporter.charting',
    'trello_reporter.authentication', # not to conflict with django.c.auth
    'trello_reporter.harvesting',
    'trello_reporter.static_data', # used for easier development
    'django_extensions',
    'debug_toolbar',
    # 'channels',
]
MIDDLEWARE = [
    'debug_toolbar.middleware.DebugToolbarMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    # Project middleware: Trello OAuth session handling + per-user timezone.
    'trello_reporter.authentication.middleware.TrelloAuthMiddleware',
    'trello_reporter.authentication.middleware.TimezoneMiddleware',
]
ROOT_URLCONF = 'trello_reporter.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'trello_reporter.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# Credentials come from the environment (container-friendly); the host 'db'
# presumably refers to a linked database container -- verify deployment.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': os.environ["POSTGRESQL_DATABASE"],
        'USER': os.environ["POSTGRESQL_USER"],
        'PASSWORD': os.environ["POSTGRESQL_PASSWORD"],
        'HOST': "db"
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
# Console-only logging; level is tunable via DJANGO_LOG_LEVEL.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s\t%(asctime)s\t%(filename)s %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'formatter': "verbose",
            'class': 'logging.StreamHandler',
        },
    },
    'loggers': {
        'django': {
            'handlers': ['console'],
            'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
        },
        'trello_reporter': {
            'handlers': ['console'],
            'level': os.getenv('DJANGO_LOG_LEVEL', 'DEBUG'),
        }
    },
}
# Application settings
# Fail fast at import time with instructions if the Trello API key is absent.
try:
    API_KEY = os.environ["API_KEY"]
except KeyError:
    raise ImproperlyConfigured(
        "Developer API key not found, please obtain it from:\n"
        "https://trello.com/app-key\n"
        "and set it as environment variable 'API_KEY'"
    )
| {
"content_hash": "03df76d4791a9a0fe6c8d205d6065f8e",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 76,
"avg_line_length": 27.925925925925927,
"alnum_prop": 0.6489832007073386,
"repo_name": "TomasTomecek/trello-reporter",
"id": "9d0cb086d579eac4b2277840e18e23498e508dc5",
"size": "4524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trello_reporter/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "419"
},
{
"name": "HTML",
"bytes": "30644"
},
{
"name": "JavaScript",
"bytes": "11694"
},
{
"name": "Makefile",
"bytes": "618"
},
{
"name": "Python",
"bytes": "129269"
},
{
"name": "Shell",
"bytes": "483"
}
],
"symlink_target": ""
} |
__author__ = 'linlin'
import os
import logging
import re
import pdb
logger = logging.getLogger(__name__)
################################################################
# Input root holding the raw-feature data and scripts; `separator` is the
# field delimiter used when writing standardized output files.
root_dir = '/home/linlin/time/0903_classify_false_start/1003_raw_features/'
separator = '\t\t'
################################################################
def MakeNewFolderVersionHigher(data_directory, dir_name):
    """Create a new versioned folder `<dir_name>_<N+1>` next to *data_directory*.

    Scans the parent directory of *data_directory* (its dirname's absolute
    path) for entries containing *dir_name* with a ``Dir_<number>`` suffix,
    finds the highest existing version number, then creates and returns the
    next one.  With no versioned siblings, ``<dir_name>_2`` is created.

    Args:
        data_directory: path (may be relative); its dirname is scanned.
        dir_name: base name of the folder to create.

    Returns:
        Absolute path of the newly created folder.
    """
    abs_data_directory = os.path.abspath(os.path.dirname(data_directory))
    version_number = 1
    for entry in os.listdir(abs_data_directory):
        if dir_name not in entry:
            continue
        matches = re.findall(r'Dir_\d+', entry)
        # Fix: previously `matches[-1]` raised IndexError for an entry that
        # contains dir_name but has no 'Dir_<number>' suffix; skip those.
        if not matches:
            continue
        number_str = matches[-1][4:]  # strip the leading 'Dir_'
        if number_str.isdigit():
            number = int(number_str)
            if number > version_number:
                version_number = number
    new_folder_name = dir_name + "_%d" % (version_number + 1)
    folderFullPath = os.path.join(abs_data_directory, new_folder_name)
    os.makedirs(folderFullPath)
    return folderFullPath
#########################################################
# Create a fresh, versioned output directory for this run, and derive the
# data / source-code locations from the machine-specific root_dir above.
output_root_dir = MakeNewFolderVersionHigher(root_dir, 'processDir' )
data_dir = root_dir + 'data1'
code_dir = root_dir + 'src/'
##############################################################
def DirProcessing(source_path, dest_path):
    """Standardize every file found under *source_path*.

    Each file is passed to Standardize(), which writes a '.standard' copy
    into *dest_path* using a single space as the field separator.
    """
    for current_root, _sub_dirs, file_names in os.walk(source_path):
        for file_name in file_names:
            abs_file_path = os.path.join(current_root, file_name)
            logger.debug("Visited one file!")
            Standardize(abs_file_path, dest_path, ' ')
def DirProcessingForSSR(source_path, dest_path):
    """Extract SSR features from every file found under *source_path*.

    Each file is passed to GetSsrFeature(), which writes a '.noSpace' copy
    into *dest_path* using a tab as the field separator.
    """
    for current_root, _sub_dirs, file_names in os.walk(source_path):
        for file_name in file_names:
            abs_file_path = os.path.join(current_root, file_name)
            logger.debug("Visited one file!")
            GetSsrFeature(abs_file_path, dest_path, '\t')
def GetAttributes(source_path, dest_path):
    """Pipe every file under *source_path* through the chunker script,
    producing one '<name>.crfsuite' attribute file per input in *dest_path*.

    NOTE(review): builds the shell command by string concatenation; paths
    containing spaces or shell metacharacters would break the pipeline.
    """
    ################################################################
    script_file = code_dir + 'chunker6_only_ssr_repetition.py'
    ################################################################
    path = source_path
    for root, dirs, files in os.walk(path):
        for filespath in files:
            abs_file_path = os.path.join(root, filespath)
            logger.debug("Visited one file!")
            crf_path = dest_path + '/' + os.path.basename(abs_file_path) + '.crfsuite'
            os.system('cat ' + abs_file_path +' | python ' + script_file + " > " + crf_path )
def RunClassifier(source_path, dest_path):
    """Train and evaluate a CRFsuite model on the extracted attributes.

    Locates the training ('tr.txt') and test ('te.txt') attribute files
    under *source_path*, then shells out to `crfsuite learn` using the
    l2sgd algorithm with c2=2, writing the model ('l2sgd.model') and the
    evaluation report ('result_l2sgd_c2_2.txt') into *dest_path*.

    (Cleanup: removed the large body of commented-out alternative training
    configurations — lbfgs/ap/pa/arow variants — that were dead code.)

    NOTE(review): if no 'tr.txt' / 'te.txt' file exists under source_path,
    `train_path` / `test_path` are unbound and a NameError is raised —
    confirm callers always provide both files.
    """
    for root, dirs, files in os.walk(source_path):
        for filespath in files:
            if 'tr.txt' in filespath:
                train_path = os.path.join(root, filespath)
            elif 'te.txt' in filespath:
                test_path = os.path.join(root, filespath)
    result_path = dest_path + '/' + 'result_l2sgd_c2_2.txt'
    model_path = dest_path + '/' + 'l2sgd.model'
    os.system('crfsuite learn -m '+ model_path + " -a l2sgd " +' -p c2=2 ' + ' -e2 ' +
              train_path + " " + test_path + " > " + result_path )
def Count(reference_file_obj):
    """Count sentences and mismatched sentences in a crfsuite reference dump.

    The file contains one "<reference> <prediction>" pair per line, with
    blank (length < 3) lines separating sentences.  A pair agrees when both
    labels are equal, or both are non-'OK' (any disfluency label counts the
    same).  A sentence is mismatched when at least one of its pairs
    disagrees.  Lines with a field count other than 2 are ignored.

    Returns:
        [mismatched_sentence_count, total_sentence_count]
    """
    reference_file_obj.seek(0, 0)
    sentence_total = 0
    mismatched_sentences = 0
    mismatches_in_sentence = 0
    for line in reference_file_obj:
        fields = line.split()
        if len(line) < 3:
            # Sentence boundary: close out the current sentence.
            if mismatches_in_sentence > 0:
                mismatched_sentences += 1
            mismatches_in_sentence = 0
            sentence_total += 1
        elif len(fields) == 2:
            reference_label, predicted_label = fields
            agree = (reference_label == predicted_label or
                     (reference_label != 'OK' and predicted_label != 'OK'))
            if not agree:
                mismatches_in_sentence += 1
    return [mismatched_sentences, sentence_total]
def SentAccuracy(source_path, dest_path):
    """Tag the test set with the trained model and append sentence accuracy.

    Finds the tr/te attribute files in the sibling 'attributesStep3'
    directory, runs `crfsuite tag -r` to dump reference/prediction pairs
    into *dest_path*/reference.txt, then appends sentence-level accuracy
    (computed via Count) to the result file in *source_path*.
    """
    path = source_path
    parent_dir = os.path.dirname(path)
    attr_path = parent_dir + '/' + 'attributesStep3'
    for root, dirs, files in os.walk(attr_path):
        for filespath in files:
            if 'tr.txt' in filespath:
                train_path = os.path.join(root, filespath)
            elif 'te.txt' in filespath:
                test_path = os.path.join(root, filespath)
    result_path = path + '/' + 'result_l2sgd_c2_2.txt'
    model_path = path + '/' + 'l2sgd.model'
    reference_path = dest_path + '/' + 'reference.txt'
    # os.system('crfsuite learn -m '+ model_path + " -a l2sgd " +' -p c2=2 ' + ' -e2 ' +
    #           train_path + " " + test_path + " > " + result_path )
    os.system('crfsuite tag -m '+ model_path + " -r " + test_path + " > " + reference_path )
    reference_file_obj = open(reference_path,'r')
    result_file_obj = open(result_path, 'a+')
    [num_matches, num_lines] = Count(reference_file_obj)
    # Count returns [mismatched_sentences, total_sentences]; accuracy is the
    # fraction of sentences with every token labeled correctly.
    result_file_obj.write("not matches - %d; total sents - %d; accuracy - %8.4f \n"
          %(num_matches, num_lines, (num_lines- num_matches)/float(num_lines)))
    reference_file_obj.close()
    result_file_obj.close()
def FindNeighborTokenSubscript(first_token_list, current_pos , up_or_down ):
    """Return the index of the nearest non-empty token next to current_pos.

    Arguments:
        first_token_list: list of token strings; '' marks an empty slot.
        current_pos: index to search from (exclusive).
        up_or_down: +1 searches forward, -1 searches backward; any other
            value returns None (mirrors the original fall-through).

    May raise IndexError when no non-empty token exists in that direction;
    callers guard against this by skipping positions near the boundaries.
    """
    tokens = first_token_list
    if up_or_down == 1:
        step = 1
    elif up_or_down == -1:
        step = -1
    else:
        return None
    index = current_pos + step
    while len(tokens[index]) < 1:
        index += step
    return index
def Standardize(path, dest_dir, sep):
    """Convert one raw annotation file into the standardized feature format.

    Output (one line per input line, to <dest_dir>/<basename>.standard):
    disfluency label, token, POS, word, semantic tag, partial-word flag, and
    four same/diff repetition features comparing the token against its two
    previous and two next non-empty tokens.  Lines shorter than 13 chars
    are treated as sentence separators and emitted as blank lines.

    NOTE(review): in the repetition pass, `next_next_subscript` is reused
    to hold the 'same'/'diff' label, shadowing the index computed two lines
    above -- confirm this is intentional.
    """
    output_path = dest_dir+ '/' + os.path.basename(path) + '.standard'
    output_file_obj = open(output_path,'w')
    file_obj = open(path)
    line_list = file_obj.readlines()
    # Pass 1: first token of each line ('' for separator/short lines).
    token_list = []
    for j in range(len(line_list)):
        word_list = line_list[j].split()
        if len(word_list) < 2:
            token_list.append('')
        else:
            token_list.append(word_list[0])
    # Pass 2: per-token repetition features against neighboring tokens.
    repetition_vec_list = []
    for i in range(len(line_list)):
        if len(token_list[i]) == 0:
            repetition_vec_list.append('')
        else:
            # Near file boundaries there are not enough neighbors: use 'diff'.
            if i < 4 or i > len(line_list)- 5:
                repetition_vec_list.append(['diff', 'diff','diff', 'diff'])
            else:
                previous_subscript = FindNeighborTokenSubscript(token_list, i, -1)
                prev_prev_subscript = FindNeighborTokenSubscript(token_list, previous_subscript, -1)
                next_subscript = FindNeighborTokenSubscript(token_list, i, 1)
                next_next_subscript = FindNeighborTokenSubscript(token_list, next_subscript, 1)
                prev_prev_label = 'same' if (token_list[i] == token_list[prev_prev_subscript]) else "diff"
                prev_label = 'same' if (token_list[i] == token_list[previous_subscript]) else "diff"
                next_label = 'same' if (token_list[i] == token_list[next_subscript]) else "diff"
                next_next_subscript = 'same' if (token_list[i] == token_list[next_next_subscript]) else "diff"
                repetition_vec_list.append([prev_prev_label, prev_label, next_label, next_next_subscript])
    # Pass 3: map raw annotations (5th field) onto coarse disfluency labels
    # and write the standardized line.
    for k in range(len(line_list)):
        line = line_list[k]
        if len(line)<13:
            label = ''
        else:
            word_list = line.split()
            if 'filler' in word_list[4]:
                label = 'filler'
            elif 'repeat' in word_list[4] or 'nsert' in word_list[4]:
                label = 'repeat'
            elif 'restart' in word_list[4] or 'extraneou' in word_list[4]:
                label = 'false_start'
            elif 'elete' in word_list[4]:
                label = 'other'
            else:
                label = 'OK'
            # A hyphen in the token marks a partial (interrupted) word.
            if '-' in word_list[0]:
                patial = 'patial'
            else:
                patial = 'nonpatial'
            label = label
            token = word_list[0]
            pos = word_list[1]
            word = word_list[2]
            sem = word_list[3]
            patial = patial
            #pdb.set_trace()
            pp = repetition_vec_list[k][0]
            p = repetition_vec_list[k][1]
            n = repetition_vec_list[k][2]
            nn = repetition_vec_list[k][3]
        #pdb.set_trace()
        if len(line)<13:
            line_format = ''
        else:
            line_format = (
                "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
                %(label, sep, token,sep,pos, sep,word,sep,sem, sep, patial, sep,
                pp, sep, p, sep, n,sep, nn))
        output_file_obj.write(line_format)
        output_file_obj.write('\n')
    output_file_obj.close()
    file_obj.close()
def GetSsrFeature(path, dest_dir, sep):
    """Re-emit a fixed-column annotation file as separator-delimited fields.

    Input column layout (by character position):
        [0:15)  token
        [15:25) POS tag
        [25:40) word
        [40:54) semantic tag
        [54:)   remaining annotation, whitespace-joined with '_' as label.
    Lines shorter than 3 characters are sentence separators and come out
    as blank lines.  Output file: <dest_dir>/<basename>.noSpace.
    """
    out_path = dest_dir + '/' + os.path.basename(path) + '.noSpace'
    with open(path) as in_file, open(out_path, 'w') as out_file:
        for line in in_file:
            if len(line) < 3:
                out_file.write('\n')
                continue
            label = '_'.join(line[54:].split())
            token = line[:15].strip()
            pos = line[15:25].strip()
            word = line[25:40].strip()
            sem = line[40:54].strip()
            out_file.write("%s%s%s%s%s%s%s%s%s%s"
                           % (token, sep, pos, sep, word, sep, sem, sep, label, sep))
            out_file.write('\n')
if __name__ == '__main__':
    # Pipeline: standardize raw files -> extract CRF attributes -> train and
    # evaluate the classifier -> compute sentence-level accuracy.  Each stage
    # writes into its own sub-directory of the freshly created output dir.
    logFile = output_root_dir + "/logFile.txt"
    logging.basicConfig(filename=logFile, level = logging.DEBUG)
    os.makedirs(output_root_dir + "/standardStep1")
    dest_dir = output_root_dir + "/standardStep1"
    DirProcessing(data_dir, dest_dir)
    # os.makedirs(output_root_dir + "/standardStep2") #
    # dest_dir = output_root_dir + "/standardStep2"
    # DirProcessing(data_dir, dest_dir) #
    os.makedirs(output_root_dir + "/attributesStep3")
    attr_dir = output_root_dir + "/attributesStep3"
    GetAttributes(dest_dir, attr_dir)
    os.makedirs(output_root_dir + "/classificationStep4")
    result_dir = output_root_dir + "/classificationStep4"
    RunClassifier( attr_dir, result_dir)
    os.makedirs(output_root_dir + "/countSenAccurStep5")
    accuracy_dir = output_root_dir + "/countSenAccurStep5"
    SentAccuracy(result_dir, accuracy_dir)
| {
"content_hash": "ddd861026545d30d3084a508e3ab9d6a",
"timestamp": "",
"source": "github",
"line_count": 337,
"max_line_length": 110,
"avg_line_length": 36.545994065281896,
"alnum_prop": 0.5289866839883078,
"repo_name": "linkinwong/word2vec",
"id": "ea9edd85e262d178a397351fa2d622cb40df4778",
"size": "12371",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/crf-paper-script/preprocessor11_l2sgd_sent_accuracy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "50032"
},
{
"name": "Makefile",
"bytes": "718"
},
{
"name": "Python",
"bytes": "337863"
},
{
"name": "Shell",
"bytes": "8539"
}
],
"symlink_target": ""
} |
"""Test a FCN on an imdb (image database)."""
from fcn.config import cfg, get_output_dir
import argparse
from utils.timer import Timer
import numpy as np
import cv2
import cPickle
from utils.blob import im_list_to_blob
from utils.backprojection import backproject
import os
import math
import tensorflow as tf
def _get_image_blob(im, im_depth, meta_data):
    """Convert a color/depth image pair into network input blobs.

    Arguments:
        im (ndarray): a color image in BGR order
        im_depth (ndarray): the matching depth image
        meta_data: camera meta data used to backproject the depth map

    Returns:
        blob (ndarray): data blob holding the color image
        blob_depth (ndarray): data blob holding the backprojected depth image
        im_scale_factors (ndarray): scales applied relative to the input
    """
    assert len(cfg.TEST.SCALES_BASE) == 1
    scale = cfg.TEST.SCALES_BASE[0]

    def _prepare(image):
        # Mean-subtract in float32, then rescale to the single test scale.
        shifted = image.astype(np.float32, copy=True) - cfg.PIXEL_MEANS
        return cv2.resize(shifted, None, None, fx=scale, fy=scale,
                          interpolation=cv2.INTER_LINEAR)

    # Color branch.
    processed = [_prepare(im)]
    scale_factors = [scale]

    # Depth branch: backproject first (per the original note, this is used
    # to compute normals from the depth map).
    processed_depth = [_prepare(backproject(im_depth, meta_data))]

    return (im_list_to_blob(processed, 3),
            im_list_to_blob(processed_depth, 3),
            np.array(scale_factors))
def im_segment(sess, net, im, im_depth, meta_data, num_classes):
    """Run the network on one image pair and return the per-pixel labels.

    Arguments:
        sess: TensorFlow session the network runs in
        net: network whose ``data`` input and ``pred_up`` output are used
        im (ndarray): color image (BGR order)
        im_depth (ndarray): depth image
        meta_data: camera meta data forwarded to the blob builder
        num_classes: number of classes (not used here; kept for callers)

    Returns:
        2-D ndarray of predicted labels (height x width).
    """
    # Build the input blobs; only the depth blob is fed to the network.
    _, depth_blob, _ = _get_image_blob(im, im_depth, meta_data)
    outputs = sess.run([net.pred_up], feed_dict={net.data: depth_blob})
    labels = outputs[0]
    # Drop the leading batch dimension.
    return labels.reshape((labels.shape[1], labels.shape[2]))
def vis_segmentations(im, im_depth, labels):
    """Show the color image, depth image and label map side by side."""
    import matplotlib.pyplot as plt

    figure = plt.figure()
    # One row, three columns: image / depth / predicted labels.
    for position, content in ((131, im), (132, im_depth), (133, labels)):
        figure.add_subplot(position)
        plt.imshow(content)
    plt.show()
def test_net(sess, net, imdb, weights_filename):
    # Evaluate a segmentation network over every image of `imdb`, caching
    # the predictions in <output_dir>/segmentations.pkl.  If the cache
    # already exists, it is loaded and evaluated without re-running the net.
    output_dir = get_output_dir(imdb, weights_filename)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    seg_file = os.path.join(output_dir, 'segmentations.pkl')
    print imdb.name
    if os.path.exists(seg_file):
        # Cached results: load, evaluate, and skip the forward passes.
        with open(seg_file, 'rb') as fid:
            segmentations = cPickle.load(fid)
        imdb.evaluate_segmentations(segmentations, output_dir)
        return
    """Test a FCN on an image database."""
    # NOTE(review): the string above is a stray mid-function docstring; it is
    # a no-op expression statement and has no effect here.
    num_images = len(imdb.image_index)
    segmentations = [[] for _ in xrange(num_images)]
    roidb = imdb.roidb
    # timers for the network pass and the bookkeeping around it
    _t = {'im_segment' : Timer(), 'misc' : Timer()}
    # Images are visited in random order (results are indexed by i, so the
    # final segmentation list is still aligned with the image index).
    perm = np.random.permutation(np.arange(num_images))
    # for i in xrange(num_images):
    for i in perm:
        im = cv2.imread(roidb[i]['image'])
        # Depth is read unchanged to preserve its original bit depth.
        im_depth = cv2.imread(roidb[i]['depth'], cv2.IMREAD_UNCHANGED)
        meta_data = roidb[i]['meta_data']
        _t['im_segment'].tic()
        labels = im_segment(sess, net, im, im_depth, meta_data, imdb.num_classes)
        _t['im_segment'].toc()
        _t['misc'].tic()
        seg = {'labels': labels}
        segmentations[i] = seg
        _t['misc'].toc()
        # NOTE(review): this pops up a blocking matplotlib window for every
        # image -- presumably a debugging aid; confirm before batch runs.
        vis_segmentations(im, im_depth, labels)
        print 'im_segment: {:d}/{:d} {:.3f}s {:.3f}s' \
            .format(i + 1, num_images, _t['im_segment'].average_time, _t['misc'].average_time)
    # Persist the predictions so later runs can skip straight to evaluation.
    seg_file = os.path.join(output_dir, 'segmentations.pkl')
    with open(seg_file, 'wb') as f:
        cPickle.dump(segmentations, f, cPickle.HIGHEST_PROTOCOL)
    # evaluation
    imdb.evaluate_segmentations(segmentations, output_dir)
| {
"content_hash": "e314fd7923e686eae45aff12cb3fbe85",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 98,
"avg_line_length": 30.269736842105264,
"alnum_prop": 0.6350793305803086,
"repo_name": "yuxng/Deep_ISM",
"id": "923dd321a02c3a5613d47352683f87cb94cc0935",
"size": "4838",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "FCN/lib/fcn/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "292"
},
{
"name": "Cuda",
"bytes": "9383"
},
{
"name": "Makefile",
"bytes": "112"
},
{
"name": "Matlab",
"bytes": "83514"
},
{
"name": "Python",
"bytes": "307356"
},
{
"name": "Shell",
"bytes": "7042"
}
],
"symlink_target": ""
} |
from selenium.common.exceptions import WebDriverException
from libs.angular.angularCustomJavascript import *
import selenium.webdriver.support.ui as ui
import time
class AngularCommands:
def __init__(self, webdriver, jsinjector):
self.version = 0.1
self.driver = webdriver
self.jsinjector = jsinjector
#self.reload_with_debug_info()
def reload_with_debug_info(self):
self.driver.execute_script('angular.reloadWithDebugInfo()')
newurl = self.driver.current_url
print newurl
def show_app_name(self):
self.run_javascript('wn_showAngularAppName()')
def show_deps(self):
self.run_javascript('wn_showAngularDeps()')
def show_main_classes(self):
self.run_javascript('wn_showAngularMainClasses()')
def show_all_classes(self):
self.run_javascript('wn_showAngularAllClasses()')
def show_routes(self):
self.run_javascript('wn_showAngularRoutes()')
def run_javascript(self, javascript_function):
self.jsinjector.execute_javascript(self.driver, javascript_function)
print ''
print ''
raw_input("Press ENTER to return to menu.")
def show_ngResource_tests(self):
# ngResource classes generally communicate with api endpoints... run with proxy to capture api calls.
print "Testing classes, please wait..."
print ''
self.jsinjector.execute_javascript(self.driver, "wn_testNgResourceClasses();")
time.sleep(10)
result = self.jsinjector.execute_javascript(self.driver, "console.log('all done');")
print result;
print ''
print ''
raw_input("Press ENTER to return to menu.")
def show_http_tests(self):
# ngResource classes generally communicate with api endpoints... run with proxy to capture api calls.
print "Testing classes using $http, please wait..."
print ''
self.jsinjector.execute_javascript(self.driver, "wn_testHTTPClasses();")
time.sleep(10)
result = self.jsinjector.execute_javascript(self.driver, "console.log('All done son.');")
print result;
print ''
print ''
raw_input("Press ENTER to return to menu.")
| {
"content_hash": "aff3d8f1b457491fab56565ec7a0121b",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 103,
"avg_line_length": 32.45161290322581,
"alnum_prop": 0.7311133200795229,
"repo_name": "bugbound/webnuke",
"id": "87cf3bf89983a1eaa841ebf8d58eda02752154c0",
"size": "2012",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libs/angular/angularCommands.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "7488"
},
{
"name": "Python",
"bytes": "77507"
},
{
"name": "Shell",
"bytes": "420"
}
],
"symlink_target": ""
} |
from django.core.cache import cache
from django.db import models
from django.utils.translation import ugettext_lazy as _
from core.models import BaseModel
from package.models import Project
from grid.models import Grid
# Kinds of searchable items; stored in SearchV2.item_type as
# (db value, human-readable label) pairs.
ITEM_TYPE_CHOICES = (
    ('package', 'Package'),
    ('grid', 'Grid'),
)
class SearchV2(BaseModel):
    """
    Denormalized search record for packages and grids.

    Searches available on:

        title
        description
        grids
        packages
        categories
        stars
        number of forks
        last repo commit
        last release on PyPI
    """
    weight = models.IntegerField(_("Weight"), default=0)
    # Which model this row denormalizes ('package' or 'grid') plus its pk.
    item_type = models.CharField(_("Item Type"), max_length=40, choices=ITEM_TYPE_CHOICES)
    item_id = models.IntegerField(_("Item ID"))
    is_draft = models.BooleanField(_("Is draft"), default=False)
    title = models.CharField(_("Title"), max_length=100, db_index=True)
    title_no_prefix = models.CharField(_("No Prefix Title"), max_length=100, db_index=True)
    slug = models.SlugField(_("Slug"), db_index=True)
    slug_no_prefix = models.SlugField(_("No Prefix Slug"), db_index=True)
    clean_title = models.CharField(_("Clean title with no crud"), max_length=100, db_index=True)
    description = models.TextField(_("Repo Description"), blank=True)
    category = models.CharField(_("Category"), blank=True, max_length=50)
    absolute_url = models.CharField(_("Absolute URL"), max_length=255)
    repo_watchers = models.IntegerField(_("Stars"), default=0)
    repo_forks = models.IntegerField(_("repo forks"), default=0)
    pypi_downloads = models.IntegerField(_("Pypi downloads"), default=0)
    usage = models.IntegerField(_("Number of users"), default=0)
    participants = models.TextField(_("Participants"),
        help_text="List of collaborats/participants on the project", blank=True)
    last_committed = models.DateTimeField(_("Last commit"), blank=True, null=True)
    last_released = models.DateTimeField(_("Last release"), blank=True, null=True)

    class Meta:
        ordering = ['-weight', ]
        verbose_name_plural = 'SearchV2s'
        unique_together = ('item_type', 'item_id',)

    def __str__(self):
        return "{0}:{1}".format(self.weight, self.title)

    @models.permalink
    def get_absolute_url(self):
        # NOTE(review): @models.permalink normally expects a
        # (view_name, args, kwargs) tuple, not a ready-made URL -- confirm
        # this works with the Django version in use.
        return self.absolute_url

    def pypi_name(self):
        """Return the PyPI name of the matching Project, cached for a day."""
        key = "SEARCH_PYPI_NAME-{0}".format(self.slug)
        pypi_name = cache.get(key)
        if pypi_name:
            return pypi_name
        try:
            package = Project.objects.get(slug=self.slug)
        except Project.DoesNotExist:
            # No matching project: return '' (the miss is not cached).
            return ""
        pypi_name = package.pypi_name
        cache.set(key, pypi_name, 24 * 60 * 60)
        return pypi_name

    def get_resource_uri(self):
        # Bug fix: the second placeholder used to be the literal 3, which
        # produced the same URI for every record; use the row's item_id.
        return '/api/v4/{}/{}/'.format(self.item_type, self.item_id)

    def _self(self):
        # NOTE(review): returns the instance itself; its purpose is not
        # visible from this file.
        return self
| {
"content_hash": "b5cc9b7965dd5d5081e1fbab84ba3320",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 96,
"avg_line_length": 35.24390243902439,
"alnum_prop": 0.62560553633218,
"repo_name": "noisy/steemprojects.com",
"id": "d2c29e2398b8e470abdc5e55c0d74473a4a4c5dc",
"size": "2891",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "searchv2/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "63399"
},
{
"name": "Dockerfile",
"bytes": "1731"
},
{
"name": "HTML",
"bytes": "155684"
},
{
"name": "Makefile",
"bytes": "2785"
},
{
"name": "Python",
"bytes": "431771"
},
{
"name": "Shell",
"bytes": "5227"
}
],
"symlink_target": ""
} |
import unittest
from stackato import restclient
class MockRestClient(restclient.RestClient):
    # Test double for RestClient: shadows the `requests` library with an
    # inner namespace class that records how the session was created and
    # hands back a canned request object instead of doing network I/O.

    class requests(object):
        @classmethod
        def session(cls, headers, verify):
            # Remember the session arguments so tests can assert on them.
            cls.init = [headers, verify]
            return cls

        headers = {}

        @classmethod
        def request(cls, method, url, return_response=False):
            # Always return the same canned request object.
            return cls.request_obj

        class request_obj(object):
            # Minimal stand-in for a prepared request/response pair.
            headers = {'Content-Type': ''}

            @classmethod
            def send(cls, prefetch):
                pass

            response = None
class RestClientTests(unittest.TestCase):
    """Unit tests for RestClient, exercised through MockRestClient."""

    def _make_client(self):
        # Every test starts from a fresh mocked client.
        return MockRestClient()

    def test_init(self):
        expected_session_args = [
            {'Pragma': 'no-cache', 'Cache-Control': 'no-cache'},
            False,
        ]
        self.assertEqual(self._make_client().requests.init,
                         expected_session_args)

    def test_target(self):
        self.assertEqual(self._make_client().target,
                         'https://api.127.0.0.1.xip.io')

    def test_headers(self):
        self.assertEqual(self._make_client().headers, {})

    def test_request(self):
        # Must not raise; the mock returns a canned request object.
        self._make_client().request('foo', 'bar')
class ExceptionsTests(unittest.TestCase):
    # Smoke test: the exceptions module must at least import cleanly.
    def test_it(self):
        from stackato import exceptions
class StackatoClientTests(unittest.TestCase):
    # Smoke test: the stackatoclient module must at least import cleanly.
    def test_it(self):
        from stackato import stackatoclient
| {
"content_hash": "48a6d91f7c3eb7a8f957823cad93ba31",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 78,
"avg_line_length": 26.11111111111111,
"alnum_prop": 0.5936170212765958,
"repo_name": "noderabbit-team/PyStackato",
"id": "cb3239ff7b58aa34852641883c0e6123ede9394e",
"size": "1410",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "11702"
},
{
"name": "Shell",
"bytes": "230"
}
],
"symlink_target": ""
} |
'''
Created on 2016年1月4日
@author: Darren
'''
'''
Given a string array words, find the maximum value of length(word[i]) * length(word[j]) where the two words do not share common letters. You may assume that each word will contain only lower case letters. If no such two words exist, return 0.
Example 1:
Given ["abcw", "baz", "foo", "bar", "xtfn", "abcdef"]
Return 16
The two words can be "abcw", "xtfn".
Example 2:
Given ["a", "ab", "abc", "d", "cd", "bcd", "abcd"]
Return 4
The two words can be "ab", "cd".
Example 3:
Given ["a", "aa", "aaa", "aaaa"]
Return 0
No such pair of words.
'''
class Solution(object):
    def maxProduct(self, words):
        """Return the maximum len(words[i]) * len(words[j]) over pairs of
        words that share no letters, or 0 if no such pair exists.

        :type words: List[str]
        :rtype: int

        Each word is encoded as a 26-bit mask of the lower-case letters it
        contains, so the "no common letters" test is a single bitwise AND
        instead of building and intersecting sets per pair.  For words with
        the same letter set only the longest one is kept, exactly like the
        previous sorted-letter-key dictionary did.
        """
        # letter-set mask -> length of the longest word with that set
        best_len = {}
        for word in words:
            mask = 0
            for ch in set(word):
                mask |= 1 << (ord(ch) - ord('a'))
            if best_len.get(mask, 0) < len(word):
                best_len[mask] = len(word)

        result = 0
        masks = list(best_len.items())
        for i in range(len(masks)):
            mask_i, len_i = masks[i]
            for j in range(i + 1, len(masks)):
                mask_j, len_j = masks[j]
                # Disjoint letter sets <=> no shared bits.
                if not (mask_i & mask_j):
                    product = len_i * len_j
                    if product > result:
                        result = product
        return result
# Quick manual check: expected output is 16 ("abcw" paired with "xtfn").
so=Solution()
words=["abcw", "baz", "foo", "bar", "xtfn", "abcdef"]
print(so.maxProduct(words))
| {
"content_hash": "8fa681e68ab28a4a1add506243ff67bc",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 242,
"avg_line_length": 27.58823529411765,
"alnum_prop": 0.5373134328358209,
"repo_name": "darrencheng0817/AlgorithmLearning",
"id": "a5a8734e08b2005de1ca9479a1e4be0aef72b6c8",
"size": "1413",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/leetcode/maximumProductOfWordLengths.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "2663"
},
{
"name": "Java",
"bytes": "89490"
},
{
"name": "Python",
"bytes": "600854"
}
],
"symlink_target": ""
} |
'''Binary search'''
from __future__ import division
from __future__ import print_function
def recursive_binary_search(sorted_sequence, key, start=0, end=None):
    '''Returns the index of `key` in the `sorted_sequence`, or None if `key`
    is not in `sorted_sequence`.

    `start` (inclusive) and `end` (exclusive) bound the searched slice; by
    default the whole sequence is searched.

    Bug fixes relative to the previous version:
      * `end = end or len(...)` treated an explicit `end=0` as "search the
        whole sequence"; only `None` triggers the default now.
      * the left half was recursed with `end=middle - 1`, mixing an
        inclusive bound into the half-open convention; combined with the
        `(end - start) < 0` base case this read one element past the range
        and raised IndexError for keys larger than every element.
    '''
    if end is None:
        end = len(sorted_sequence)
    if start >= end:
        # Empty range: key not present.
        return None
    middle = start + ((end - start) // 2)
    if sorted_sequence[middle] == key:
        return middle
    elif sorted_sequence[middle] < key:
        return recursive_binary_search(sorted_sequence, key, middle + 1, end)
    else:
        # Keep the half-open convention: exclude `middle`, keep everything
        # before it.
        return recursive_binary_search(sorted_sequence, key, start, middle)
def iterative_binary_search(sorted_sequence, key):
    '''Returns the index of `key` in the `sorted_sequence`, or None if `key`
    is not in `sorted_sequence`.'''
    low, high = 0, len(sorted_sequence)
    # Invariant: if key is present, its index lies in the half-open
    # range [low, high).
    while low < high:
        middle = (low + high) // 2
        candidate = sorted_sequence[middle]
        if candidate < key:
            low = middle + 1
        elif candidate > key:
            high = middle
        else:
            return middle
    return None
if __name__ == '__main__':
    # Smoke tests against a fixed sorted sequence (with duplicates).
    # Idiom fix: compare against None with `is`, not `==`.
    seq = [1, 1, 2, 5, 9, 11, 11, 11, 12, 18, 29, 37, 38, 40, 67, 78, 94, 94]
    assert recursive_binary_search(seq, 12) == 8
    assert recursive_binary_search(seq, 13) is None
    assert iterative_binary_search(seq, 12) == 8
    assert iterative_binary_search(seq, 13) is None
    print('Tests passed.')
| {
"content_hash": "e88cc54645f662b794708de5cefc6ce6",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 79,
"avg_line_length": 28.807692307692307,
"alnum_prop": 0.6021361815754339,
"repo_name": "gg/algorithms",
"id": "8fb25101ba3f10600cc1f89043550f880e728641",
"size": "2630",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "binary_search.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "11105"
},
{
"name": "C++",
"bytes": "44597"
},
{
"name": "Python",
"bytes": "45750"
}
],
"symlink_target": ""
} |
from unittest import TestCase, mock
from unittest.mock import patch, MagicMock
from core.httpoperation import HttpOperation
from core.replicator import Replicator
from tests.core.replicatortest import ReplicatorTest
def mock_request_get(url, params=None, headers=None, verify=False):
    """No-op stand-in for requests.get, used as a patch side_effect."""
    pass
def mock_request_post(url, data=None, json=None, headers=None, verify=False):
    """No-op stand-in for requests.post, used as a patch side_effect."""
    pass
def mock_request_delete(url, headers=None, verify=False):
    """No-op stand-in for requests.delete, used as a patch side_effect."""
    pass
def mock_request_put(url, data=None, headers=None, verify=False):
    """No-op stand-in for requests.put, used as a patch side_effect."""
    pass
def random_true(obj, bit):
    """Stub for random.getrandbits that always reports True."""
    return True
def random_false(obj, bit):
    """Stub for random.getrandbits that always reports False."""
    return False
def create_http_op_with_random_mock(op_code, host_basepath, path, op_infos, headers, ignore_tls=True):
    """Build an HttpOperation whose randomness is pinned to True.

    A Replicator is created from the shared sample definition, and the
    operation's random.getrandbits is replaced by a MagicMock returning
    True so that optional parameters are deterministically included.
    """
    replicator = Replicator(ReplicatorTest.SAMPLE_DEFINITION, True, False)
    http_op = HttpOperation(op_code, host_basepath, path,
                            op_infos, headers, replicator, False, ignore_tls)
    http_op.random.getrandbits = MagicMock(return_value=True)
    return http_op
class HttpOperationTest(TestCase):
    """Tests for HttpOperation, using the Swagger petstore
    'updatePetWithForm' operation definition as fixture data."""

    SAMPLE_OP_INFOS = {
        "tags": [
            "pet"
        ],
        "summary": "Updates a pet in the store with form data",
        "description": "",
        "operationId": "updatePetWithForm",
        "consumes": [
            "application/x-www-form-urlencoded"
        ],
        "produces": [
            "application/xml",
            "application/json"
        ],
        "parameters": [
            {
                "name": "petId",
                "in": "path",
                "description": "ID of pet that needs to be updated",
                "required": True,
                "type": "integer",
                "format": "int64"
            },
            {
                "name": "name",
                "in": "formData",
                "description": "Updated name of the pet",
                "required": False,
                "type": "string"
            },
            {
                "name": "status",
                "in": "formData",
                "description": "Updated status of the pet",
                "required": False,
                "type": "string"
            }
        ],
        "responses": {
            "405": {
                "description": "Invalid input"
            }
        },
        "security": [
            {
                "petstore_auth": [
                    "write:pets",
                    "read:pets"
                ]
            }
        ]
    }

    def setUp(self):
        # Fresh POST operation with randomness pinned to True.
        self.http_op = create_http_op_with_random_mock('post', 'https://server.de/', 'pet/{petId}/uploadImage',
                                                       self.SAMPLE_OP_INFOS, {"X-API-Key": "abcdef123"})

    def test_replace_url_parameter_replaces_placeholder_in_url_with_type_value(self):
        # 'integer' placeholders are filled with the type's default value (0).
        url = self.http_op.replace_url_parameter(self.http_op.url, 'petId', 'integer')
        self.assertEqual(url, 'https://server.de/pet/0/uploadImage')

    def test_replace_url_parameter_replaces_only_named_param(self):
        url = self.http_op.replace_url_parameter('https://server.de/pet/{petId}/uploadImage/{imgName}',
                                                 'imgName', 'string')
        self.assertEqual(url, 'https://server.de/pet/{petId}/uploadImage/')

    def test_create_form_parameter_makes_instance_of_type_as_string(self):
        # Form values are stringified instances of the declared type.
        value = self.http_op.create_form_parameter('integer')
        self.assertEqual(value, '0')

    def test_is_parameter_not_optional_but_randomize_returns_true_when_param_not_optional(self):
        result = self.http_op.is_parameter_not_optional_but_randomize(parameter_required=True)
        self.assertEqual(True, result)

    def test_is_parameter_not_optional_but_randomize_returns_true_when_param_optional_and_random_true(self):
        self.http_op.random.getrandbits = MagicMock(return_value=True)
        result = self.http_op.is_parameter_not_optional_but_randomize(parameter_required=False)
        self.assertEqual(True, result)

    def test_is_parameter_not_optional_but_randomize_returns_false_when_param_optional_and_random_false(self):
        self.http_op.random.getrandbits = MagicMock(return_value=False)
        result = self.http_op.is_parameter_not_optional_but_randomize(parameter_required=False)
        self.assertEqual(False, result)

    def test_execute_with_unrecognizable_http_op_will_result_in_Nonetype_response(self):
        # 'OGRE' is not a known HTTP verb, so execute() returns None.
        self.http_op = create_http_op_with_random_mock('OGRE', 'https://server.de/', 'pet/{petId}/uploadImage',
                                                       self.SAMPLE_OP_INFOS, {"X-API-Key": "abcdef123"})
        result = self.http_op.execute()
        self.assertIsNone(result)

    @patch('requests.get', side_effect=mock_request_get)
    def test_execute_with_parameter_definition_will_send_request_without_parameters_set(self, mock_get):
        # NOTE(review): this pops 'parameters' from the shared class-level
        # fixture dict, so tests running afterwards see the mutated fixture
        # -- confirm the intended test isolation.
        definition_no_parameters = self.SAMPLE_OP_INFOS
        definition_no_parameters.pop('parameters', 0)
        self.http_op = create_http_op_with_random_mock('get', 'https://server.de/', 'pet/{petId}/uploadImage',
                                                       definition_no_parameters, {"X-API-Key": "abcdef123"})
        self.http_op.execute()
        self.assertIn(mock.call(params={}, headers={"X-API-Key": "abcdef123"},
                                url='https://server.de/pet/{petId}/uploadImage', verify=False), mock_get.call_args_list)

    @patch('requests.post', side_effect=mock_request_post)
    def test_execute_will_post__op_request_with_params_when_form_data_param_set(self, mock_post):
        self.http_op.execute()
        self.assertIn(mock.call(data={'status': '', 'name': ''}, json=None, headers={"X-API-Key": "abcdef123"},
                                url='https://server.de/pet/0/uploadImage', verify=False), mock_post.call_args_list)

    @patch('requests.get', side_effect=mock_request_get)
    def test_execute_will_get_op_request_with_url_and_params_when_form_data_param_set(self, mock_get):
        self.http_op = create_http_op_with_random_mock('get', 'https://server.de/', 'pet/{petId}/uploadImage',
                                                       self.SAMPLE_OP_INFOS, {"X-API-Key": "abcdef123"})
        self.http_op.execute()
        self.assertIn(mock.call(params={'status': '', 'name': ''},
                                url='https://server.de/pet/0/uploadImage',
                                headers={"X-API-Key": "abcdef123"}, verify=False),
                      mock_get.call_args_list)

    @patch('requests.delete', side_effect=mock_request_delete)
    def test_execute_will_delete_op_request_with_url_only(self, mock_delete):
        # DELETE requests carry no body or query parameters.
        self.http_op = create_http_op_with_random_mock('delete', 'https://server.de/', 'pet/{petId}/uploadImage',
                                                       self.SAMPLE_OP_INFOS, {"X-API-Key": "abcdef123"})
        self.http_op.execute()
        self.assertIn(mock.call(url='https://server.de/pet/0/uploadImage',
                                headers={"X-API-Key": "abcdef123"}, verify=False),
                      mock_delete.call_args_list)

    @patch('requests.put', side_effect=mock_request_put)
    def test_execute_will_put_op_request_with_url_and_params_when_form_data_param_set(self, mock_put):
        self.http_op = create_http_op_with_random_mock('put', 'https://server.de/', 'pet/{petId}/uploadImage',
                                                       self.SAMPLE_OP_INFOS, {"X-API-Key": "abcdef123"})
        self.http_op.execute()
        self.assertIn(mock.call(data={'status': '', 'name': ''}, headers={"X-API-Key": "abcdef123"},
                                url='https://server.de/pet/0/uploadImage', verify=False), mock_put.call_args_list)
| {
"content_hash": "f3e2f29c3ca5c871332d26ff73bad4d7",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 120,
"avg_line_length": 44.820809248554916,
"alnum_prop": 0.5757028630384318,
"repo_name": "Teebytes/TnT-Fuzzer",
"id": "2e1234c7024755d019892d8d2deb905650ff751a",
"size": "7754",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tntfuzzer/tests/core/httpoperationtest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "295"
},
{
"name": "Python",
"bytes": "51826"
}
],
"symlink_target": ""
} |
import time
from oslo_log import log as logging
from murano.api import v1
from murano.common import wsgi
from murano.db.services import stats
LOG = logging.getLogger(__name__)
class RequestStatisticsCollection(object):
    """Accumulates API request/error statistics.

    ``request_count``/``error_count`` are incremented by the callers
    (``update_count``/``update_error_count``); the ``add_api_*`` methods
    only fold the execution time into the running average and update the
    per-tenant counters.
    """

    def __init__(self):
        # Previously these were mutable class attributes, so every instance
        # shared the same per-tenant dicts; instance attributes keep each
        # collection independent.
        self.request_count = 0
        self.error_count = 0
        self.average_time = 0.0
        self.requests_per_tenant = {}
        self.errors_per_tenant = {}

    def _fold_time(self, ex_time):
        # Running average over request_count samples; the caller bumps
        # request_count afterwards (so the divisor anticipates it).
        self.average_time = (self.average_time * self.request_count +
                             ex_time) / (self.request_count + 1)

    def add_api_request(self, tenant, ex_time):
        """Record one successful API call for `tenant` taking `ex_time` s."""
        self._fold_time(ex_time)
        if tenant:
            self.requests_per_tenant[tenant] = \
                self.requests_per_tenant.get(tenant, 0) + 1

    def add_api_error(self, tenant, ex_time):
        """Record one failed API call for `tenant` taking `ex_time` s."""
        self._fold_time(ex_time)
        if tenant:
            self.errors_per_tenant[tenant] = \
                self.errors_per_tenant.get(tenant, 0) + 1
def stats_count(api, method):
    """Decorator factory: records timing/usage statistics for an API method.

    On success the elapsed time is folded into the request statistics; on
    failure it is folded into the error statistics and the exception is
    re-raised.  The decorated method is expected to receive the request
    (with ``.context.project_id``) as its second positional argument.
    """
    def wrapper(func):
        def wrap(*args, **kwargs):
            try:
                ts = time.time()
                result = func(*args, **kwargs)
                te = time.time()
                tenant = args[1].context.project_id
                update_count(api, method, te - ts,
                             tenant)
                return result
            except Exception:
                te = time.time()
                tenant = args[1].context.project_id
                LOG.exception('API {api} method {method} raised an '
                              'exception'.format(api=api, method=method))
                # Bug fix: the elapsed time was computed as `te - te`
                # (always 0); record the real duration instead.
                update_error_count(api, method, te - ts, tenant)
                raise
        return wrap
    return wrapper
def update_count(api, method, ex_time, tenant=None):
    """Record one successful API call in the global stats object."""
    LOG.debug("Updating count stats for {api}, {method} on object {object}"
              .format(api=api, method=method, object=v1.stats))
    v1.stats.add_api_request(tenant, ex_time)
    v1.stats.request_count += 1
def update_error_count(api, method, ex_time, tenant=None):
    """Record one failed API call in the global stats object.

    The request counter is bumped as well: an errored call still counts
    as a request.
    """
    LOG.debug("Updating count stats for {api}, {method} on object "
              "{object}".format(api=api, method=method, object=v1.stats))
    v1.stats.add_api_error(tenant, ex_time)
    v1.stats.error_count += 1
    v1.stats.request_count += 1
def init_stats():
    # Create the process-wide statistics collection exactly once.
    if not v1.stats:
        v1.stats = RequestStatisticsCollection()
class Controller(object):
    """Controller exposing the collected statistics entries."""

    def get(self, request):
        # Serialize every stored statistics entry to a plain dict.
        return [entry.to_dict() for entry in stats.Statistics().get_all()]
def create_resource():
    """Build the WSGI resource wrapping the statistics controller."""
    return wsgi.Resource(Controller())
| {
"content_hash": "05d8c39be845c73cba2abee4354541c7",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 75,
"avg_line_length": 30.956043956043956,
"alnum_prop": 0.5733049343272986,
"repo_name": "openstack/murano",
"id": "f16472267f60ec6ad50afe0c356256f070aefe90",
"size": "3431",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "murano/api/v1/request_statistics.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "2026"
},
{
"name": "PowerShell",
"bytes": "2966"
},
{
"name": "Python",
"bytes": "1817159"
},
{
"name": "Shell",
"bytes": "37531"
}
],
"symlink_target": ""
} |
import jsonrpc
import sys
import urllib
# RPC credentials/endpoint for the local daemon; URL-quoted so special
# characters in the credentials survive embedding into the URL.
username = urllib.quote_plus ("ultimateonlinecash")
password = urllib.quote_plus ("password")
port = 8336
url = "http://%s:%s@localhost:%d/" % (username, password, port)
class AuxpowStats:
"""
Keep track of the interesting statistics of the auxpows
found in the blockchain.
"""
def __init__ (self):
self.merkleLength = dict ()
self.txSize = dict ()
self.maxMerkle = 0
self.maxTxSize = 0
def add (self, obj):
"""
Add the auxpow described by the block JSON obj (if any)
to the statistics.
"""
if 'auxpow' not in obj:
return
txSize = len (obj['auxpow']['tx']['hex']) / 2
merkleLen = len (obj['auxpow']['merklebranch'])
if txSize not in self.txSize:
self.txSize[txSize] = 1
else:
self.txSize[txSize] += 1
if merkleLen not in self.merkleLength:
self.merkleLength[merkleLen] = 1
else:
self.merkleLength[merkleLen] += 1
if txSize > self.maxTxSize:
self.maxTxSize = txSize
self.maxTxSizeHash = obj['hash']
if merkleLen > self.maxMerkle:
self.maxMerkle = merkleLen
self.maxMerkleHash = obj['hash']
def output (self):
"""
Output statistics in the end.
"""
print "Merkle lengths:"
for (key, val) in self.merkleLength.items ():
print "%4d: %6d" % (key, val)
print "Maximum: %d, block %s\n" % (self.maxMerkle, self.maxMerkleHash)
print "\nCoinbase tx sizes:"
buckets = [0, 1000, 2000, 5000, 10000, 20000, 50000]
bucketCnts = (len (buckets) + 1) * [0]
for (key, val) in self.txSize.items ():
for i in range (len (buckets) - 1, -1, -1):
if (key >= buckets[i]):
bucketCnts[i] += val
for i in range (len (buckets) - 1):
label = "%d - %d" % (buckets[i], buckets[i + 1] - 1)
print " %15s: %6d" % (label, bucketCnts[i])
label = ">= %d" % buckets[-1]
print " %15s: %6d" % (label, bucketCnts[-1])
print "Maximum: %d, block %s\n" % (self.maxTxSize, self.maxTxSizeHash)
# Connect to the daemon and locate the currently active chain tip.
rpc = jsonrpc.proxy.ServiceProxy (url)

tips = rpc.getchaintips ()
tip = None
for t in tips:
  if t['status'] == 'active':
    tip = t
    break
assert tip is not None

# Walk the chain backwards from the tip, feeding each block into the
# statistics collector until the genesis block (no previousblockhash).
stats = AuxpowStats ()
curHash = tip['hash']
while True:
  obj = rpc.getblock (curHash)
  stats.add (obj)
  if obj['height'] % 1000 == 0:
    # Progress indicator on stderr (stdout is reserved for the stats).
    sys.stderr.write ("At height %d...\n" % obj['height'])
  if 'previousblockhash' not in obj:
    break
  curHash = obj['previousblockhash']

stats.output ()
| {
"content_hash": "bf783e920e3182887882b9e1bd32d6ed",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 74,
"avg_line_length": 26.595744680851062,
"alnum_prop": 0.604,
"repo_name": "cryptoprojects/ultimateonlinecash",
"id": "623d7669403b708f02a2a65570005369cabc4a85",
"size": "3311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/auxpow-sizes/auxpow-sizes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "448146"
},
{
"name": "C",
"bytes": "775481"
},
{
"name": "C++",
"bytes": "5391599"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "194014"
},
{
"name": "Makefile",
"bytes": "114799"
},
{
"name": "Objective-C",
"bytes": "2162"
},
{
"name": "Objective-C++",
"bytes": "6774"
},
{
"name": "Python",
"bytes": "1265404"
},
{
"name": "QMake",
"bytes": "756"
},
{
"name": "Shell",
"bytes": "61659"
}
],
"symlink_target": ""
} |
"""
Dummy client runner
This module implements a stand-alone launcher for stress-testing
an Evennia game. It will launch any number of fake clients. These
clients will log into the server and start doing random operations.
Customizing and weighing these operations differently depends on
which type of game is tested. The module contains a testing module
for plain Evennia.
Please note that you shouldn't run this on a production server!
Launch the program without any arguments or options to see a
full step-by-step setup help.
Basically (for testing default Evennia):
- Use an empty/testing database.
- set PERMISSION_ACCOUNT_DEFAULT = "Builder"
- start server, eventually with profiling active
- launch this client runner
If you want to customize the runner's client actions
(because you changed the cmdset or needs to better
match your use cases or add more actions), you can
change which actions by adding a path to
DUMMYRUNNER_ACTIONS_MODULE = <path.to.your.module>
in your settings. See utils.dummyrunner_actions.py
for instructions on how to define this module.
"""
import sys
import time
import random
from argparse import ArgumentParser
from twisted.conch import telnet
from twisted.internet import reactor, protocol
from twisted.internet.task import LoopingCall
from django.conf import settings
from evennia.utils import mod_import, time_format
# Load the dummyrunner settings module
DUMMYRUNNER_SETTINGS = mod_import(settings.DUMMYRUNNER_SETTINGS_MODULE)
if not DUMMYRUNNER_SETTINGS:
    raise IOError(
        "Error: Dummyrunner could not find settings file at %s"
        % settings.DUMMYRUNNER_SETTINGS_MODULE
    )

# Timestamp format used by the runner.
DATESTRING = "%Y%m%d%H%M%S"

# Settings

# number of clients to launch if no input is given on command line
NCLIENTS = 1
# time between each 'tick', in seconds, if not set on command
# line. All launched clients will be called upon to possibly do an
# action with this frequency.
TIMESTEP = DUMMYRUNNER_SETTINGS.TIMESTEP
# chance of a client performing an action, per timestep. This helps to
# spread out usage randomly, like it would be in reality.
CHANCE_OF_ACTION = DUMMYRUNNER_SETTINGS.CHANCE_OF_ACTION
# spread out the login action separately, having many accounts create accounts
# and connect simultaneously is generally unlikely.
CHANCE_OF_LOGIN = DUMMYRUNNER_SETTINGS.CHANCE_OF_LOGIN
# Port to use, if not specified on command line
TELNET_PORT = DUMMYRUNNER_SETTINGS.TELNET_PORT or settings.TELNET_PORTS[0]

# number of clients currently logged in (updated at runtime)
NLOGGED_IN = 0
# Messages
# Printed at startup; {N} is filled in with the requested client count.
INFO_STARTING = """
Dummyrunner starting using {N} dummy account(s). If you don't see
any connection messages, make sure that the Evennia server is
running.
Use Ctrl-C to stop/disconnect clients.
"""
# Printed (and the run aborted) when the settings mixin is not active.
ERROR_NO_MIXIN = """
Error: Evennia is not set up for dummyrunner. Before starting the
server, make sure to include the following at *the end* of your
settings file (remove when not using dummyrunner!):
from evennia.server.profiling.settings_mixin import *
This will change the settings in the following way:
- change PERMISSION_ACCOUNT_DEFAULT to 'Developer' to allow clients
to test all commands
- change PASSWORD_HASHERS to use a faster (but less safe) algorithm
when creating large numbers of accounts at the same time
If you don't want to use the custom settings of the mixin for some
reason, you can change their values manually after the import, or
add DUMMYRUNNER_MIXIN=True to your settings file to avoid this
error completely.
Warning: Don't run dummyrunner on a production database! It will
create a lot of spammy objects and accounts!
"""
# Printed when the ACTIONS tuple from the settings module is malformed.
ERROR_FEW_ACTIONS = """
Dummyrunner settings error: The ACTIONS tuple is too short: it must
contain at least login- and logout functions.
"""
# Long-form description shown by argparse's --help. Fix: the original
# text repeated "@shutdown." twice across the step-6 line break.
HELPTEXT = """
DO NOT RUN THIS ON A PRODUCTION SERVER! USE A CLEAN/TESTING DATABASE!
This stand-alone program launches dummy telnet clients against a
running Evennia server. The idea is to mimic real accounts logging in
and repeatedly doing resource-heavy commands so as to stress test the
game. It uses the default command set to log in and issue commands, so
if that was customized, some of the functionality will not be tested
(it will not fail, the commands will just not be recognized). The
running clients will create new objects and rooms all over the place
as part of their running, so using a clean/testing database is
strongly recommended.
Setup:
1) setup a fresh/clean database (if using sqlite, just safe-copy
away your real evennia.db3 file and create a new one with
`evennia migrate`)
2) in server/conf/settings.py, add
PERMISSION_ACCOUNT_DEFAULT="Builder"
This is so that the dummy accounts can test building operations.
You can also customize the dummyrunner by modifying a setting
file specified by DUMMYRUNNER_SETTINGS_MODULE
3) Start Evennia like normal, optionally with profiling (--profile)
4) Run this dummy runner via the evennia launcher:
evennia --dummyrunner <nr_of_clients>
5) Log on and determine if game remains responsive despite the
heavier load. Note that if you activated profiling, there is a
considerate additional overhead from the profiler too so you
should usually not consider game responsivity when using the
profiler at the same time.
6) If you use profiling, let the game run long enough to gather
data, then stop the server cleanly using evennia stop or
@shutdown. The profile appears as
server/logs/server.prof/portal.prof (see Python's manual on
cProfiler).
Notes:
The dummyrunner tends to create a lot of accounts all at once, which is
a very heavy operation. This is not a realistic use-case - what you want
to test is performance during run. A large
number of clients here may lock up the client until all have been
created. It may be better to connect multiple dummyrunners instead of
starting one single one with a lot of accounts. Exactly what this number
is depends on your computer power. So start with 10-20 clients and increase
until you see the initial login slows things too much.
"""
# ------------------------------------------------------------
# Helper functions
# ------------------------------------------------------------
ICOUNT = 0
def idcounter():
    """
    Produce a process-unique id.
    Returns:
        str: A monotonically increasing counter value, as a string
            ("1", "2", ...).
    """
    global ICOUNT
    ICOUNT = ICOUNT + 1
    return "%s" % ICOUNT
GCOUNT = 0
def gidcounter():
    """
    Makes globally unique ids.
    Returns:
        count (str): A globally unique id on the form
            "<timestamp>-<counter>" (note: a string, not an int).
    """
    global GCOUNT
    GCOUNT += 1
    return "%s-%s" % (time.strftime(DATESTRING), GCOUNT)
def makeiter(obj):
    """
    Guarantee an iterable result.
    Args:
        obj (any): Object to turn iterable.
    Returns:
        iterable: ``obj`` unchanged if it already supports iteration
            (note that strings do), otherwise a one-element list
            wrapping it.
    """
    if hasattr(obj, "__iter__"):
        return obj
    return [obj]
# ------------------------------------------------------------
# Client classes
# ------------------------------------------------------------
class DummyClient(telnet.StatefulTelnetProtocol):
    """
    Handles connection to a running Evennia server,
    mimicking a real account by sending commands on
    a timer.
    """
    def connectionMade(self):
        """
        Called when connection is first established.
        """
        # public properties
        self.cid = idcounter()
        self.key = "Dummy-%s" % self.cid
        self.gid = "%s-%s" % (time.strftime(DATESTRING), self.cid)
        self.istep = 0
        self.exits = [] # exit names created
        self.objs = [] # obj names created
        # _connected flips once real text (not telnet negotiation) arrives
        self._connected = False
        self._loggedin = False
        self._logging_out = False
        self._report = ""
        self._cmdlist = [] # already stepping in a cmd definition
        # actions tuple layout (built in start_all_dummy_clients):
        # (login_func, logout_func, (cumprob, func), ...)
        self._login = self.factory.actions[0]
        self._logout = self.factory.actions[1]
        self._actions = self.factory.actions[2:]
        # ensure every client logs out cleanly when the reactor stops (Ctrl-C)
        reactor.addSystemEventTrigger("before", "shutdown", self.logout)
    def dataReceived(self, data):
        """
        Called when data comes in over the protocol. We wait to start
        stepping until the server actually responds
        Args:
            data (str): Incoming data.
        """
        # chr(255) is the telnet IAC byte, i.e. protocol negotiation traffic
        if not self._connected and not data.startswith(chr(255)):
            # wait until we actually get text back (not just telnet
            # negotiation)
            self._connected = True
            # start client tick
            d = LoopingCall(self.step)
            # dissipate exact step by up to +/- 0.5 second
            timestep = TIMESTEP + (-0.5 + (random.random() * 1.0))
            d.start(timestep, now=True).addErrback(self.error)
    def connectionLost(self, reason):
        """
        Called when loosing the connection.
        Args:
            reason (str): Reason for loosing connection.
        """
        # a loss during deliberate logout is expected; stay quiet then
        if not self._logging_out:
            print("client %s(%s) lost connection (%s)" % (self.key, self.cid, reason))
    def error(self, err):
        """
        Error callback.
        Args:
            err (Failure): Error instance.
        """
        print(err)
    def counter(self):
        """
        Produces a unique id, also between clients.
        Returns:
            counter (str): A globally unique counter string.
        """
        return gidcounter()
    def logout(self):
        """
        Causes the client to log out of the server. Triggered by ctrl-c signal.
        """
        self._logging_out = True
        cmd = self._logout(self)
        print("client %s(%s) logout (%s actions)" % (self.key, self.cid, self.istep))
        self.sendLine(cmd)
    def step(self):
        """
        Perform a step. This is called repeatedly by the runner and
        causes the client to issue commands to the server. This holds
        all "intelligence" of the dummy client.
        """
        global NLOGGED_IN
        rand = random.random()
        if not self._cmdlist:
            # no commands ready. Load some.
            if not self._loggedin:
                if rand < CHANCE_OF_LOGIN:
                    # get the login commands
                    self._cmdlist = list(makeiter(self._login(self)))
                    NLOGGED_IN += 1 # this is for book-keeping
                    print("connecting client %s (%i/%i)..." % (self.key, NLOGGED_IN, NCLIENTS))
                    self._loggedin = True
                else:
                    # no login yet, so cmdlist not yet set
                    return
            else:
                # we always pick a cumulatively random function
                # NOTE(review): assumes the cumulative probabilities reach 1.0;
                # a draw above the last cprob would raise IndexError -- confirm
                # against how cprobs are normalized in start_all_dummy_clients.
                crand = random.random()
                cfunc = [func for (cprob, func) in self._actions if cprob >= crand][0]
                self._cmdlist = list(makeiter(cfunc(self)))
        # at this point we always have a list of commands
        if rand < CHANCE_OF_ACTION:
            # send to the game
            self.sendLine(str(self._cmdlist.pop(0)))
            self.istep += 1
class DummyFactory(protocol.ClientFactory):
    """
    Twisted factory spawning one DummyClient per connection; the
    prepared actions tuple is stored once here and shared by all
    clients the factory creates.
    """
    protocol = DummyClient
    def __init__(self, actions):
        "Setup the factory base (shared by all clients)"
        self.actions = actions
# ------------------------------------------------------------
# Access method:
# Starts clients and connects them to a running server.
# ------------------------------------------------------------
def start_all_dummy_clients(nclients):
    """
    Initialize all clients, connect them and start to step them
    Args:
        nclients (int): Number of dummy clients to connect.
    """
    global NCLIENTS
    NCLIENTS = int(nclients)
    actions = DUMMYRUNNER_SETTINGS.ACTIONS
    # ACTIONS must hold at least (login_func, logout_func); everything
    # after those two is a (probability, func) tuple.
    if len(actions) < 2:
        print(ERROR_FEW_ACTIONS)
        return
    # make sure the probabilities add up to 1
    pratio = 1.0 / sum(tup[0] for tup in actions[2:])
    flogin, flogout, probs, cfuncs = (
        actions[0],
        actions[1],
        [tup[0] * pratio for tup in actions[2:]],
        [tup[1] for tup in actions[2:]],
    )
    # create cumulative probabilies for the random actions
    # (O(n^2) in the number of actions; fine for the handful expected here)
    cprobs = [sum(v for i, v in enumerate(probs) if i <= k) for k in range(len(probs))]
    # rebuild a new, optimized action structure:
    # (login, logout, (cumprob, func), ...) -- consumed by DummyClient
    actions = (flogin, flogout) + tuple(zip(cprobs, cfuncs))
    # setting up all clients (they are automatically started)
    factory = DummyFactory(actions)
    for i in range(NCLIENTS):
        reactor.connectTCP("localhost", TELNET_PORT, factory)
    # start reactor (blocks until shutdown)
    reactor.run()
# ------------------------------------------------------------
# Command line interface
# ------------------------------------------------------------
if __name__ == "__main__":
    # Refuse to run unless the dummyrunner settings mixin is active; it
    # relaxes permissions/password hashing for mass account creation.
    try:
        settings.DUMMYRUNNER_MIXIN
    except AttributeError:
        print(ERROR_NO_MIXIN)
        sys.exit()
    # parsing command line with default vals
    parser = ArgumentParser(description=HELPTEXT)
    # Fix: with nargs=1 argparse stores the value as a one-element list,
    # and the value is read as args.nclients[0] below. The old default of
    # a bare int 1 made that subscript raise TypeError whenever -N was
    # omitted; the default must itself be a list.
    parser.add_argument(
        "-N", nargs=1, default=[1], dest="nclients", help="Number of clients to start"
    )
    args = parser.parse_args()
    print(INFO_STARTING.format(N=args.nclients[0]))
    # run the dummyrunner, timing total wall-clock duration
    t0 = time.time()
    start_all_dummy_clients(nclients=args.nclients[0])
    ttot = time.time() - t0
    # output runtime
    print("... dummy client runner stopped after %s." % time_format(ttot, style=3))
| {
"content_hash": "2dc106cb48fdb0d61ad9a18481b45a59",
"timestamp": "",
"source": "github",
"line_count": 434,
"max_line_length": 95,
"avg_line_length": 31.018433179723502,
"alnum_prop": 0.6374238597533799,
"repo_name": "jamesbeebop/evennia",
"id": "3b13bb49f64f0dae73cb3991903fd8d0f21645e1",
"size": "13462",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "evennia/server/profiling/dummyrunner.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "19127"
},
{
"name": "Emacs Lisp",
"bytes": "2734"
},
{
"name": "HTML",
"bytes": "13558"
},
{
"name": "JavaScript",
"bytes": "24398"
},
{
"name": "Python",
"bytes": "2143170"
}
],
"symlink_target": ""
} |
import wx
import armid
import ARM
from RoleCostDialog import RoleCostDialog
class RoleCostListCtrl(wx.ListCtrl):
  """Two-column report list pairing role names with their costs.

  Rows are added and removed through a right-click context menu; adding
  a row pops up a RoleCostDialog to collect the role/cost pair.
  """
  def __init__(self,parent,winId,boxSize=wx.DefaultSize):
    """Build the two columns, the context menu and the event bindings."""
    wx.ListCtrl.__init__(self,parent,winId,size=boxSize,style=wx.LC_REPORT)
    self.InsertColumn(0,'Role')
    self.SetColumnWidth(0,150)
    self.InsertColumn(1,'Cost')
    self.SetColumnWidth(1,300)
    self.theDimMenu = wx.Menu()
    self.theDimMenu.Append(armid.COSTLISTCTRL_MENUADD_ID,'Add')
    self.theDimMenu.Append(armid.COSTLISTCTRL_MENUDELETE_ID,'Delete')
    # -1 denotes 'no row currently selected'
    self.theSelectedIdx = -1
    self.Bind(wx.EVT_RIGHT_DOWN,self.OnRightDown)
    self.Bind(wx.EVT_LIST_ITEM_SELECTED,self.OnItemSelected)
    self.Bind(wx.EVT_LIST_ITEM_DESELECTED,self.OnItemDeselected)
    wx.EVT_MENU(self.theDimMenu,armid.COSTLISTCTRL_MENUADD_ID,self.onAddProperty)
    wx.EVT_MENU(self.theDimMenu,armid.COSTLISTCTRL_MENUDELETE_ID,self.onDeleteProperty)
  def setEnvironment(self,environmentName):
    """Interface hook; this control keeps no per-environment state."""
    pass
  def OnItemSelected(self,evt):
    """Remember the index of the row the user selected."""
    self.theSelectedIdx = evt.GetIndex()
  def OnItemDeselected(self,evt):
    """Clear the selection marker."""
    self.theSelectedIdx = -1
  def OnRightDown(self,evt):
    """Show the add/delete context menu on right-click."""
    self.PopupMenu(self.theDimMenu)
  def onAddProperty(self,evt):
    """Prompt for a role/cost pair and append it as a new row."""
    dlg = RoleCostDialog(self)
    try:
      if (dlg.ShowModal() == armid.ROLECOST_BUTTONADD_ID):
        roleName = dlg.role()
        roleCost = dlg.cost()
        idx = self.GetItemCount()
        self.InsertStringItem(idx,roleName)
        self.SetStringItem(idx,1,roleCost)
    finally:
      # Fix: modal dialogs are not garbage-collected by wx; they must be
      # destroyed explicitly or each Add leaks a native window.
      dlg.Destroy()
  def onDeleteProperty(self,evt):
    """Remove the selected row, or warn if no row is selected."""
    if (self.theSelectedIdx == -1):
      errorText = 'No property selected'
      errorLabel = 'Delete Role Cost'
      dlg = wx.MessageDialog(self,errorText,errorLabel,wx.OK)
      dlg.ShowModal()
      dlg.Destroy()
    else:
      # (removed an unused GetItemText lookup of the selected value)
      self.DeleteItem(self.theSelectedIdx)
  def load(self,roleCosts):
    """Populate the control from a sequence of (role, cost) pairs."""
    for idx,roleCost in enumerate(roleCosts):
      role = roleCost[0]
      cost = roleCost[1]
      self.InsertStringItem(idx,role)
      self.SetStringItem(idx,1,cost)
  def roles(self):
    """Return the control's contents as a list of (role, cost) tuples."""
    roleCosts = []
    for x in range(self.GetItemCount()):
      roleName = self.GetItemText(x)
      cost = (self.GetItem(x,1)).GetText()
      roleCosts.append( (roleName,cost) )
    return roleCosts
| {
"content_hash": "9ba6c207d94885a0577404a9bdccf0de",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 87,
"avg_line_length": 33.14705882352941,
"alnum_prop": 0.7054125998225377,
"repo_name": "RobinQuetin/CAIRIS-web",
"id": "50b908a037cff156107f632a5ee3407b936e6fcd",
"size": "3053",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cairis/cairis/RoleCostListCtrl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11265"
},
{
"name": "Mako",
"bytes": "13226"
},
{
"name": "Python",
"bytes": "3313365"
},
{
"name": "Shell",
"bytes": "19461"
},
{
"name": "XSLT",
"bytes": "35522"
}
],
"symlink_target": ""
} |
import logging
import argparse
import sys
import csv
import datetime
from collections import defaultdict
from functools import partial, reduce
from itertools import groupby
# Command-line interface. The pattern (-p) assigns one aggregate keyword
# (or custom reducer expression) to each input column.
parser = argparse.ArgumentParser('aggr')
parser.add_argument('-p', '--pattern', required=True)
parser.add_argument('-i', '--infile')
parser.add_argument('-o', '--outfile')
parser.add_argument('-d', '--field-delimiter')
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('--datetime-format')
parser.add_argument('--initial-argument-for-custom-aggregator')
parser.add_argument('--time-format')
parser.add_argument('--date-format')
# Fix: strptime has no '%s' seconds directive -- seconds are '%S'. The
# previous defaults ('...:%s...') made datetime parsing fail on every
# value, silently leaving those fields as plain strings.
parser.set_defaults(field_delimiter=',', verbose=False,
    datetime_format='%Y-%m-%dT%H:%M:%S.%f%z', date_format='%Y-%m-%d',
    time_format='%H:%M:%S', initial_argument_for_custom_aggregator='0')
# Parsed CLI options; module-level so every helper below can read them.
args = parser.parse_args()
# Recognised per-column keywords for the -p/--pattern specification.
PATTERN_OPTIONS = ('key', 'sum', 'max', 'min', 'len', 'any', 'first', 'last')
# Map aggregate keywords to the callables implementing them; each
# callable receives the list of values of one column within a group.
FUNCTION_MAP = {
    'sum' : sum,
    'max' : max,
    'min' : min,
    'len' : len,
    'any' : any,
    # lambda parameters renamed: 'iter' shadowed the builtin.
    'first' : lambda seq: seq[0],
    'last' : lambda seq: seq[-1]
}
def parse_pattern():
    """Split the -p/--pattern option into its per-column keywords."""
    return args.pattern.split(args.field_delimiter)
def get_keyfunc():
    """Build the grouping key function for parsed records.

    Returns:
        A callable mapping a parsed record to the tuple of the fields
        whose pattern keyword is 'key'.
    """
    # Hoisted out of keyfunc: the pattern never changes during a run,
    # and keyfunc is invoked once per record by sorted() and groupby().
    options = parse_pattern()
    def keyfunc(record):
        return tuple(f
                     for f, option in zip(record, options)
                     if option == 'key')
    return keyfunc
def infer_type(field):
    """Coerce a string field to int, float or datetime; fall back to str.

    Candidate parsers are tried in order; the first that succeeds wins.
    If none do, the original string is returned unchanged.
    """
    # Plain nested function instead of the previous redundant
    # functools.partial wrapped around a lambda.
    def parse_datetime(f):
        return datetime.datetime.strptime(f, args.datetime_format)
    for convert in (int, float, parse_datetime):
        try:
            return convert(field)
        except ValueError:
            continue
    return field
def parse_record(raw_record):
    """Split a raw input line into stripped, type-coerced fields."""
    pieces = raw_record.split(args.field_delimiter)
    return [infer_type(piece.strip()) for piece in pieces]
def get_records():
    """Return the raw input lines: the whole --infile, or the stdin stream."""
    if not args.infile:
        return sys.stdin
    with open(args.infile, 'r') as source:
        return source.readlines()
def parse_aggregate_function(n):
    """Resolve the aggregator for pattern column *n*.

    Known keywords are looked up in FUNCTION_MAP; any other pattern
    token is treated as Python source for a custom binary reducer.
    """
    try:
        return FUNCTION_MAP[parse_pattern()[n]]
    except KeyError as ke:
        # Not a built-in keyword: fall through to the custom path.
        pass
    # SECURITY: eval() executes arbitrary code taken from the -p/--pattern
    # and --initial-argument-for-custom-aggregator CLI arguments. Fine for
    # a personal command-line tool; never feed it untrusted input.
    r = eval(parse_pattern()[n])
    # r is of the form
    # def r(accumulator, value_from_iterator):
    #     return value_from_iterator
    def logged_reduce(iterable):
        logging.debug(r)
        logging.debug(iterable)
        return reduce(r, iterable, eval(args.initial_argument_for_custom_aggregator))
    return logged_reduce
def aggregate_group(records):
    """Yield one aggregated value per non-key column of a record group.

    Args:
        records: Iterable of parsed records sharing the same key.

    Yields:
        The aggregate of each non-key column, in column order.
    """
    records = list(records)
    # Hoisted: the old code re-split the pattern string once per column.
    pattern = parse_pattern()
    for n, _ in enumerate(records[0]):
        if pattern[n] == 'key':
            continue
        func = parse_aggregate_function(n)
        column = [record[n] for record in records]
        yield func(column)
def output(results):
    """Write the aggregated rows as CSV to --outfile, or stdout."""
    def write_rows(stream):
        # One shared writer path instead of the previous duplicated code.
        writer = csv.writer(stream, delimiter=args.field_delimiter)
        for row in results:
            writer.writerow(row)
    if args.outfile:
        # newline='' stops the csv module's '\r\n' line endings being
        # translated again by the text layer (doubled rows on Windows).
        with open(args.outfile, 'w', newline='') as of:
            write_rows(of)
    else:
        write_rows(sys.stdout)
def aggr():
    """Top-level pipeline: read, parse, sort, group, aggregate, write."""
    keyfunc = get_keyfunc()
    parsed = sorted((parse_record(line) for line in get_records()),
                    key=keyfunc)
    results = (key + tuple(aggregate_group(group))
               for key, group in groupby(parsed, keyfunc))
    output(results)
if __name__ == '__main__':
    # Configure debug logging before any work so all stages are traced.
    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    aggr()
| {
"content_hash": "6e5a8e22418d219d8e439facbc0570cf",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 96,
"avg_line_length": 27.2109375,
"alnum_prop": 0.6124031007751938,
"repo_name": "Horb/aggr",
"id": "0bede642931b46166ea75980f0d64235c8a9f5dd",
"size": "3502",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aggr.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3502"
}
],
"symlink_target": ""
} |
"""Development settings and globals."""
from .base import *
# DEBUG CONFIGURATION
# Full tracebacks and debug features; development only.
DEBUG = True
INTERNAL_IPS = ('127.0.0.1',)
# Accept any Host header -- a development convenience; never use in production.
ALLOWED_HOSTS = ['*']
# EMAIL CONFIGURATION
# Print outgoing mail to the console instead of sending it.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# CACHE CONFIGURATION
# In-process memory cache; per-process and cleared on restart.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
    }
}
# FIXTURE CONFIGURATION
FIXTURE_DIRS = (
    os.path.join(PROJECT_ROOT, 'fixtures'),
)
| {
"content_hash": "59914455f314715d0c57531bb02e6da3",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 67,
"avg_line_length": 19.652173913043477,
"alnum_prop": 0.6814159292035398,
"repo_name": "tom-henderson/bookmarks",
"id": "7ca0ab3481c30e5dddd660e780ad24c80543eb82",
"size": "452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bookmarks/config/settings/local.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1247"
},
{
"name": "Dockerfile",
"bytes": "830"
},
{
"name": "HTML",
"bytes": "15829"
},
{
"name": "Python",
"bytes": "22201"
},
{
"name": "Shell",
"bytes": "686"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from copy import copy
from decimal import Decimal
import os
import unittest
from unittest import skipUnless
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.tests.utils import HAS_SPATIAL_DB, mysql
from django.db import router
from django.conf import settings
from django.test import TestCase
from django.utils._os import upath
if HAS_GDAL:
from django.contrib.gis.utils.layermapping import (LayerMapping,
LayerMapError, InvalidDecimal, MissingForeignKey)
from django.contrib.gis.gdal import DataSource
from .models import (
City, County, CountyFeat, Interstate, ICity1, ICity2, Invalid, State,
city_mapping, co_mapping, cofeat_mapping, inter_mapping)
# Directory holding the test shapefiles (../data relative to this module).
shp_path = os.path.realpath(os.path.join(os.path.dirname(upath(__file__)), os.pardir, 'data'))
city_shp = os.path.join(shp_path, 'cities', 'cities.shp')
co_shp = os.path.join(shp_path, 'counties', 'counties.shp')
inter_shp = os.path.join(shp_path, 'interstates', 'interstates.shp')
invalid_shp = os.path.join(shp_path, 'invalid', 'emptypoints.shp')
# Parallel lists describing what's expected in the county shapefile.
NAMES = ['Bexar', 'Galveston', 'Harris', 'Honolulu', 'Pueblo']
NUMS = [1, 2, 1, 19, 1] # Number of polygons for each.
STATES = ['Texas', 'Texas', 'Texas', 'Hawaii', 'Colorado']
@skipUnless(HAS_GDAL and HAS_SPATIAL_DB, "GDAL and spatial db are required.")
class LayerMapTest(TestCase):
    """End-to-end tests for LayerMapping shapefile-to-model imports."""
    def test_init(self):
        "Testing LayerMapping initialization."
        # Model field that does not exist.
        bad1 = copy(city_mapping)
        bad1['foobar'] = 'FooField'
        # Shapefile field that does not exist.
        bad2 = copy(city_mapping)
        bad2['name'] = 'Nombre'
        # Nonexistent geographic field type.
        bad3 = copy(city_mapping)
        bad3['point'] = 'CURVE'
        # Incrementing through the bad mapping dictionaries and
        # ensuring that a LayerMapError is raised.
        for bad_map in (bad1, bad2, bad3):
            with self.assertRaises(LayerMapError):
                lm = LayerMapping(City, city_shp, bad_map)
        # A LookupError should be thrown for bogus encodings.
        with self.assertRaises(LookupError):
            lm = LayerMapping(City, city_shp, city_mapping, encoding='foobar')
    def test_simple_layermap(self):
        "Test LayerMapping import of a simple point shapefile."
        # Setting up for the LayerMapping.
        lm = LayerMapping(City, city_shp, city_mapping)
        lm.save()
        # There should be three cities in the shape file.
        self.assertEqual(3, City.objects.count())
        # Opening up the shapefile, and verifying the values in each
        # of the features made it to the model.
        ds = DataSource(city_shp)
        layer = ds[0]
        for feat in layer:
            city = City.objects.get(name=feat['Name'].value)
            self.assertEqual(feat['Population'].value, city.population)
            self.assertEqual(Decimal(str(feat['Density'])), city.density)
            self.assertEqual(feat['Created'].value, city.dt)
            # Comparing the geometries.
            pnt1, pnt2 = feat.geom, city.point
            self.assertAlmostEqual(pnt1.x, pnt2.x, 5)
            self.assertAlmostEqual(pnt1.y, pnt2.y, 5)
    def test_layermap_strict(self):
        "Testing the `strict` keyword, and import of a LineString shapefile."
        # When the `strict` keyword is set an error encountered will force
        # the importation to stop.
        with self.assertRaises(InvalidDecimal):
            lm = LayerMapping(Interstate, inter_shp, inter_mapping)
            lm.save(silent=True, strict=True)
        Interstate.objects.all().delete()
        # This LayerMapping should work b/c `strict` is not set.
        lm = LayerMapping(Interstate, inter_shp, inter_mapping)
        lm.save(silent=True)
        # Two interstate should have imported correctly.
        self.assertEqual(2, Interstate.objects.count())
        # Verifying the values in the layer w/the model.
        ds = DataSource(inter_shp)
        # Only the first two features of this shapefile are valid.
        valid_feats = ds[0][:2]
        for feat in valid_feats:
            istate = Interstate.objects.get(name=feat['Name'].value)
            if feat.fid == 0:
                self.assertEqual(Decimal(str(feat['Length'])), istate.length)
            elif feat.fid == 1:
                # Everything but the first two decimal digits were truncated,
                # because the Interstate model's `length` field has decimal_places=2.
                self.assertAlmostEqual(feat.get('Length'), float(istate.length), 2)
            for p1, p2 in zip(feat.geom, istate.path):
                self.assertAlmostEqual(p1[0], p2[0], 6)
                self.assertAlmostEqual(p1[1], p2[1], 6)
    def county_helper(self, county_feat=True):
        "Helper function for ensuring the integrity of the mapped County models."
        for name, n, st in zip(NAMES, NUMS, STATES):
            # Should only be one record b/c of `unique` keyword.
            c = County.objects.get(name=name)
            self.assertEqual(n, len(c.mpoly))
            self.assertEqual(st, c.state.name) # Checking ForeignKey mapping.
            # Multiple records because `unique` was not set.
            if county_feat:
                qs = CountyFeat.objects.filter(name=name)
                self.assertEqual(n, qs.count())
    def test_layermap_unique_multigeometry_fk(self):
        "Testing the `unique`, and `transform`, geometry collection conversion, and ForeignKey mappings."
        # All the following should work.
        # NOTE(review): bare `except:` below also swallows KeyboardInterrupt;
        # `except Exception:` would be safer here.
        try:
            # Telling LayerMapping that we want no transformations performed on the data.
            lm = LayerMapping(County, co_shp, co_mapping, transform=False)
            # Specifying the source spatial reference system via the `source_srs` keyword.
            lm = LayerMapping(County, co_shp, co_mapping, source_srs=4269)
            lm = LayerMapping(County, co_shp, co_mapping, source_srs='NAD83')
            # Unique may take tuple or string parameters.
            for arg in ('name', ('name', 'mpoly')):
                lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique=arg)
        except:
            self.fail('No exception should be raised for proper use of keywords.')
        # Testing invalid params for the `unique` keyword.
        for e, arg in ((TypeError, 5.0), (ValueError, 'foobar'), (ValueError, ('name', 'mpolygon'))):
            self.assertRaises(e, LayerMapping, County, co_shp, co_mapping, transform=False, unique=arg)
        # No source reference system defined in the shapefile, should raise an error.
        if not mysql:
            self.assertRaises(LayerMapError, LayerMapping, County, co_shp, co_mapping)
        # Passing in invalid ForeignKey mapping parameters -- must be a dictionary
        # mapping for the model the ForeignKey points to.
        bad_fk_map1 = copy(co_mapping); bad_fk_map1['state'] = 'name'
        bad_fk_map2 = copy(co_mapping); bad_fk_map2['state'] = {'nombre' : 'State'}
        self.assertRaises(TypeError, LayerMapping, County, co_shp, bad_fk_map1, transform=False)
        self.assertRaises(LayerMapError, LayerMapping, County, co_shp, bad_fk_map2, transform=False)
        # There exist no State models for the ForeignKey mapping to work -- should raise
        # a MissingForeignKey exception (this error would be ignored if the `strict`
        # keyword is not set).
        lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique='name')
        self.assertRaises(MissingForeignKey, lm.save, silent=True, strict=True)
        # Now creating the state models so the ForeignKey mapping may work.
        State.objects.bulk_create([
            State(name='Colorado'), State(name='Hawaii'), State(name='Texas')
        ])
        # If a mapping is specified as a collection, all OGR fields that
        # are not collections will be converted into them. For example,
        # a Point column would be converted to MultiPoint. Other things being done
        # w/the keyword args:
        # `transform=False`: Specifies that no transform is to be done; this
        # has the effect of ignoring the spatial reference check (because the
        # county shapefile does not have implicit spatial reference info).
        #
        # `unique='name'`: Creates models on the condition that they have
        # unique county names; geometries from each feature however will be
        # appended to the geometry collection of the unique model. Thus,
        # all of the various islands in Honolulu county will be in in one
        # database record with a MULTIPOLYGON type.
        lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique='name')
        lm.save(silent=True, strict=True)
        # A reference that doesn't use the unique keyword; a new database record will
        # created for each polygon.
        lm = LayerMapping(CountyFeat, co_shp, cofeat_mapping, transform=False)
        lm.save(silent=True, strict=True)
        # The county helper is called to ensure integrity of County models.
        self.county_helper()
    def test_test_fid_range_step(self):
        "Tests the `fid_range` keyword and the `step` keyword of .save()."
        # Function for clearing out all the counties before testing.
        def clear_counties(): County.objects.all().delete()
        State.objects.bulk_create([
            State(name='Colorado'), State(name='Hawaii'), State(name='Texas')
        ])
        # Initializing the LayerMapping object to use in these tests.
        lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique='name')
        # Bad feature id ranges should raise a type error.
        bad_ranges = (5.0, 'foo', co_shp)
        for bad in bad_ranges:
            self.assertRaises(TypeError, lm.save, fid_range=bad)
        # Step keyword should not be allowed w/`fid_range`.
        fr = (3, 5) # layer[3:5]
        self.assertRaises(LayerMapError, lm.save, fid_range=fr, step=10)
        lm.save(fid_range=fr)
        # Features IDs 3 & 4 are for Galveston County, Texas -- only
        # one model is returned because the `unique` keyword was set.
        qs = County.objects.all()
        self.assertEqual(1, qs.count())
        self.assertEqual('Galveston', qs[0].name)
        # Features IDs 5 and beyond for Honolulu County, Hawaii, and
        # FID 0 is for Pueblo County, Colorado.
        clear_counties()
        lm.save(fid_range=slice(5, None), silent=True, strict=True) # layer[5:]
        lm.save(fid_range=slice(None, 1), silent=True, strict=True) # layer[:1]
        # Only Pueblo & Honolulu counties should be present because of
        # the `unique` keyword. Have to set `order_by` on this QuerySet
        # or else MySQL will return a different ordering than the other dbs.
        qs = County.objects.order_by('name')
        self.assertEqual(2, qs.count())
        hi, co = tuple(qs)
        hi_idx, co_idx = tuple(map(NAMES.index, ('Honolulu', 'Pueblo')))
        self.assertEqual('Pueblo', co.name)
        self.assertEqual(NUMS[co_idx], len(co.mpoly))
        self.assertEqual('Honolulu', hi.name)
        self.assertEqual(NUMS[hi_idx], len(hi.mpoly))
        # Testing the `step` keyword -- should get the same counties
        # regardless of we use a step that divides equally, that is odd,
        # or that is larger than the dataset.
        for st in (4,7,1000):
            clear_counties()
            lm.save(step=st, strict=True)
            self.county_helper(county_feat=False)
    def test_model_inheritance(self):
        "Tests LayerMapping on inherited models. See #12093."
        icity_mapping = {'name' : 'Name',
                         'population' : 'Population',
                         'density' : 'Density',
                         'point' : 'POINT',
                         'dt' : 'Created',
                         }
        # Parent model has geometry field.
        lm1 = LayerMapping(ICity1, city_shp, icity_mapping)
        lm1.save()
        # Grandparent has geometry field.
        lm2 = LayerMapping(ICity2, city_shp, icity_mapping)
        lm2.save()
        self.assertEqual(6, ICity1.objects.count())
        self.assertEqual(3, ICity2.objects.count())
    def test_invalid_layer(self):
        "Tests LayerMapping on invalid geometries. See #15378."
        invalid_mapping = {'point': 'POINT'}
        lm = LayerMapping(Invalid, invalid_shp, invalid_mapping,
                          source_srs=4326)
        lm.save(silent=True)
    def test_textfield(self):
        "Tests that String content fits also in a TextField"
        mapping = copy(city_mapping)
        mapping['name_txt'] = 'Name'
        lm = LayerMapping(City, city_shp, mapping)
        lm.save(silent=True, strict=True)
        self.assertEqual(City.objects.count(), 3)
        self.assertEqual(City.objects.all().order_by('name_txt')[0].name_txt, "Houston")
    def test_encoded_name(self):
        """ Test a layer containing utf-8-encoded name """
        city_shp = os.path.join(shp_path, 'ch-city', 'ch-city.shp')
        lm = LayerMapping(City, city_shp, city_mapping)
        lm.save(silent=True, strict=True)
        self.assertEqual(City.objects.count(), 1)
        self.assertEqual(City.objects.all()[0].name, "Zürich")
class OtherRouter(object):
    """Database router directing every read and write to the 'other' alias."""
    _alias = 'other'
    def db_for_read(self, model, **hints):
        return self._alias
    def db_for_write(self, model, **hints):
        # Writes follow reads to the same connection.
        return self.db_for_read(model, **hints)
    def allow_relation(self, obj1, obj2, **hints):
        # No opinion: defer to other routers / Django's default behaviour.
        return None
    def allow_migrate(self, db, model):
        return True
@skipUnless(HAS_GDAL and HAS_SPATIAL_DB, "GDAL and spatial db are required.")
class LayerMapRouterTest(TestCase):
    """Checks that LayerMapping honours database routers."""
    def setUp(self):
        # Swap in a router that sends everything to the 'other' alias.
        self.old_routers = router.routers
        router.routers = [OtherRouter()]
    def tearDown(self):
        # Restore the original routers so other tests are unaffected.
        router.routers = self.old_routers
    @unittest.skipUnless(len(settings.DATABASES) > 1, 'multiple databases required')
    def test_layermapping_default_db(self):
        # LayerMapping should pick up the router-selected connection.
        lm = LayerMapping(City, city_shp, city_mapping)
        self.assertEqual(lm.using, 'other')
| {
"content_hash": "abc0682227013cbca678856feef478f2",
"timestamp": "",
"source": "github",
"line_count": 330,
"max_line_length": 105,
"avg_line_length": 43.40909090909091,
"alnum_prop": 0.6330191972076789,
"repo_name": "denisenkom/django",
"id": "632cb98aebde640013adb28b8c05759b6954b637",
"size": "14342",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/contrib/gis/tests/layermap/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "50381"
},
{
"name": "JavaScript",
"bytes": "100648"
},
{
"name": "Python",
"bytes": "8801295"
},
{
"name": "Shell",
"bytes": "12135"
}
],
"symlink_target": ""
} |
from __future__ import division, print_function, absolute_import, unicode_literals
# from qtpy.QtCore import *
from qtpy.QtGui import *
# from qtpy.QtWidgets import *
# Custom item type ids for QStandardItem subclasses; application-defined
# types must start at QStandardItem.UserType.
CategoryItemType = QStandardItem.UserType + 1
PropertyItemType = CategoryItemType + 1
# Module-level icon registry. These start as empty placeholder QIcons and
# are presumably replaced via set_icon() at application startup -- confirm
# against the caller.
ADD_ICON = QIcon()
UP_ICON = QIcon()
DOWN_ICON = QIcon()
DELETE_ICON = QIcon()
COG_ICON = QIcon()
OPEN_DIR_ICON = QIcon()
# Stylesheet path; None until set_css_path() is called.
CSS_PATH = None
def set_css_path(path):
    """Store *path* as the module-wide stylesheet location (CSS_PATH)."""
    global CSS_PATH
    CSS_PATH = path
def cog_icon():
    """Return the currently registered cog (settings) icon."""
    return COG_ICON
def set_icon(**params):
    """Override any of the module-level icons via keyword arguments.

    Recognised keys: add_icon, up_icon, down_icon, delete_icon,
    cog_icon, open_dir_icon. Icons not named keep their current value;
    unrecognised keys are ignored.
    """
    icon_globals = {
        "add_icon": "ADD_ICON",
        "up_icon": "UP_ICON",
        "down_icon": "DOWN_ICON",
        "delete_icon": "DELETE_ICON",
        "cog_icon": "COG_ICON",
        "open_dir_icon": "OPEN_DIR_ICON",
    }
    module_namespace = globals()
    for param_key, global_name in icon_globals.items():
        if param_key in params:
            module_namespace[global_name] = params[param_key]
| {
"content_hash": "eb601db768a63f07d1004f1477f05fa4",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 82,
"avg_line_length": 26.852941176470587,
"alnum_prop": 0.6845564074479737,
"repo_name": "pashango2/sphinx-explorer",
"id": "7f7613a6421656252771d305ee62738f8f4448d5",
"size": "959",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sphinx_explorer/property_widget/define.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15218"
},
{
"name": "Python",
"bytes": "292789"
},
{
"name": "QMake",
"bytes": "253"
}
],
"symlink_target": ""
} |
"""Methods to read data in the graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.input_pipeline.python.ops import input_pipeline_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as var_ops
from tensorflow.python.platform import gfile
from tensorflow.python.summary import summary
from tensorflow.python.training import input as input_ops
from tensorflow.python.training import queue_runner
# Default name for key in the feature dict.
KEY_FEATURE_NAME = '__key__'
def read_batch_examples(file_pattern,
                        batch_size,
                        reader,
                        randomize_input=True,
                        num_epochs=None,
                        queue_capacity=10000,
                        num_threads=1,
                        read_batch_size=1,
                        parse_fn=None,
                        name=None):
  """Adds operations to read, queue, batch `Example` protos.

  Identical to `read_keyed_batch_examples`, except that the string keys
  produced while reading are discarded and only the batched `Example`
  protos are returned.

  All queue runners are added to the queue runners collection, and may be
  started via `start_queue_runners`.  All ops are added to the default graph.

  Use `parse_fn` if you need to do parsing / processing on single examples.

  Args:
    file_pattern: List of files or pattern of file paths containing
        `Example` records. See `tf.gfile.Glob` for pattern rules.
    batch_size: An int or scalar `Tensor` specifying the batch size to use.
    reader: A function or class that returns an object with a `read` method,
        (filename tensor) -> (example tensor).
    randomize_input: Whether the input should be randomized.
    num_epochs: Integer specifying the number of times to read through the
        dataset. If `None`, cycles through the dataset forever.
        NOTE - If specified, creates a variable that must be initialized, so
        call `tf.global_variables_initializer()` as shown in the tests.
    queue_capacity: Capacity for input queue.
    num_threads: The number of threads enqueuing examples.
    read_batch_size: An int or scalar `Tensor` specifying the number of
        records to read at once.
    parse_fn: Parsing function, takes `Example` Tensor returns parsed
        representation. If `None`, no parsing is done.
    name: Name of resulting op.

  Returns:
    String `Tensor` of batched `Example` proto.

  Raises:
    ValueError: for invalid inputs.
  """
  keyed_batch = read_keyed_batch_examples(
      file_pattern=file_pattern,
      batch_size=batch_size,
      reader=reader,
      randomize_input=randomize_input,
      num_epochs=num_epochs,
      queue_capacity=queue_capacity,
      num_threads=num_threads,
      read_batch_size=read_batch_size,
      parse_fn=parse_fn,
      name=name)
  # Drop the keys; callers of this function only want the examples.
  return keyed_batch[1]
def read_keyed_batch_examples(file_pattern,
                              batch_size,
                              reader,
                              randomize_input=True,
                              num_epochs=None,
                              queue_capacity=10000,
                              num_threads=1,
                              read_batch_size=1,
                              parse_fn=None,
                              name=None):
  """Adds operations to read, queue, batch `Example` protos.

  Given a file pattern (or list of files), sets up a queue of file names,
  reads `Example` protos with the provided `reader`, and batches them to
  `batch_size` with a batching queue.  Each worker keeps its own independent
  file-name queue (contrast with the shared-queue variant).

  All queue runners are added to the queue runners collection, and may be
  started via `start_queue_runners`.  All ops are added to the default graph.

  Use `parse_fn` if you need to do parsing / processing on single examples.

  Args:
    file_pattern: List of files or pattern of file paths containing
        `Example` records. See `tf.gfile.Glob` for pattern rules.
    batch_size: An int or scalar `Tensor` specifying the batch size to use.
    reader: A function or class that returns an object with a `read` method,
        (filename tensor) -> (example tensor).
    randomize_input: Whether the input should be randomized.
    num_epochs: Integer specifying the number of times to read through the
        dataset. If `None`, cycles through the dataset forever.
        NOTE - If specified, creates a variable that must be initialized, so
        call `tf.global_variables_initializer()` as shown in the tests.
    queue_capacity: Capacity for input queue.
    num_threads: The number of threads enqueuing examples.
    read_batch_size: An int or scalar `Tensor` specifying the number of
        records to read at once.
    parse_fn: Parsing function, takes `Example` Tensor returns parsed
        representation. If `None`, no parsing is done.
    name: Name of resulting op.

  Returns:
    Returns tuple of:
    - `Tensor` of string keys.
    - String `Tensor` of batched `Example` proto.

  Raises:
    ValueError: for invalid inputs.
  """
  return _read_keyed_batch_examples_helper(
      file_pattern=file_pattern,
      batch_size=batch_size,
      reader=reader,
      randomize_input=randomize_input,
      num_epochs=num_epochs,
      queue_capacity=queue_capacity,
      num_threads=num_threads,
      read_batch_size=read_batch_size,
      parse_fn=parse_fn,
      setup_shared_queue=False,
      name=name)
def _read_keyed_batch_examples_shared_queue(file_pattern,
                                            batch_size,
                                            reader,
                                            randomize_input=True,
                                            num_epochs=None,
                                            queue_capacity=10000,
                                            num_threads=1,
                                            read_batch_size=1,
                                            parse_fn=None,
                                            name=None):
  """Adds operations to read, queue, batch `Example` protos (shared queue).

  Like `read_keyed_batch_examples`, but the file names come from a queue
  shared across workers: each worker pulls from the shared queue, reads
  `Example` protos with `reader`, and batches them to `batch_size`.  This
  gives at-most-once visitation of each file.  Note this only works if the
  parameter servers are not pre-empted or restarted and the session is not
  restored from a checkpoint, since queue state is not checkpointed — a
  restart would begin again from the entire list of files.

  All queue runners are added to the queue runners collection, and may be
  started via `start_queue_runners`.  All ops are added to the default graph.

  Use `parse_fn` if you need to do parsing / processing on single examples.

  Args:
    file_pattern: List of files or pattern of file paths containing
        `Example` records. See `tf.gfile.Glob` for pattern rules.
    batch_size: An int or scalar `Tensor` specifying the batch size to use.
    reader: A function or class that returns an object with a `read` method,
        (filename tensor) -> (example tensor).
    randomize_input: Whether the input should be randomized.
    num_epochs: Integer specifying the number of times to read through the
        dataset. If `None`, cycles through the dataset forever.
        NOTE - If specified, creates a variable that must be initialized, so
        call `tf.global_variables_initializer()` as shown in the tests.
    queue_capacity: Capacity for input queue.
    num_threads: The number of threads enqueuing examples.
    read_batch_size: An int or scalar `Tensor` specifying the number of
        records to read at once.
    parse_fn: Parsing function, takes `Example` Tensor returns parsed
        representation. If `None`, no parsing is done.
    name: Name of resulting op.

  Returns:
    Returns tuple of:
    - `Tensor` of string keys.
    - String `Tensor` of batched `Example` proto.

  Raises:
    ValueError: for invalid inputs.
  """
  return _read_keyed_batch_examples_helper(
      file_pattern=file_pattern,
      batch_size=batch_size,
      reader=reader,
      randomize_input=randomize_input,
      num_epochs=num_epochs,
      queue_capacity=queue_capacity,
      num_threads=num_threads,
      read_batch_size=read_batch_size,
      parse_fn=parse_fn,
      setup_shared_queue=True,
      name=name)
def _get_file_names(file_pattern, randomize_input):
"""Parse list of file names from pattern, optionally shuffled.
Args:
file_pattern: File glob pattern, or list of strings.
randomize_input: Whether to shuffle the order of file names.
Returns:
List of file names matching `file_pattern`.
Raises:
ValueError: If `file_pattern` is empty, or pattern matches no files.
"""
if isinstance(file_pattern, list):
file_names = file_pattern
if not file_names:
raise ValueError('No files given to dequeue_examples.')
else:
file_names = list(gfile.Glob(file_pattern))
if not file_names:
raise ValueError('No files match %s.' % file_pattern)
# Sort files so it will be deterministic for unit tests. They'll be shuffled
# in `string_input_producer` if `randomize_input` is enabled.
if not randomize_input:
file_names = sorted(file_names)
return file_names
def _get_examples(file_name_queue, reader, num_threads, read_batch_size,
                  parse_fn):
  """Builds one (keys, examples) read op per reader thread.

  Returns a list with `num_threads` entries suitable for `batch_join` /
  `shuffle_batch_join`.  If `parse_fn` returns a dict, the keys tensor is
  stored inside that dict under `KEY_FEATURE_NAME`, because `batch_join`
  cannot handle a (Tensor, dict) tuple.
  """
  with ops.name_scope('read'):
    example_list = []
    for _ in range(num_threads):
      reader_instance = reader()
      if read_batch_size > 1:
        keys, protos = reader_instance.read_up_to(file_name_queue,
                                                  read_batch_size)
      else:
        keys, protos = reader_instance.read(file_name_queue)
      if not parse_fn:
        example_list.append((keys, protos))
        continue
      parsed = parse_fn(protos)
      if isinstance(parsed, dict):
        # Stash keys inside the feature dict; batch_join doesn't support
        # a tuple of Tensor + dict.
        parsed[KEY_FEATURE_NAME] = keys
        example_list.append(parsed)
      else:
        example_list.append((keys, parsed))
    return example_list
def _read_keyed_batch_examples_helper(file_pattern,
                                      batch_size,
                                      reader,
                                      randomize_input=True,
                                      num_epochs=None,
                                      queue_capacity=10000,
                                      num_threads=1,
                                      read_batch_size=1,
                                      parse_fn=None,
                                      setup_shared_queue=False,
                                      name=None):
  """Adds operations to read, queue, batch `Example` protos.
  Args:
    file_pattern: List of files or pattern of file paths containing
        `Example` records. See `tf.gfile.Glob` for pattern rules.
    batch_size: An int or scalar `Tensor` specifying the batch size to use.
    reader: A function or class that returns an object with
      `read` method, (filename tensor) -> (example tensor).
    randomize_input: Whether the input should be randomized.
    num_epochs: Integer specifying the number of times to read through the
      dataset. If `None`, cycles through the dataset forever.
      NOTE - If specified, creates a variable that must be initialized, so call
      `tf.global_variables_initializer()` as shown in the tests.
    queue_capacity: Capacity for input queue.
    num_threads: The number of threads enqueuing examples.
    read_batch_size: An int or scalar `Tensor` specifying the number of
      records to read at once
    parse_fn: Parsing function, takes `Example` Tensor returns parsed
      representation. If `None`, no parsing is done.
    setup_shared_queue: Whether to set up a shared queue for file names.
    name: Name of resulting op.
  Returns:
    Returns tuple of:
    - `Tensor` of string keys.
    - String `Tensor` of batched `Example` proto.
  Raises:
    ValueError: for invalid inputs.
  """
  # Retrieve files to read.
  file_names = _get_file_names(file_pattern, randomize_input)
  # Check input parameters are given and reasonable.
  # `batch_size` / `read_batch_size` may be scalar Tensors; the static range
  # checks below are only applied when they are plain Python ints.
  if (not queue_capacity) or (queue_capacity <= 0):
    raise ValueError('Invalid queue_capacity %s.' % queue_capacity)
  if (batch_size is None) or ((not isinstance(batch_size, ops.Tensor)) and
                              (batch_size <= 0 or batch_size > queue_capacity)):
    raise ValueError('Invalid batch_size %s, with queue_capacity %s.' %
                     (batch_size, queue_capacity))
  if (read_batch_size is None) or (
      (not isinstance(read_batch_size, ops.Tensor)) and (read_batch_size <= 0)):
    raise ValueError('Invalid read_batch_size %s.' % read_batch_size)
  if (not num_threads) or (num_threads <= 0):
    raise ValueError('Invalid num_threads %s.' % num_threads)
  if (num_epochs is not None) and (num_epochs <= 0):
    raise ValueError('Invalid num_epochs %s.' % num_epochs)
  with ops.name_scope(name, 'read_batch_examples', [file_pattern]) as scope:
    with ops.name_scope('file_name_queue') as file_name_queue_scope:
      if setup_shared_queue:
        # Shared-queue mode: a capacity-1 local FIFO queue is continuously
        # refilled from `seek_next`, which hands out each file name so that
        # files are visited at most once across workers.
        file_name_queue = data_flow_ops.FIFOQueue(
            capacity=1, dtypes=[dtypes.string], shapes=[[]])
        enqueue_op = file_name_queue.enqueue(
            input_pipeline_ops.seek_next(
                file_names, shuffle=randomize_input, num_epochs=num_epochs))
        queue_runner.add_queue_runner(
            queue_runner.QueueRunner(file_name_queue, [enqueue_op]))
      else:
        # Per-worker mode: each worker owns an independent file-name queue.
        file_name_queue = input_ops.string_input_producer(
            constant_op.constant(
                file_names, name='input'),
            shuffle=randomize_input,
            num_epochs=num_epochs,
            name=file_name_queue_scope)
    example_list = _get_examples(file_name_queue, reader, num_threads,
                                 read_batch_size, parse_fn)
    enqueue_many = read_batch_size > 1
    # Only allow a short final batch when the number of epochs is bounded;
    # with unbounded reading, full batches can always be formed.
    if num_epochs is None:
      allow_smaller_final_batch = False
    else:
      allow_smaller_final_batch = True
    # Setup batching queue given list of read example tensors.
    if randomize_input:
      if isinstance(batch_size, ops.Tensor):
        # batch_size is dynamic, so the static heuristic below cannot be
        # computed; keep the queue at least 40% full instead.
        min_after_dequeue = int(queue_capacity * 0.4)
      else:
        min_after_dequeue = max(queue_capacity - (3 * batch_size), batch_size)
      queued_examples_with_keys = input_ops.shuffle_batch_join(
          example_list,
          batch_size,
          capacity=queue_capacity,
          min_after_dequeue=min_after_dequeue,
          enqueue_many=enqueue_many,
          name=scope,
          allow_smaller_final_batch=allow_smaller_final_batch)
    else:
      queued_examples_with_keys = input_ops.batch_join(
          example_list,
          batch_size,
          capacity=queue_capacity,
          enqueue_many=enqueue_many,
          name=scope,
          allow_smaller_final_batch=allow_smaller_final_batch)
    if parse_fn and isinstance(queued_examples_with_keys, dict):
      # `_get_examples` stored the keys inside the feature dict (batch_join
      # does not accept a (Tensor, dict) pair); pull them back out here.
      queued_keys = queued_examples_with_keys.pop(KEY_FEATURE_NAME)
      return queued_keys, queued_examples_with_keys
    return queued_examples_with_keys
def read_keyed_batch_features(file_pattern,
                              batch_size,
                              features,
                              reader,
                              randomize_input=True,
                              num_epochs=None,
                              queue_capacity=10000,
                              reader_num_threads=1,
                              feature_queue_capacity=100,
                              num_enqueue_threads=2,
                              parse_fn=None,
                              name=None):
  """Adds operations to read, queue, batch and parse `Example` protos.

  Sets up a file-name queue for `file_pattern`, reads serialized `Example`
  protos with `reader`, batches them to `batch_size`, parses each batch
  according to the `features` specification, and finally pushes the parsed
  tensors through an asynchronous feature queue (see `queue_parsed_features`).

  All queue runners are added to the queue runners collection, and may be
  started via `start_queue_runners`.  All ops are added to the default graph.

  Args:
    file_pattern: List of files or pattern of file paths containing
        `Example` records. See `tf.gfile.Glob` for pattern rules.
    batch_size: An int or scalar `Tensor` specifying the batch size to use.
    features: A `dict` mapping feature keys to `FixedLenFeature` or
        `VarLenFeature` values.
    reader: A function or class that returns an object with a `read` method,
        (filename tensor) -> (example tensor).
    randomize_input: Whether the input should be randomized.
    num_epochs: Integer specifying the number of times to read through the
        dataset. If None, cycles through the dataset forever. NOTE - If
        specified, creates a variable that must be initialized, so call
        tf.local_variables_initializer() as shown in the tests.
    queue_capacity: Capacity for input queue.
    reader_num_threads: The number of threads to read examples.
    feature_queue_capacity: Capacity of the parsed features queue.
    num_enqueue_threads: Number of threads to enqueue the parsed example
        queue. Multiple enqueue threads help keep the queue full when the
        subsequent computations overall are cheaper than parsing.
    parse_fn: Parsing function, takes `Example` Tensor returns parsed
        representation. If `None`, no parsing is done.
    name: Name of resulting op.

  Returns:
    Returns tuple of:
    - `Tensor` of string keys.
    - A dict of `Tensor` or `SparseTensor` objects for each in `features`.

  Raises:
    ValueError: for invalid inputs.
  """
  with ops.name_scope(name, 'read_batch_features', [file_pattern]) as scope:
    keys, examples = read_keyed_batch_examples(
        file_pattern,
        batch_size,
        reader,
        randomize_input=randomize_input,
        num_epochs=num_epochs,
        queue_capacity=queue_capacity,
        num_threads=reader_num_threads,
        read_batch_size=batch_size,
        parse_fn=parse_fn,
        name=scope)
    # Parse the serialized protos into a feature dict, then decouple parsing
    # from downstream consumers via an asynchronous queue.
    parsed = parsing_ops.parse_example(examples, features)
    return queue_parsed_features(
        parsed,
        keys=keys,
        feature_queue_capacity=feature_queue_capacity,
        num_enqueue_threads=num_enqueue_threads,
        name=scope)
def _read_keyed_batch_features_shared_queue(file_pattern,
                                            batch_size,
                                            features,
                                            reader,
                                            randomize_input=True,
                                            num_epochs=None,
                                            queue_capacity=10000,
                                            reader_num_threads=1,
                                            feature_queue_capacity=100,
                                            num_queue_runners=2,
                                            parse_fn=None,
                                            name=None):
  """Adds operations to read, queue, batch and parse `Example` protos.

  Like `read_keyed_batch_features`, but the file names are drawn from a
  queue shared across workers (see
  `_read_keyed_batch_examples_shared_queue`): each worker pulls file names
  from the shared queue, reads `Example` protos with `reader`, batches them
  to `batch_size`, and parses each batch per the `features` specification.

  All queue runners are added to the queue runners collection, and may be
  started via `start_queue_runners`.  All ops are added to the default graph.

  Args:
    file_pattern: List of files or pattern of file paths containing
        `Example` records. See `tf.gfile.Glob` for pattern rules.
    batch_size: An int or scalar `Tensor` specifying the batch size to use.
    features: A `dict` mapping feature keys to `FixedLenFeature` or
        `VarLenFeature` values.
    reader: A function or class that returns an object with a `read` method,
        (filename tensor) -> (example tensor).
    randomize_input: Whether the input should be randomized.
    num_epochs: Integer specifying the number of times to read through the
        dataset. If None, cycles through the dataset forever. NOTE - If
        specified, creates a variable that must be initialized, so call
        tf.local_variables_initializer() as shown in the tests.
    queue_capacity: Capacity for input queue.
    reader_num_threads: The number of threads to read examples.
    feature_queue_capacity: Capacity of the parsed features queue.
    num_queue_runners: Number of threads to enqueue the parsed example
        queue. Multiple enqueue threads help keep the queue full when the
        subsequent computations overall are cheaper than parsing.
    parse_fn: Parsing function, takes `Example` Tensor returns parsed
        representation. If `None`, no parsing is done.
    name: Name of resulting op.

  Returns:
    Returns tuple of:
    - `Tensor` of string keys.
    - A dict of `Tensor` or `SparseTensor` objects for each in `features`.

  Raises:
    ValueError: for invalid inputs.
  """
  with ops.name_scope(name, 'read_batch_features', [file_pattern]) as scope:
    keys, examples = _read_keyed_batch_examples_shared_queue(
        file_pattern,
        batch_size,
        reader,
        randomize_input=randomize_input,
        num_epochs=num_epochs,
        queue_capacity=queue_capacity,
        num_threads=reader_num_threads,
        read_batch_size=batch_size,
        parse_fn=parse_fn,
        name=scope)
    # Parse the serialized protos into a feature dict, then decouple parsing
    # from downstream consumers via an asynchronous queue.
    parsed = parsing_ops.parse_example(examples, features)
    return queue_parsed_features(
        parsed,
        keys=keys,
        feature_queue_capacity=feature_queue_capacity,
        num_enqueue_threads=num_queue_runners,
        name=scope)
def queue_parsed_features(parsed_features,
                          keys=None,
                          feature_queue_capacity=100,
                          num_enqueue_threads=2,
                          name=None):
  """Speeds up parsing by using queues to do it asynchronously.
  This function adds the tensors in `parsed_features` to a queue, which allows
  the parsing (or any other expensive op before this) to be asynchronous wrt the
  rest of the training graph. This greatly improves read latency and speeds up
  training since the data will already be parsed and ready when each step of
  training needs it.
  All queue runners are added to the queue runners collection, and may be
  started via `start_queue_runners`.
  All ops are added to the default graph.
  Args:
    parsed_features: A dict of string key to `Tensor` or `SparseTensor` objects.
    keys: `Tensor` of string keys.
    feature_queue_capacity: Capacity of the parsed features queue.
    num_enqueue_threads: Number of threads to enqueue the parsed example queue.
      Using multiple threads to enqueue the parsed example queue helps maintain
      a full queue when the subsequent computations overall are cheaper than
      parsing.
    name: Name of resulting op.
  Returns:
    Returns tuple of:
    - `Tensor` corresponding to `keys` if provided, otherwise `None`.
    -  A dict of string key to `Tensor` or `SparseTensor` objects corresponding
       to `parsed_features`.
  Raises:
    ValueError: for invalid inputs.
  """
  args = list(parsed_features.values())
  if keys is not None:
    args += [keys]
  with ops.name_scope(name, 'queue_parsed_features', args):
    # Lets also add preprocessed tensors into the queue types for each item of
    # the queue.
    tensors_to_enqueue = []
    # Each entry contains the key, and a boolean which indicates whether the
    # tensor was a sparse tensor.
    tensors_mapping = []
    # TODO(sibyl-Aix6ihai): Most of the functionality here is about pushing sparse
    # tensors into a queue. This could be taken care in somewhere else so others
    # can reuse it. Also, QueueBase maybe extended to handle sparse tensors
    # directly.
    # Keys are visited in sorted order so that the flattened enqueue layout is
    # deterministic; the dequeue loop below reconstructs the dict by walking
    # `tensors_mapping` in the same order.
    for key in sorted(parsed_features.keys()):
      tensor = parsed_features[key]
      if isinstance(tensor, sparse_tensor.SparseTensor):
        # A SparseTensor is flattened into its three component tensors for
        # the queue and reassembled after dequeue.
        tensors_mapping.append((key, True))
        tensors_to_enqueue.extend(
            [tensor.indices, tensor.values, tensor.dense_shape])
      else:
        tensors_mapping.append((key, False))
        tensors_to_enqueue.append(tensor)
    if keys is not None:
      # Keys go last, so they can be recovered as dequeued_tensors[-1] below.
      tensors_to_enqueue.append(keys)
    queue_dtypes = [x.dtype for x in tensors_to_enqueue]
    # The queue is created without static shapes, so shapes must be reset on
    # the dequeued tensors (see the set_shape loop below).
    input_queue = data_flow_ops.FIFOQueue(feature_queue_capacity, queue_dtypes)
    # Add a summary op to debug if our feature queue is full or not.
    summary.scalar('queue/parsed_features/%s/fraction_of_%d_full' %
                   (input_queue.name, feature_queue_capacity),
                   math_ops.cast(input_queue.size(), dtypes.float32) *
                   (1. / feature_queue_capacity))
    # Use a single QueueRunner with multiple threads to enqueue so the queue is
    # always full. The threads are coordinated so the last batch will not be
    # lost.
    enqueue_ops = [
        input_queue.enqueue(tensors_to_enqueue)
        for _ in range(num_enqueue_threads)
    ]
    queue_runner.add_queue_runner(
        queue_runner.QueueRunner(
            input_queue,
            enqueue_ops,
            queue_closed_exception_types=(errors.OutOfRangeError,
                                          errors.CancelledError)))
    dequeued_tensors = input_queue.dequeue()
    # Reset shapes on dequeued tensors.
    for i in range(len(tensors_to_enqueue)):
      dequeued_tensors[i].set_shape(tensors_to_enqueue[i].get_shape())
    # Recreate feature mapping according to the original dictionary.
    # `index` walks the flat dequeued list; sparse entries consume three
    # consecutive slots, dense entries consume one (mirrors enqueue order).
    dequeued_parsed_features = {}
    index = 0
    for key, is_sparse_tensor in tensors_mapping:
      if is_sparse_tensor:
        # Three tensors are (indices, values, shape).
        dequeued_parsed_features[key] = sparse_tensor.SparseTensor(
            dequeued_tensors[index], dequeued_tensors[index + 1],
            dequeued_tensors[index + 2])
        index += 3
      else:
        dequeued_parsed_features[key] = dequeued_tensors[index]
        index += 1
    dequeued_keys = None
    if keys is not None:
      # Keys were enqueued last, so they come off the queue last as well.
      dequeued_keys = dequeued_tensors[-1]
    return dequeued_keys, dequeued_parsed_features
def read_batch_features(file_pattern,
                        batch_size,
                        features,
                        reader,
                        randomize_input=True,
                        num_epochs=None,
                        queue_capacity=10000,
                        feature_queue_capacity=100,
                        reader_num_threads=1,
                        parse_fn=None,
                        name=None):
  """Adds operations to read, queue, batch and parse `Example` protos.

  Identical to `read_keyed_batch_features`, except that the string keys
  produced while reading are discarded and only the parsed feature dict is
  returned.

  All queue runners are added to the queue runners collection, and may be
  started via `start_queue_runners`.  All ops are added to the default graph.

  Args:
    file_pattern: List of files or pattern of file paths containing
        `Example` records. See `tf.gfile.Glob` for pattern rules.
    batch_size: An int or scalar `Tensor` specifying the batch size to use.
    features: A `dict` mapping feature keys to `FixedLenFeature` or
        `VarLenFeature` values.
    reader: A function or class that returns an object with a `read` method,
        (filename tensor) -> (example tensor).
    randomize_input: Whether the input should be randomized.
    num_epochs: Integer specifying the number of times to read through the
        dataset. If None, cycles through the dataset forever. NOTE - If
        specified, creates a variable that must be initialized, so call
        tf.local_variables_initializer() as shown in the tests.
    queue_capacity: Capacity for input queue.
    feature_queue_capacity: Capacity of the parsed features queue. Set this
        value to a small number, for example 5 if the parsed features are
        large.
    reader_num_threads: The number of threads to read examples.
    parse_fn: Parsing function, takes `Example` Tensor returns parsed
        representation. If `None`, no parsing is done.
    name: Name of resulting op.

  Returns:
    A dict of `Tensor` or `SparseTensor` objects for each in `features`.

  Raises:
    ValueError: for invalid inputs.
  """
  keyed_features = read_keyed_batch_features(
      file_pattern,
      batch_size,
      features,
      reader,
      randomize_input=randomize_input,
      num_epochs=num_epochs,
      queue_capacity=queue_capacity,
      feature_queue_capacity=feature_queue_capacity,
      reader_num_threads=reader_num_threads,
      parse_fn=parse_fn,
      name=name)
  # Drop the keys; callers of this function only want the feature dict.
  return keyed_features[1]
def read_batch_record_features(file_pattern,
                               batch_size,
                               features,
                               randomize_input=True,
                               num_epochs=None,
                               queue_capacity=10000,
                               reader_num_threads=1,
                               name='dequeue_record_examples'):
  """Reads TFRecord, queues, batches and parses `Example` proto.

  Convenience wrapper around `read_batch_features` with the reader fixed to
  `io_ops.TFRecordReader`.  See `read_examples` for a more detailed
  description.

  Args:
    file_pattern: List of files or pattern of file paths containing
        `Example` records. See `tf.gfile.Glob` for pattern rules.
    batch_size: An int or scalar `Tensor` specifying the batch size to use.
    features: A `dict` mapping feature keys to `FixedLenFeature` or
        `VarLenFeature` values.
    randomize_input: Whether the input should be randomized.
    num_epochs: Integer specifying the number of times to read through the
        dataset. If None, cycles through the dataset forever. NOTE - If
        specified, creates a variable that must be initialized, so call
        tf.local_variables_initializer() as shown in the tests.
    queue_capacity: Capacity for input queue.
    reader_num_threads: The number of threads to read examples.
    name: Name of resulting op.

  Returns:
    A dict of `Tensor` or `SparseTensor` objects for each in `features`.

  Raises:
    ValueError: for invalid inputs.
  """
  return read_batch_features(
      file_pattern=file_pattern,
      batch_size=batch_size,
      features=features,
      reader=io_ops.TFRecordReader,
      randomize_input=randomize_input,
      num_epochs=num_epochs,
      queue_capacity=queue_capacity,
      reader_num_threads=reader_num_threads,
      name=name)
| {
"content_hash": "1bc308ee1c7ae169a1c79b3bc16eb3e1",
"timestamp": "",
"source": "github",
"line_count": 773,
"max_line_length": 82,
"avg_line_length": 40.746442432082794,
"alnum_prop": 0.6378702733593675,
"repo_name": "elingg/tensorflow",
"id": "7ab6aafdf39e754d09ca81ad75c884cc4603774a",
"size": "32186",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/learn/python/learn/learn_io/graph_io.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6963"
},
{
"name": "C",
"bytes": "126495"
},
{
"name": "C++",
"bytes": "20090320"
},
{
"name": "CMake",
"bytes": "111800"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "96872"
},
{
"name": "HTML",
"bytes": "538462"
},
{
"name": "Java",
"bytes": "215285"
},
{
"name": "JavaScript",
"bytes": "13406"
},
{
"name": "Jupyter Notebook",
"bytes": "4068483"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "29647"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64592"
},
{
"name": "Python",
"bytes": "16219111"
},
{
"name": "Shell",
"bytes": "314152"
},
{
"name": "TypeScript",
"bytes": "761620"
}
],
"symlink_target": ""
} |
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.21
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ProductVideoListResponse(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, data=None, pagination=None):
        """
        ProductVideoListResponse - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # Swagger type declarations used by to_dict() to walk the model.
        self.swagger_types = {
            'data': 'list[Video]',
            'pagination': 'Pagination'
        }
        # Mapping from python attribute names to JSON keys.
        self.attribute_map = {
            'data': 'data',
            'pagination': 'pagination'
        }
        self._data = data
        self._pagination = pagination

    @property
    def data(self):
        """
        Gets the data of this ProductVideoListResponse.

        :return: The data of this ProductVideoListResponse.
        :rtype: list[Video]
        """
        return self._data

    @data.setter
    def data(self, data):
        """
        Sets the data of this ProductVideoListResponse.

        :param data: The data of this ProductVideoListResponse.
        :type: list[Video]
        """
        self._data = data

    @property
    def pagination(self):
        """
        Gets the pagination of this ProductVideoListResponse.

        :return: The pagination of this ProductVideoListResponse.
        :rtype: Pagination
        """
        return self._pagination

    @pagination.setter
    def pagination(self, pagination):
        """
        Sets the pagination of this ProductVideoListResponse.

        :param pagination: The pagination of this ProductVideoListResponse.
        :type: Pagination
        """
        self._pagination = pagination

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # Guard against comparing with arbitrary objects: the previous
        # implementation dereferenced other.__dict__ unconditionally, which
        # raised AttributeError for objects without a __dict__ (e.g. ints).
        if not isinstance(other, ProductVideoListResponse):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| {
"content_hash": "7169945930242b1dfed51d8672b25ed8",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 77,
"avg_line_length": 26.074626865671643,
"alnum_prop": 0.5323411562678878,
"repo_name": "kinow-io/kinow-python-sdk",
"id": "e47fdb1e749f136c0b53cb2875b12c7a26e8488c",
"size": "3511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kinow_client/models/product_video_list_response.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4659182"
},
{
"name": "Shell",
"bytes": "1666"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
from fnmatch import fnmatch
from functools import wraps, partial
from glob import glob
from math import ceil
from operator import getitem
import os
import re
from threading import Lock
import uuid
import pandas as pd
import numpy as np
from toolz import merge, assoc, dissoc
from ..compatibility import BytesIO, unicode, range, apply
from ..utils import textblock, file_size, get_bom
from ..base import tokenize
from .. import array as da
from ..async import get_sync
from . import core
from .core import DataFrame, Series
from .shuffle import set_partition
lock = Lock()
csv_defaults = {'compression': None}
def _read_csv(fn, i, chunkbytes, compression, kwargs, bom):
    """Read the i-th ``chunkbytes``-sized block of CSV file ``fn`` into a
    pandas DataFrame.

    The block is prefixed with ``bom`` so every chunk decodes the same way
    as the start of the file.  On a dtype-mismatch ValueError from pandas, a
    detailed explanatory message is raised instead.
    """
    # Pull the raw byte range for this partition; textblock aligns to line
    # boundaries (presumably — behavior lives in ..utils.textblock).
    block = textblock(fn, i*chunkbytes, (i+1) * chunkbytes, compression,
                      encoding=kwargs.get('encoding'))
    block = BytesIO(bom + block)
    try:
        return pd.read_csv(block, **kwargs)
    except ValueError as e:
        # Dtypes were inferred from a sample of the file; a later chunk may
        # contain values that don't fit, so translate pandas' terse error
        # into actionable guidance for the user.
        msg = """
Dask dataframe inspected the first 1,000 rows of your csv file to guess the
data types of your columns.  These first 1,000 rows led us to an incorrect
guess.
For example a column may have had integers in the first 1000
rows followed by a float or missing value in the 1,001-st row.
You will need to specify some dtype information explicitly using the
``dtype=`` keyword argument for the right column names and dtypes.
    df = dd.read_csv(..., dtype={'my-column': float})
Pandas has given us the following error when trying to parse the file:
  "%s"
""" % e.args[0]
        # If pandas' message identifies the offending column index, resolve
        # it to a column name and suggest the exact dtype= entry to add.
        match = re.match('cannot safely convert passed user dtype of (?P<old_dtype>\S+) for (?P<new_dtype>\S+) dtyped data in column (?P<column_number>\d+)', e.args[0])
        if match:
            d = match.groupdict()
            d['column'] = kwargs['names'][int(d['column_number'])]
            msg += """
From this we think that you should probably add the following column/dtype
pair to your dtype= dictionary
'%(column)s': '%(new_dtype)s'
""" % d
        # TODO: add more regexes and msg logic here for other pandas errors
        # as apporpriate
        raise ValueError(msg)
def clean_kwargs(kwargs):
    """ Do some sanity checks on kwargs

    Resolves integer ``usecols`` entries against ``names`` and drops
    ``parse_dates`` entries that are excluded by ``usecols``.  The input
    dict is not mutated; a cleaned copy is returned.

    >>> clean_kwargs({'parse_dates': ['a', 'b'], 'usecols': ['b', 'c']})
    {'parse_dates': ['b'], 'usecols': ['b', 'c']}

    >>> clean_kwargs({'names': ['a', 'b'], 'usecols': [1]})
    {'parse_dates': [], 'names': ['a', 'b'], 'usecols': ['b']}
    """
    out = dict(kwargs)

    if 'usecols' in out and 'names' in out:
        names = out['names']
        resolved = []
        for c in out['usecols']:
            # Integer positions translate to column names — unless the
            # integer itself happens to be one of the column names.
            if isinstance(c, int) and c not in names:
                resolved.append(names[c])
            else:
                resolved.append(c)
        out['usecols'] = resolved

    usecols = out.get('usecols')

    def _wanted(col):
        # Keep a parse_dates entry if there is no usecols restriction, or if
        # the entry (scalar, or every member of a tuple/list entry) survives
        # the restriction.
        if usecols is None:
            return True
        if isinstance(col, (tuple, list)) and all(c in usecols for c in col):
            return True
        return col in usecols

    out['parse_dates'] = [col for col in out.get('parse_dates', ())
                          if _wanted(col)]
    return out
def fill_kwargs(fn, **kwargs):
    """ Read a csv file and fill up kwargs

    This normalizes kwargs against a sample file. It does the following:

    1. If given a globstring, just use one file
    2. Get names from csv file if not given
    3. Identify the presence of a header
    4. Identify dtypes
    5. Establish column names
    6. Switch around dtypes and column names if parse_dates is active

    Normally ``pd.read_csv`` does this for us. However for ``dd.read_csv`` we
    need to be consistent across multiple files and don't want to do these
    heuristics each time so we use the pandas solution once, record the
    results, and then send back a fully explicit kwargs dict to send to future
    calls to ``pd.read_csv``.

    Returns
    -------
    columns : column names of the sampled file
    kwargs : dict
        fully explicit keyword arguments to give to pd.read_csv
    """
    if 'index_col' in kwargs:
        msg = """
The index column cannot be set at dataframe creation time. Instead use
the `set_index` method on the dataframe after it is created.
    """
        raise ValueError(msg)
    kwargs = merge(csv_defaults, kwargs)
    sample_nrows = kwargs.pop('sample_nrows', 1000)
    essentials = ['columns', 'names', 'header', 'parse_dates', 'dtype']
    if set(essentials).issubset(kwargs):
        # Everything is already explicit.  Pop 'columns' (it is not a
        # pd.read_csv keyword) and return the same (columns, kwargs) pair the
        # slow path below returns, so callers can always unpack two values.
        # Previously this returned bare ``kwargs`` and broke the caller's
        # ``columns, kwargs = fill_kwargs(...)`` unpacking.
        return kwargs.pop('columns'), kwargs
    # Let pandas infer on the first `sample_nrows` rows
    if '*' in fn:
        filenames = sorted(glob(fn))
        if not filenames:
            raise ValueError("No files found matching name %s" % fn)
        fn = filenames[0]
    if 'names' not in kwargs:
        kwargs['names'] = csv_names(fn, **kwargs)
    if 'header' not in kwargs:
        kwargs['header'] = 0 if infer_header(fn, **kwargs) else None
    kwargs = clean_kwargs(kwargs)
    try:
        head = pd.read_csv(fn, **assoc(kwargs, 'nrows', sample_nrows))
    except StopIteration:
        # Sample larger than the file: read it whole.
        head = pd.read_csv(fn, **kwargs)
    if 'parse_dates' not in kwargs:
        kwargs['parse_dates'] = [col for col in head.dtypes.index
                                 if np.issubdtype(head.dtypes[col], np.datetime64)]
    # Record every inferred dtype that the user did not specify explicitly.
    dtype = kwargs.get('dtype', dict())
    for k, v in dict(head.dtypes).items():
        if k not in dtype:
            dtype[k] = v
    # Date columns are produced by parse_dates, not by dtype coercion.
    if kwargs.get('parse_dates'):
        for col in kwargs['parse_dates']:
            del dtype[col]
    kwargs['dtype'] = dtype
    return head.columns, kwargs
@wraps(pd.read_csv)
def read_csv(fn, **kwargs):
    # Build a dask DataFrame by splitting the file into ~chunkbytes byte
    # blocks, each parsed independently by _read_csv.
    if 'nrows' in kwargs:  # Just create single partition
        df = read_csv(fn, **dissoc(kwargs, 'nrows'))
        return df.head(kwargs['nrows'], compute=False)
    chunkbytes = kwargs.pop('chunkbytes', 2**25)  # default 32 MiB per block
    index = kwargs.pop('index', None)
    kwargs = kwargs.copy()
    # Normalize kwargs once against a sample so every block parses the same.
    columns, kwargs = fill_kwargs(fn, **kwargs)
    # Handle glob strings
    if '*' in fn:
        from .multi import concat
        return concat([read_csv(f, **kwargs) for f in sorted(glob(fn))])
    # mtime in the token so the cache key changes when the file changes.
    token = tokenize(os.path.getmtime(fn), kwargs)
    name = 'read-csv-%s-%s' % (fn, token)
    bom = get_bom(fn)
    # Chunk sizes and numbers
    total_bytes = file_size(fn, kwargs['compression'])
    nchunks = int(ceil(total_bytes / chunkbytes))
    divisions = [None] * (nchunks + 1)
    # Block 0 keeps the header; later blocks must not treat their first
    # line as a header, hence header=None for the "rest" kwargs.
    first_kwargs = merge(kwargs, dict(compression=None))
    rest_kwargs = merge(kwargs, dict(header=None, compression=None))
    # Create dask graph
    dsk = dict(((name, i), (_read_csv, fn, i, chunkbytes,
                            kwargs['compression'], rest_kwargs,
                            bom))
               for i in range(1, nchunks))
    # Block 0 reads the real BOM from the file itself, so pass b''.
    dsk[(name, 0)] = (_read_csv, fn, 0, chunkbytes, kwargs['compression'],
                      first_kwargs, b'')
    result = DataFrame(dsk, name, columns, divisions)
    if index:
        result = result.set_index(index)
    return result
def infer_header(fn, **kwargs):
    """ Guess if csv file has a header or not

    This uses Pandas to read a sample of the file, then looks at the column
    names to see if they are all phrase-like (words, potentially with spaces
    in between) and whether the data rows contain anything non-string.

    Returns True or False
    """
    # See read_csv docs for header for reasoning
    kwargs.update(dict(nrows=5, names=None, parse_dates=None))
    try:
        df = pd.read_csv(fn, **kwargs)
    except StopIteration:
        # File shorter than the sample: read it whole.
        kwargs['nrows'] = None
        df = pd.read_csv(fn, **kwargs)
    # Raw string: \s/\D/\w are invalid string-literal escapes and raise
    # SyntaxWarning on modern Pythons.
    return (len(df) > 0 and
            all(re.match(r'^\s*\D[\w ]*\s*$', n) for n in df.columns) and
            not all(dt == 'O' for dt in df.dtypes))
def csv_names(fn, encoding='utf-8', compression=None, names=None,
              parse_dates=None, usecols=None, dtype=None, **kwargs):
    """Return the column names of a CSV file, reading only a small sample.

    ``usecols`` and ``dtype`` are accepted so callers can pass a full kwargs
    dict, but they are deliberately ignored while sniffing names.
    """
    opts = dict(kwargs, encoding=encoding, compression=compression,
                names=names, parse_dates=parse_dates)
    opts['nrows'] = 5
    try:
        sample = pd.read_csv(fn, **opts)
    except StopIteration:
        # File shorter than five rows: read it whole instead.
        opts['nrows'] = None
        sample = pd.read_csv(fn, **opts)
    return list(sample.columns)
def from_array(x, chunksize=50000, columns=None):
    """ Read dask Dataframe from any slicable array

    Uses getitem syntax to pull slices out of the array. The array need not be
    a NumPy array but must support slicing syntax

        x[50000:100000]

    and have 2 dimensions:

        x.ndim == 2

    or have a record dtype:

        x.dtype == [('name', 'O'), ('balance', 'i8')]
    """
    has_record_dtype = getattr(x.dtype, 'names', None) is not None
    if x.ndim > 2:
        raise ValueError('from_array does not input more than 2D array, got'
                         ' array with shape %r' % (x.shape,))
    if columns is None:
        if has_record_dtype:
            columns = tuple(x.dtype.names)  # record array has named columns
        elif x.ndim == 2:
            # Plain 2D arrays get stringified positional column names.
            columns = [str(i) for i in range(x.shape[1])]
    # Dask arrays already carry chunk structure; delegate.
    if isinstance(x, da.Array):
        return from_dask_array(x, columns=columns)
    divisions = tuple(range(0, len(x), chunksize))
    # Divisions are inclusive of the last index, so append it if the length
    # is not an exact multiple of chunksize.
    if divisions[-1] != len(x) - 1:
        divisions = divisions + (len(x) - 1,)
    token = tokenize(x, chunksize, columns)
    name = 'from_array-' + token
    # One task per chunk: slice the source and wrap in a pandas DataFrame.
    dsk = dict(((name, i), (pd.DataFrame,
                            (getitem, x,
                             slice(i * chunksize, (i + 1) * chunksize))))
               for i in range(0, int(ceil(len(x) / chunksize))))
    return DataFrame(dsk, name, columns, divisions)
def from_pandas(data, npartitions, sort=True):
    """Construct a dask object from a pandas object.

    If given a ``pandas.Series`` a ``dask.Series`` will be returned. If given a
    ``pandas.DataFrame`` a ``dask.DataFrame`` will be returned. All other
    pandas objects will raise a ``TypeError``.

    Parameters
    ----------
    df : pandas.DataFrame or pandas.Series
        The DataFrame/Series with which to construct a dask DataFrame/Series
    npartitions : int
        The number of partitions of the index to create

    Returns
    -------
    dask.DataFrame or dask.Series
        A dask DataFrame/Series partitioned along the index

    Examples
    --------
    >>> df = pd.DataFrame(dict(a=list('aabbcc'), b=list(range(6))),
    ...                   index=pd.date_range(start='20100101', periods=6))
    >>> ddf = from_pandas(df, npartitions=3)
    >>> ddf.divisions  # doctest: +NORMALIZE_WHITESPACE
    (Timestamp('2010-01-01 00:00:00', offset='D'),
     Timestamp('2010-01-03 00:00:00', offset='D'),
     Timestamp('2010-01-05 00:00:00', offset='D'),
     Timestamp('2010-01-06 00:00:00', offset='D'))
    >>> ddf = from_pandas(df.a, npartitions=3)  # Works with Series too!
    >>> ddf.divisions  # doctest: +NORMALIZE_WHITESPACE
    (Timestamp('2010-01-01 00:00:00', offset='D'),
     Timestamp('2010-01-03 00:00:00', offset='D'),
     Timestamp('2010-01-05 00:00:00', offset='D'),
     Timestamp('2010-01-06 00:00:00', offset='D'))

    Raises
    ------
    TypeError
        If something other than a ``pandas.DataFrame`` or ``pandas.Series`` is
        passed in.

    See Also
    --------
    from_array : Construct a dask.DataFrame from an array that has record dtype
    from_bcolz : Construct a dask.DataFrame from a bcolz ctable
    read_csv : Construct a dask.DataFrame from a CSV file
    """
    # DataFrames have .columns, Series have .name; anything else yields None.
    columns = getattr(data, 'columns', getattr(data, 'name', None))
    if columns is None and not isinstance(data, pd.Series):
        raise TypeError("Input must be a pandas DataFrame or Series")
    nrows = len(data)
    chunksize = int(ceil(nrows / npartitions))
    # Divisions are only meaningful on a sorted index.
    if sort and not data.index.is_monotonic_increasing:
        data = data.sort_index(ascending=True)
    if sort:
        divisions = tuple(data.index[i]
                          for i in range(0, nrows, chunksize))
        divisions = divisions + (data.index[-1],)
    else:
        # Unsorted input: division boundaries are unknown.
        divisions = [None] * (npartitions + 1)
    name = 'from_pandas-' + tokenize(data, chunksize)
    # Materialize each partition eagerly with iloc slices; the last partition
    # takes whatever remains so no rows are dropped.
    dsk = dict(((name, i), data.iloc[i * chunksize:(i + 1) * chunksize])
               for i in range(npartitions - 1))
    dsk[(name, npartitions - 1)] = data.iloc[chunksize*(npartitions - 1):]
    # type(data).__name__ is 'DataFrame' or 'Series'; pick the matching
    # dask class from the core module.
    return getattr(core, type(data).__name__)(dsk, name, columns, divisions)
def from_bcolz(x, chunksize=None, categorize=True, index=None, **kwargs):
    """ Read dask Dataframe from bcolz.ctable

    Parameters
    ----------
    x : bcolz.ctable
        Input data
    chunksize : int (optional)
        The size of blocks to pull out from ctable. Ideally as large as can
        comfortably fit in memory
    categorize : bool (defaults to True)
        Automatically categorize all string dtypes
    index : string (optional)
        Column to make the index

    See Also
    --------
    from_array: more generic function not optimized for bcolz
    """
    import dask.array as da
    import bcolz
    # A string argument is treated as an on-disk ctable root directory.
    if isinstance(x, (str, unicode)):
        x = bcolz.ctable(rootdir=x)
    # Default the partition size to the ctable's own chunk length when large.
    bc_chunklen = max(x[name].chunklen for name in x.names)
    if chunksize is None and bc_chunklen > 10000:
        chunksize = bc_chunklen
    categories = dict()
    if categorize:
        # Precompute the unique values of every object/string column so each
        # partition can build consistent Categoricals.
        for name in x.names:
            if (np.issubdtype(x.dtype[name], np.string_) or
                np.issubdtype(x.dtype[name], np.unicode_) or
                np.issubdtype(x.dtype[name], np.object_)):
                a = da.from_array(x[name], chunks=(chunksize * len(x.names),))
                categories[name] = da.unique(a)
    columns = tuple(x.dtype.names)
    divisions = (0,) + tuple(range(-1, len(x), chunksize))[1:]
    if divisions[-1] != len(x) - 1:
        divisions = divisions + (len(x) - 1,)
    # On-disk ctables key the graph on rootdir+mtime; in-memory ones on id().
    if x.rootdir:
        token = tokenize((x.rootdir, os.path.getmtime(x.rootdir)), chunksize,
                         categorize, index, kwargs)
    else:
        token = tokenize((id(x), x.shape, x.dtype), chunksize, categorize,
                         index, kwargs)
    new_name = 'from_bcolz-' + token
    # One locked read per chunk (bcolz access is serialized via a lock).
    dsk = dict(((new_name, i),
                (locked_df_from_ctable,
                 x,
                 (slice(i * chunksize, (i + 1) * chunksize),),
                 columns, categories))
               for i in range(0, int(ceil(len(x) / chunksize))))
    result = DataFrame(dsk, new_name, columns, divisions)
    if index:
        assert index in x.names
        # Estimate quantile-based divisions on the index column, then
        # repartition so each output partition covers a contiguous range.
        a = da.from_array(x[index], chunks=(chunksize * len(x.names),))
        q = np.linspace(0, 100, len(x) // chunksize + 2)
        divisions = da.percentile(a, q).compute()
        return set_partition(result, index, divisions, **kwargs)
    else:
        return result
def dataframe_from_ctable(x, slc, columns=None, categories=None):
    """ Get DataFrame from bcolz.ctable

    Parameters
    ----------
    x: bcolz.ctable
    slc: slice
    columns: list of column names or None

    >>> import bcolz
    >>> x = bcolz.ctable([[1, 2, 3, 4], [10, 20, 30, 40]], names=['a', 'b'])
    >>> dataframe_from_ctable(x, slice(1, 3))
       a   b
    0  2  20
    1  3  30

    >>> dataframe_from_ctable(x, slice(1, 3), columns=['b'])
        b
    0  20
    1  30

    >>> dataframe_from_ctable(x, slice(1, 3), columns='b')
    0    20
    1    30
    Name: b, dtype: int...
    """
    import bcolz
    if columns is None:
        columns = x.dtype.names
    if isinstance(columns, tuple):
        columns = list(columns)
    # Selecting a list of columns yields a ctable; a single string yields a
    # carray -- the two branches below mirror that distinction.
    x = x[columns]
    if isinstance(x, bcolz.ctable):
        chunks = [x[name][slc] for name in columns]
        if categories is not None:
            # Map raw values onto the precomputed category arrays via
            # searchsorted so every partition shares identical categories.
            chunks = [pd.Categorical.from_codes(np.searchsorted(categories[name],
                                                                chunk),
                                                categories[name], True)
                       if name in categories else chunk
                       for name, chunk in zip(columns, chunks)]
        return pd.DataFrame(dict(zip(columns, chunks)), columns=columns)
    elif isinstance(x, bcolz.carray):
        chunk = x[slc]
        # Here ``columns`` is a single column name (string).
        if categories is not None and columns and columns in categories:
            chunk = pd.Categorical.from_codes(
                        np.searchsorted(categories[columns], chunk),
                        categories[columns], True)
        return pd.Series(chunk, name=columns)
def locked_df_from_ctable(*args, **kwargs):
    """Serialized wrapper around ``dataframe_from_ctable``.

    All calls share the module-level ``lock`` -- presumably because
    concurrent ctable reads are unsafe (TODO confirm).
    """
    with lock:
        return dataframe_from_ctable(*args, **kwargs)
def from_dask_array(x, columns=None):
    """ Convert dask Array to dask DataFrame

    Converts a 2d array into a DataFrame and a 1d array into a Series.

    Parameters
    ----------
    x: da.Array
    columns: list or string
        list of column names if DataFrame, single string if Series

    Examples
    --------
    >>> import dask.array as da
    >>> import dask.dataframe as dd
    >>> x = da.ones((4, 2), chunks=(2, 2))
    >>> df = dd.io.from_dask_array(x, columns=['a', 'b'])
    >>> df.compute()
       a  b
    0  1  1
    1  1  1
    2  1  1
    3  1  1
    """
    name = 'from-dask-array' + tokenize(x, columns)
    # Cumulative chunk boundaries along axis 0 become the index ranges.
    divisions = [0]
    for c in x.chunks[0]:
        divisions.append(divisions[-1] + c)
    # One lazy (range, a, b) task per partition supplies the index values.
    index = [(range, a, b) for a, b in zip(divisions[:-1], divisions[1:])]
    # Divisions are inclusive of the last label, hence the -1 adjustment.
    divisions[-1] -= 1
    if x.ndim == 1:
        if x.dtype.names is None:
            # Plain 1d array -> Series; ``columns`` is the Series name.
            dsk = dict(((name, i), (pd.Series, chunk, ind, x.dtype, columns))
                       for i, (chunk, ind) in enumerate(zip(x._keys(), index)))
            return Series(merge(x.dask, dsk), name, columns, divisions)
        else:
            # 1d record array -> DataFrame with the record's field names.
            if columns is None:
                columns = x.dtype.names
            dsk = dict(((name, i), (pd.DataFrame, chunk, ind, columns))
                       for i, (chunk, ind) in enumerate(zip(x._keys(), index)))
            return DataFrame(merge(x.dask, dsk), name, columns, divisions)
    elif x.ndim == 2:
        if columns is None:
            raise ValueError("Must provide columns for DataFrame")
        if len(columns) != x.shape[1]:
            raise ValueError("Columns must be the same length as array width\n"
                     " columns: %s\n width: %d" % (str(columns), x.shape[1]))
        # Each partition must span all columns, so collapse axis 1 into a
        # single chunk before building DataFrame tasks.
        if len(x.chunks[1]) > 1:
            x = x.rechunk({1: x.shape[1]})
        dsk = dict(((name, i), (pd.DataFrame, chunk[0], ind, columns))
                   for i, (chunk, ind) in enumerate(zip(x._keys(), index)))
        return DataFrame(merge(x.dask, dsk), name, columns, divisions)
    else:
        raise ValueError("Array must have one or two dimensions.  Had %d" %
                         x.ndim)
def from_castra(x, columns=None):
    """Load a dask DataFrame from a Castra.

    Parameters
    ----------
    x : filename or Castra
    columns: list or string, optional
        The columns to load.  Default is all columns.
    """
    from castra import Castra
    store = x if isinstance(x, Castra) else Castra(x, readonly=True)
    return store.to_dask(columns)
def _link(token, result):
""" A dummy function to link results together in a graph
We use this to enforce an artificial sequential ordering on tasks that
don't explicitly pass around a shared resource
"""
return None
@wraps(pd.DataFrame.to_hdf)
def to_hdf(df, path_or_buf, key, mode='a', append=False, complevel=0,
           complib=None, fletcher32=False, get=get_sync, **kwargs):
    # Write each partition to the same HDF5 table.  The writes must happen
    # sequentially, so task i is made to depend on task i-1 via _link.
    name = 'to-hdf-' + uuid.uuid1().hex
    pd_to_hdf = getattr(df._partition_type, 'to_hdf')
    dsk = dict()
    # Partition 0 honors the caller's mode/append; later partitions always
    # append to what partition 0 created.
    dsk[(name, 0)] = (_link, None,
                      (apply, pd_to_hdf,
                       (tuple, [(df._name, 0), path_or_buf, key]),
                       {'mode': mode, 'format': 'table', 'append': append,
                        'complevel': complevel, 'complib': complib,
                        'fletcher32': fletcher32}))
    for i in range(1, df.npartitions):
        dsk[(name, i)] = (_link, (name, i - 1),
                          (apply, pd_to_hdf,
                           (tuple, [(df._name, i), path_or_buf, key]),
                           {'mode': 'a', 'format': 'table', 'append': True,
                            'complevel': complevel, 'complib': complib,
                            'fletcher32': fletcher32}))
    # Use the caller-supplied scheduler.  Previously this hard-coded
    # ``get=get_sync`` and silently ignored the ``get`` parameter.
    DataFrame._get(merge(df.dask, dsk), (name, df.npartitions - 1),
                   get=get, **kwargs)
# Shown when an HDFStore dataset was written with format='fixed', which
# cannot be read in row ranges and therefore cannot be partitioned.
dont_use_fixed_error_message = """
This HDFStore is not partitionable and can only be use monolithically with
pandas. In the future when creating HDFStores use the ``format='table'``
option to ensure that your dataset can be parallelized"""
# Shown when start/stop are combined with multiple files or datasets; the
# meaning of the bounds would be ambiguous (per-file vs. global).
read_hdf_error_msg = """
The start and stop keywords are not supported when reading from more than
one file/dataset.
The combination is ambiguous because it could be interpreted as the starting
and stopping index per file, or starting and stopping index of the global
dataset."""
def _read_single_hdf(path, key, start=0, stop=None, columns=None,
                     chunksize=int(1e6), lock=None):
    """
    Read a single hdf file into a dask.dataframe. Used for each file in
    read_hdf.
    """
    def get_keys_and_stops(path, key, stop):
        """
        Get the "keys" or group identifiers which match the given key, which
        can contain wildcards. This uses the hdf file identified by the
        given path. Also get the index of the last row of data for each matched
        key.
        """
        with pd.HDFStore(path) as hdf:
            keys = [k for k in hdf.keys() if fnmatch(k, key)]
            stops = []
            for k in keys:
                storer = hdf.get_storer(k)
                # Only 'table' format supports reading row ranges.
                if storer.format_type != 'table':
                    raise TypeError(dont_use_fixed_error_message)
                if stop is None:
                    stops.append(storer.nrows)
                else:
                    stops.append(stop)
        return keys, stops
    def one_path_one_key(path, key, start, stop, columns, chunksize, lock):
        """
        Get the data frame corresponding to one path and one key (which should
        not contain any wildcards).
        """
        if columns is None:
            # stop=0 reads only metadata, enough to discover column names.
            columns = list(pd.read_hdf(path, key, stop=0).columns)
        token = tokenize((path, os.path.getmtime(path), key, start,
                          stop, columns, chunksize))
        name = 'read-hdf-' + token
        # One task per chunksize-row window of the dataset.
        dsk = dict(((name, i), (_pd_read_hdf, path, key, lock,
                                 {'start': s,
                                  'stop': s + chunksize,
                                  'columns': columns}))
                    for i, s in enumerate(range(start, stop, chunksize)))
        divisions = [None] * (len(dsk) + 1)
        return DataFrame(dsk, name, columns, divisions)
    # lock=True means "make me a fresh lock"; a Lock instance is used as-is.
    if lock is True:
        lock = Lock()
    keys, stops = get_keys_and_stops(path, key, stop)
    # start/stop bounds are ambiguous across multiple matched datasets.
    if (start != 0 or stop is not None) and len(keys) > 1:
        raise NotImplementedError(read_hdf_error_msg)
    from .multi import concat
    return concat([one_path_one_key(path, k, start, s, columns, chunksize, lock)
                   for k, s in zip(keys, stops)])
def _pd_read_hdf(path, key, lock, kwargs):
    """Read from an HDF5 file, serializing access through ``lock`` if given.

    ``lock`` may be falsy (no locking) or any object with
    ``acquire``/``release``.
    """
    if not lock:
        return pd.read_hdf(path, key, **kwargs)
    lock.acquire()
    try:
        return pd.read_hdf(path, key, **kwargs)
    finally:
        lock.release()
@wraps(pd.read_hdf)
def read_hdf(pattern, key, start=0, stop=None, columns=None,
             chunksize=1000000, lock=True):
    """
    Read hdf files into a dask dataframe. Like pandas.read_hdf, except it we
    can read multiple files, and read multiple keys from the same file by using
    pattern matching.

    Parameters
    ----------
    pattern : pattern (string), or buffer to read from. Can contain wildcards
    key : group identifier in the store. Can contain wildcards
    start : optional, integer (defaults to 0), row number to start at
    stop : optional, integer (defaults to None, the last row), row number to
        stop at
    columns : optional, a list of columns that if not None, will limit the
        return columns
    chunksize : optional, nrows to include in iteration, return an iterator

    Returns
    -------
    dask.DataFrame

    Examples
    --------
    Load single file

    >>> dd.read_hdf('myfile.1.hdf5', '/x')  # doctest: +SKIP

    Load multiple files

    >>> dd.read_hdf('myfile.*.hdf5', '/x')  # doctest: +SKIP

    Load multiple datasets

    >>> dd.read_hdf('myfile.1.hdf5', '/*')  # doctest: +SKIP
    """
    paths = sorted(glob(pattern))
    # start/stop bounds are ambiguous when more than one file matches.
    if (start != 0 or stop is not None) and len(paths) > 1:
        raise NotImplementedError(read_hdf_error_msg)
    from .multi import concat
    # One dask DataFrame per file; key wildcards are expanded per file.
    return concat([_read_single_hdf(path, key, start=start, stop=stop,
                                    columns=columns, chunksize=chunksize,
                                    lock=lock)
                   for path in paths])
def to_castra(df, fn=None, categories=None, sorted_index_column=None,
              compute=True):
    """ Write DataFrame to Castra on-disk store

    See https://github.com/blosc/castra for details

    See Also
    --------
    Castra.to_dask
    """
    from castra import Castra
    if isinstance(categories, list):
        # Wrap so the dask scheduler sees a task (list, categories) and
        # rebuilds the list rather than treating it as nested tasks.
        categories = (list, categories)
    name = 'to-castra-' + uuid.uuid1().hex
    if sorted_index_column:
        set_index = lambda x: x.set_index(sorted_index_column)
        func = lambda part: (set_index, part)
    else:
        func = lambda part: part
    dsk = dict()
    # (name, -1) creates the Castra from partition 0; every extend task
    # references it and depends (via _link) on the previous extend so the
    # partitions are appended in order.
    dsk[(name, -1)] = (Castra, fn, func((df._name, 0)), categories)
    for i in range(0, df.npartitions):
        dsk[(name, i)] = (_link, (name, i - 1),
                          (Castra.extend, (name, -1), func((df._name, i))))
    dsk = merge(dsk, df.dask)
    keys = [(name, -1), (name, df.npartitions - 1)]
    if compute:
        c, _ = DataFrame._get(dsk, keys, get=get_sync)
        return c
    else:
        return dsk, keys
def to_csv(df, filename, compression=None, **kwargs):
    # Write a dask DataFrame to a single CSV file, partition by partition.
    if compression:
        raise NotImplementedError("Writing compressed csv files not supported")
    myget = kwargs.pop('get', None)
    name = 'to-csv-' + uuid.uuid1().hex
    dsk = dict()
    # Partition 0 writes the header with the caller's kwargs.
    dsk[(name, 0)] = (lambda df, fn, kwargs: df.to_csv(fn, **kwargs),
                      (df._name, 0), filename, kwargs)
    # Later partitions append without a header; _link forces them to run
    # in sequence so rows appear in partition order.
    kwargs2 = kwargs.copy()
    kwargs2['mode'] = 'a'
    kwargs2['header'] = False
    for i in range(1, df.npartitions):
        dsk[(name, i)] = (_link, (name, i - 1),
                          (lambda df, fn, kwargs: df.to_csv(fn, **kwargs),
                           (df._name, i), filename, kwargs2))
    DataFrame._get(merge(dsk, df.dask), (name, df.npartitions - 1), get=myget)
def to_bag(df, index=False):
    # Convert a dask DataFrame/Series into a dask Bag of rows/values.
    from ..bag.core import Bag
    if isinstance(df, DataFrame):
        # Each DataFrame partition becomes a list of row tuples.
        func = lambda df: list(df.itertuples(index))
    elif isinstance(df, Series):
        # With index=True yield (index, value) pairs, otherwise bare values.
        func = (lambda df: list(df.iteritems())) if index else list
    else:
        raise TypeError("df must be either DataFrame or Series")
    name = 'to_bag-' + tokenize(df, index)
    dsk = dict(((name, i), (func, block)) for (i, block) in enumerate(df._keys()))
    dsk.update(df._optimize(df.dask, df._keys()))
    return Bag(dsk, name, df.npartitions)
| {
"content_hash": "f2e5139faf716b1a29678aa1daa909df",
"timestamp": "",
"source": "github",
"line_count": 809,
"max_line_length": 168,
"avg_line_length": 33.982694684796044,
"alnum_prop": 0.5793321693583589,
"repo_name": "pombredanne/dask",
"id": "42819bbb3f41ece0d1799110af9cb99d40dcaccd",
"size": "27492",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dask/dataframe/io.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "Python",
"bytes": "895298"
},
{
"name": "Shell",
"bytes": "36"
}
],
"symlink_target": ""
} |
import hazelcast
from hazelcast.core import HazelcastJsonValue
from hazelcast.predicate import less_or_equal
from hazelcast.projection import single_attribute, multi_attribute
# Connect to a Hazelcast cluster with default settings.
client = hazelcast.HazelcastClient()
# blocking() makes map operations synchronous instead of returning futures.
people = client.get_map("people").blocking()
people.put_all(
    {
        1: HazelcastJsonValue({"name": "Philip", "age": 46}),
        2: HazelcastJsonValue({"name": "Elizabeth", "age": 44}),
        3: HazelcastJsonValue({"name": "Henry", "age": 13}),
        4: HazelcastJsonValue({"name": "Paige", "age": 15}),
    }
)
# Project a single attribute out of every JSON value in the map.
names = people.project(single_attribute("name"))
print("Names of the people are %s." % names)
# A projection combined with a predicate restricts which entries are projected.
children_names = people.project(single_attribute("name"), less_or_equal("age", 18))
print("Names of the children are %s." % children_names)
# A multi-attribute projection yields one list of values per entry.
names_and_ages = people.project(multi_attribute("name", "age"))
print("Names and ages of the people are %s." % names_and_ages)
# Release client resources.
client.shutdown()
| {
"content_hash": "c58157ee4823865ef8df41da7a1040a4",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 83,
"avg_line_length": 31.862068965517242,
"alnum_prop": 0.6883116883116883,
"repo_name": "hazelcast/hazelcast-python-client",
"id": "d2254425b950d259c6139888288e0bc7d18b4d70",
"size": "924",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/projections/projections_example.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2300326"
},
{
"name": "Shell",
"bytes": "1900"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import base64
from django.conf import settings
from django.db import models
from django.db import transaction
from django.db.models import Q
import logging
import us
import uuid
from polling.models import CANDIDATES_ADVOCATED
# (value, label) choices built from the `us` package; both the stored value
# and the human-readable label are the full state name.
STATES = [(state.name, state.name) for state in us.STATES]
logger = logging.getLogger(__name__)
class PairProposalManager(models.Manager):
    """Manager exposing the lifecycle states of PairProposal querysets."""
    def pending(self):
        """Proposals that have been neither confirmed nor rejected."""
        return self.get_queryset().filter(
            date_confirmed__isnull=True,
            date_rejected__isnull=True)
    def confirmed(self):
        """Proposals whose confirmation date has been set."""
        return self.get_queryset().filter(date_confirmed__isnull=False)
    def rejected(self):
        """Proposals whose rejection date has been set."""
        return self.get_queryset().filter(date_rejected__isnull=False)
class PairProposal(models.Model):
    """A proposed pairing between two profiles.

    Lifecycle is tracked via nullable timestamps: a proposal is pending
    until either ``date_confirmed`` or ``date_rejected`` is set (see
    ``PairProposalManager``).
    """
    # SET_NULL keeps the proposal row around if either profile is deleted.
    from_profile = models.ForeignKey(
        'Profile',
        on_delete=models.SET_NULL,
        null=True,
        related_name='proposals_made')
    to_profile = models.ForeignKey(
        'Profile',
        on_delete=models.SET_NULL,
        null=True,
        related_name='proposals_received')
    # Random public identifier (non-PK), usable in external references.
    ref_id = models.UUIDField(primary_key=False, default=uuid.uuid4)
    date_proposed = models.DateTimeField(auto_now_add=True)
    date_confirmed = models.DateTimeField(null=True)
    date_rejected = models.DateTimeField(null=True)
    reason_rejected = models.TextField(null=True, blank=True)
    objects = PairProposalManager()
    def __repr__(self):
        """Debug representation showing both profiles and the proposal time."""
        return "<PairProposal: from:{from_p} to:{to_p} at:{when}>".format(
            from_p=repr(self.from_profile),
            to_p=repr(self.to_profile),
            when=self.date_proposed.isoformat())
class ProfileManager(models.Manager):
    """Default Profile manager that eagerly loads related users."""
    def get_queryset(self):
        # The common access pattern touches profile.user and
        # profile.paired_with.user immediately; fetch them up front to
        # avoid N+1 queries.
        return (super(ProfileManager, self).get_queryset()
                .select_related('user')
                .prefetch_related('_paired_with__user'))
    def unpaired(self):
        """Profiles that are not currently paired with anyone."""
        return self.get_queryset().filter(_paired_with=None)
class SignUpLog(models.Model):
    """Records the HTTP referer and IP address seen when a user signed up."""
    user = models.OneToOneField(settings.AUTH_USER_MODEL, null=True)
    referer = models.CharField(max_length=255, null=True)
    ip = models.CharField(max_length=255, null=True)
class Profile(models.Model):
    """A user's swap profile: state, candidate preference, friends, pairing."""
    user = models.OneToOneField(settings.AUTH_USER_MODEL, null=True)
    # TODO: populate fb_name with facebook name
    fb_name = models.CharField(max_length=255, null=True)
    fb_id = models.CharField(max_length=255, null=True)
    state = models.CharField(max_length=255, choices=STATES, null=True)
    preferred_candidate = models.CharField(
        max_length=255, choices=CANDIDATES_ADVOCATED, null=True)
    reason = models.TextField(null=True, blank=True)  # Why a user is swapping
    allow_random = models.BooleanField(default=False)
    # Pairing with a User might be more technically correct, but then that
    # requires us to JOIN against the users table when trying to get the
    # user's information we actually care about
    _paired_with = models.ManyToManyField(
        'self', symmetrical=True, blank=True)
    friends = models.ManyToManyField('self', symmetrical=True)
    objects = ProfileManager()
    def clean(self):
        """Ensure ``reason`` is stored base64-encoded."""
        try:
            # The point of this is to attempt to decode whatever is stored in
            # reason. If it has already been encoded then nothing happens. If
            # it hasn't been encoded, then the decoding will fail and we'll
            # encode it and save it.
            if self.reason_decoded == '':
                pass
        except Exception:
            # Was a bare ``except:``, which also swallowed SystemExit and
            # KeyboardInterrupt; Exception still covers the decode errors.
            logger.info("Encoding reason '%s'", self.reason)
            self.reason = base64.b64encode(self.reason.encode('utf-8'))
    def _all_friends(self, unpaired=False):
        """Friends and friends-of-friends (plus random-pool profiles when
        ``allow_random``); optionally restricted to unpaired profiles."""
        # TODO Raw SQL query is faster
        direct_friend_ids = self.friends.all().values_list('id', flat=True)
        # Walk the through-table once to pick up second-degree friends too.
        all_friend_ids = self.friends.through.objects.filter(
            Q(from_profile_id=self.id) |
            Q(from_profile_id__in=direct_friend_ids)).values_list(
                'to_profile_id', flat=True)
        if self.allow_random:
            # Random-matching users can be paired with any other such user.
            all_friend_ids = set(all_friend_ids)
            all_friend_ids |= set(
                Profile.objects.filter(allow_random=True)
                .values_list('id', flat=True))
        if unpaired:
            return (Profile.objects.unpaired()
                    .filter(id__in=all_friend_ids)
                    .exclude(id=self.id))
        else:
            return (Profile.objects
                    .filter(id__in=all_friend_ids)
                    .exclude(id=self.id))
    @property
    def reason_decoded(self):
        """The user's reason, base64-decoded to text ('' when unset)."""
        if self.reason:
            return base64.b64decode(self.reason).decode('utf-8')
        return ''
    @property
    def all_friends(self):
        return self._all_friends()
    @property
    def all_unpaired_friends(self):
        return self._all_friends(unpaired=True)
    @transaction.atomic
    def set_pair(self, other):
        """Atomically replace any existing pairing with ``other``."""
        if self._paired_with.all():
            self._paired_with.clear()
        self._paired_with.add(other)
    def get_pair(self):
        """Return the paired Profile, or None."""
        pair = self._paired_with.all()
        if pair:
            return pair[0]
        return None
    paired_with = property(get_pair, set_pair)
    def _is_paired(self):
        return self.paired_with is not None
    # .boolean makes the Django admin render this as an icon.
    _is_paired.boolean = True
    is_paired = property(_is_paired)
    def __repr__(self):
        return "<Profile: user:{user}, state:{state}, cand:{candidate}, pair:{pair}>".format(  # NOQA
            user=self.user,
            state=self.state,
            candidate=self.preferred_candidate,
            pair=repr(getattr(self.paired_with, 'user', None)))
| {
"content_hash": "8544d7e5f9cb382f3f3a1577a2d6a2d6",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 101,
"avg_line_length": 34.77914110429448,
"alnum_prop": 0.6295642970541542,
"repo_name": "sbuss/voteswap",
"id": "50c0a4e9f5d532b2e175b783f7a3945e1771f9cd",
"size": "5669",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "users/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "200347"
},
{
"name": "HTML",
"bytes": "159385"
},
{
"name": "JavaScript",
"bytes": "120612"
},
{
"name": "Makefile",
"bytes": "1227"
},
{
"name": "Python",
"bytes": "11135152"
},
{
"name": "Shell",
"bytes": "931"
}
],
"symlink_target": ""
} |
import os
import sys
import json
import time
import datetime
import string
import re
import math
import commands
import logging
import logging.handlers
import threading
import threadpool
# configure logs
# NOTE: the logger must be created *before* handlers are attached; the
# original added the handler to ``log`` two lines before ``log`` was
# defined, which raised NameError at import time.
log = logging.getLogger('del_objects')
log.setLevel(logging.INFO)
NOW_TIME = datetime.datetime.now()
STR_NOW = NOW_TIME.strftime("%Y-%m-%d_%H:%M:%S")
# Make sure the log directory next to this script exists.
commands.getstatusoutput('mkdir -p %s/log' %(sys.path[0]))
LOG_FILE = '%s/log/del_obj_%s.log' %(sys.path[0], STR_NOW)
# Rotate at 1 MiB, keeping 5 backups.
file_handler = logging.handlers.RotatingFileHandler(LOG_FILE, maxBytes=1024*1024, backupCount=5)
file_formatter = logging.Formatter('%(asctime)s %(filename)s:%(lineno)s %(levelname)s - %(message)s')
file_handler.setFormatter(file_formatter)
log.addHandler(file_handler)
#console_handler = logging.StreamHandler()
#console_formatter = logging.Formatter('%(asctime)s %(filename)s:%(lineno)s %(levelname)s - %(message)s')
#console_handler.setFormatter(console_formatter)
#log.addHandler(console_handler)
# global variables
RESUME_FILE = sys.path[0] + '/resume.dat'
RESUME_DATA = []
MAX_EXPIRATION = 180
NO_EXPIRATION = '0'
def Get_Buckets_List():
    """Return all RGW buckets via ``radosgw-admin bucket list``, or None."""
    status, output = commands.getstatusoutput('radosgw-admin bucket list')
    if status != 0:
        log.error(output)
        return None
    # Trim any non-JSON noise around the JSON array before parsing.
    payload = output[output.find('['):output.rfind(']')+1]
    buckets = json.loads(payload)
    log.debug("Get Buckets List:\n%s" %buckets)
    return buckets
def Get_Objects_List(bucket_name, marker):
    """List the top 1000 objects of a bucket starting from ``marker``.

    Returns the parsed object list, or None when radosgw-admin fails.
    """
    opt_marker = "--marker=%s" %marker if marker != None else ""
    status, output = commands.getstatusoutput(
        'radosgw-admin bucket list --bucket=%s %s' %(bucket_name, opt_marker))
    if status != 0:
        log.error(output)
        return None
    # Trim any non-JSON noise around the JSON array before parsing.
    payload = output[output.find('['):output.rfind(']')+1]
    objects = json.loads(payload)
    log.debug("%s: Get Objects List for %s:\n%s" %(threading.current_thread().getName(), bucket_name, objects))
    return objects
def Get_Bucket_Stats(bucket_name):
    """Return ``radosgw-admin bucket stats`` output as a dict, or None."""
    status, output = commands.getstatusoutput('radosgw-admin bucket stats --bucket=%s' %bucket_name)
    if status != 0:
        log.error(output)
        return None
    # Trim any non-JSON noise around the JSON object before parsing.
    payload = output[output.find('{'):output.rfind('}')+1]
    stats = json.loads(payload)
    log.debug("Get Stats for Bucket %s:\n%s" %(bucket_name,stats))
    return stats
def Get_User_Info(user_id):
    """Return ``radosgw-admin user info`` output as a dict, or None."""
    status, output = commands.getstatusoutput('radosgw-admin user info --uid=%s' %user_id)
    if status != 0:
        log.error(output)
        return None
    # Trim any non-JSON noise around the JSON object before parsing.
    payload = output[output.find('{'):output.rfind('}')+1]
    info = json.loads(payload)
    log.debug("Get Info for User %s:\n%s" %(user_id,info))
    return info
def Cmp_Timestamp(timestamp_now, time_obj, exp_days):
    """Return True when ``time_obj`` is at least ``exp_days`` days older
    than ``timestamp_now`` (a Unix timestamp)."""
    # object mtime format: 2015-03-19 12:01:33.000000Z -- drop the
    # fractional-seconds/zone suffix before parsing.
    time_obj = time_obj.split('.')[0]
    timestamp_obj = time.mktime(time.strptime(time_obj,'%Y-%m-%d %H:%M:%S'))
    # float() replaces string.atof(), deprecated since Python 2.0 and
    # removed in Python 3.
    exp_seconds = float(exp_days) * 24 * 60 * 60
    log.debug('timestamp_now = %f, timestamp_obj = %f, exp_seconds = %f, dif_timestamp = %d'
            %(timestamp_now, timestamp_obj, exp_seconds, timestamp_now - timestamp_obj))
    return (timestamp_now - timestamp_obj) >= exp_seconds
# get exp_days for a bucket (stored in the owner's display_name)
def Get_Exp_Day(bucket_name):
    """Return the expiration period in days for ``bucket_name``, or None.

    The period is (ab)used to be stored in the bucket owner's display_name;
    it is returned as the raw numeric string only when it parses as a number.
    """
    bucket_stats = Get_Bucket_Stats(bucket_name)
    if bucket_stats is None:
        # radosgw-admin failed; without stats we cannot resolve the owner.
        # (Previously this crashed with a TypeError on subscripting None.)
        return None
    user_info = Get_User_Info(bucket_stats['owner'])
    if user_info != None:
        # Accept optionally signed integers or decimals, e.g. "30" or "0.5".
        pattern = re.compile(r'^[-+]?([0-9]+(\.[0-9]+)?|\.[0-9]+)$')
        match = pattern.match(user_info['display_name'])
        if match:
            log.debug('Get_Exp_Day Match: Bucket - %s, Owner - %s, display_name - %s' %(bucket_name, bucket_stats['owner'], user_info['display_name']))
            return user_info['display_name']
        else:
            log.warning('Get_Exp_Day NO Match: Bucket[%s], Owner[%s], display_name[%s]' %(bucket_name, bucket_stats['owner'], user_info['display_name']))
            return None
    return None
def Get_object_shadow_info(bucket_name, obj_name):
    # Returns (pool, shadow_prefix, shadow_num) for an object whose data
    # spills past its head object, True when it fits entirely in the head,
    # or None when `radosgw-admin object stat` fails.
    # get object stat
    (status, output) = commands.getstatusoutput('radosgw-admin object stat --bucket=%s --object=%s' % (bucket_name, obj_name))
    if status == 0:
        # Trim any non-JSON noise around the JSON object before parsing.
        output = output[output.find('{'):output.rfind('}')+1]
        obj_stat = json.loads(output)
        log.debug("%s: Get stat for object %s/%s:\n%s" %(threading.current_thread().getName(), bucket_name, obj_name, obj_stat))
        # get shadow info (Python 2 ``long``)
        obj_size = long(obj_stat["manifest"]["obj_size"])
        head_size = long(obj_stat["manifest"]["head_size"])
        if (obj_size > head_size):
            # Data beyond the head is striped into numbered "__shadow_"
            # RADOS objects of at most stripe_max_size bytes each.
            pool = obj_stat["manifest"]["tail_bucket"]["pool"]
            prefix = obj_stat["manifest"]["prefix"]
            bucket_id = obj_stat["manifest"]["tail_bucket"]["bucket_id"]
            stripe_max_size = long(obj_stat["manifest"]["rules"][0]["val"]["stripe_max_size"])
            shadow_num = (obj_size - head_size) / stripe_max_size + 1
            shadow_prefix = bucket_id + "__shadow_" + prefix
            return (pool, shadow_prefix, shadow_num)
        else:
            # Object fits in the head; there are no shadow objects.
            return True
    else:
        log.error("%s: Get stat for object %s/%s Failed - %s" %(threading.current_thread().getName(), bucket_name, obj_name, output))
        return None
def Get_object_multi_shadow_info(bucket_name, obj_name):
    """Return the rados pool and the full list of tail/multipart object
    names that make up an RGW object (multipart-aware variant).

    Returns:
        (pool, shadow_list) on success,
        True when the object fits entirely in its head object,
        None when 'radosgw-admin object stat' fails.
    """
    # get object stat
    (status, output) = commands.getstatusoutput('radosgw-admin object stat --bucket=%s --object=%s' % (bucket_name, obj_name))
    if status == 0:
        # Strip any non-JSON noise printed around the stat payload.
        output = output[output.find('{'):output.rfind('}')+1]
        obj_stat = json.loads(output)
        log.debug("%s: Get stat for object %s/%s:\n%s" %(threading.current_thread().getName(), bucket_name, obj_name, obj_stat))
        # get shadow objects info
        # A non-zero head_size marks a non-multipart upload.
        head_size = float(obj_stat["manifest"]["head_size"])
        if head_size != 0:
            # for object without multipart
            obj_size = float(obj_stat["manifest"]["obj_size"])
            if (obj_size > head_size):
                pool = obj_stat["manifest"]["tail_bucket"]["pool"]
                prefix = obj_stat["manifest"]["prefix"]
                bucket_id = obj_stat["manifest"]["tail_bucket"]["bucket_id"]
                stripe_max_size = float(obj_stat["manifest"]["rules"][0]["val"]["stripe_max_size"])
                # Number of tail stripes = ceil(tail bytes / stripe size).
                shadow_num = int(math.ceil((obj_size - head_size) / stripe_max_size))
                shadow_prefix = bucket_id + "__shadow_" + prefix
                shadow_list = []
                # Shadow objects are numbered starting from 1.
                for s_id in range(1,shadow_num+1):
                    shadow_list.append('%s%d' %(shadow_prefix, s_id))
                return (pool, shadow_list)
            else:
                # Whole object lives in the head rados object; nothing to list.
                return True
        else:
            # for multipart object
            # gen prefix
            pool = obj_stat["manifest"]["tail_bucket"]["pool"]
            prefix = obj_stat["manifest"]["prefix"]
            bucket_id = obj_stat["manifest"]["tail_bucket"]["bucket_id"]
            multipart_prefix = bucket_id + "__multipart_" + prefix
            shadow_prefix = bucket_id + "__shadow_" + prefix
            # get num of multipart
            obj_size = int(obj_stat["manifest"]["obj_size"])
            part_size = float(obj_stat["manifest"]["rules"][0]["val"]["part_size"])
            if (obj_size % int(part_size)) == 0:
                # NOTE(review): the '+ 1' implies an extra trailing part even
                # for an exact multiple of part_size — matches the tail-part
                # naming below, but confirm against radosgw manifest layout.
                part_num = obj_size / int(part_size) + 1
            else:
                part_num = int(obj_stat["manifest"]["rules"][1]["val"]["start_part_num"])
            # gen shadow_num for isometric objects
            stripe_max_size = float(obj_stat["manifest"]["rules"][0]["val"]["stripe_max_size"])
            part_shadow_num = int(math.ceil(part_size / stripe_max_size))
            shadow_list = []
            # gen names for isometric (equal-sized) parts and their stripes
            for m_id in range(1,part_num ):
                shadow_list.append("%s.%d" %(multipart_prefix, m_id))
                for s_id in range(1,part_shadow_num):
                    shadow_list.append("%s.%d_%d" %(shadow_prefix, m_id, s_id))
            # gen names for tail-part objects (second manifest rule, if any)
            if len(obj_stat["manifest"]["rules"]) > 1:
                tail_part_size = float(obj_stat["manifest"]["rules"][1]["val"]["part_size"])
                tail_stripe_max_size = float(obj_stat["manifest"]["rules"][1]["val"]["stripe_max_size"])
                tail_shadow_num = int(math.ceil(tail_part_size / tail_stripe_max_size))
                shadow_list.append("%s.%d" %(multipart_prefix, part_num))
                for s_id in range(1, tail_shadow_num):
                    shadow_list.append("%s.%d_%d" %(shadow_prefix, part_num, s_id))
            return (pool, shadow_list)
    else:
        log.error("%s: Get stat for object %s/%s Failed - %s" %(threading.current_thread().getName(), bucket_name, obj_name, output))
        return None
# delete objects one by one according to their mtime timestamp
def Del_Objects_ByTime(bucket_info):
    """Worker task: delete every object in a bucket older than exp_days.

    Args:
        bucket_info: (bucket_name, exp_days) tuple as queued by Deletion().

    Deletes the head object via radosgw-admin and every shadow/multipart
    rados object directly, then appends the bucket name to the resume file
    so an interrupted run can skip it next time.
    """
    bucket_name = bucket_info[0]
    exp_days = bucket_info[1]
    log.info("%s: Deleting objects of bucket %s" %(threading.current_thread().getName(), bucket_name))
    timestamp_now = time.time()
    marker = None
    objects_list = Get_Objects_List(bucket_name, marker)
    # Page through the bucket listing; each iteration fetches the next
    # page using the last object's name as the marker.
    while objects_list != None and len(objects_list) > 0:
        for obj in objects_list:
            if Cmp_Timestamp(timestamp_now, obj['mtime'], exp_days):
                shadow_info = Get_object_multi_shadow_info(bucket_name, obj['name'])
                if shadow_info:
                    # delete object head
                    (status, output) = commands.getstatusoutput('radosgw-admin object rm --bucket=%s --object=%s' % (bucket_name, obj['name']))
                    if status == 0:
                        log_str = '%s: Deletion of %s/%s - Delete main object successfully' %(threading.current_thread().getName(), bucket_name, obj['name'])
                        # delete object tail; shadow_info is True (no tail)
                        # or a (pool, shadow_list) tuple.
                        if shadow_info != True:
                            pool, shadow_list = shadow_info
                            for s_obj in shadow_list:
                                (status, output) = commands.getstatusoutput('rados -p %s rm %s' %(pool, s_obj))
                                if status != 0:
                                    log_str += '; Delete shadow Failed - %s[%s] - %s' %(pool, s_obj, output)
                        log.info(log_str)
                    else:
                        log.error('%s: Deletion of %s/%s was FAILED - %s' %(threading.current_thread().getName(), bucket_name, obj['name'], output))
        objects_list = Get_Objects_List(bucket_name, objects_list[len(objects_list)-1]['name'])
    # add the bucket name to resume file (lock shared across worker threads)
    threadpool.lock.acquire()
    file = open(RESUME_FILE, 'a')
    file.write('%s\n' %bucket_name)
    file.close()
    threadpool.lock.release()
def GC_Process():
    """Trigger radosgw garbage collection so removed objects are reclaimed."""
    # BUG FIX: the command was misspelled 'gc proocess', so every
    # invocation failed; also fixed 'radosgw-admingc' in the log text.
    (status, output) = commands.getstatusoutput('radosgw-admin gc process')
    if status == 0:
        log.info("Run 'radosgw-admin gc process' successfully!")
    else:
        log.error("Execution of 'radosgw-admin gc process' was FAILED - %s" %output)
def Deletion():
    """Walk all buckets and delete objects past their owner's expiration.

    Buckets recorded in RESUME_DATA (from a previous interrupted run) are
    skipped, as are buckets whose owner has no numeric expiration set.
    Deletion work is fanned out to a thread pool of 10 workers.
    """
    log.info("START Deletion")
    buckets_list = Get_Buckets_List()
    if buckets_list != None and len(buckets_list) > 0:
        pool = threadpool.ThreadPool(10)
        for bucket_name in buckets_list:
            # skip buckets already processed in a previous (resumed) run.
            if bucket_name in RESUME_DATA:
                log.debug("The bucket %s was existing in resume data." %bucket_name)
                continue
            # check the bucket's expiration.
            exp_days = Get_Exp_Day(bucket_name)
            if (exp_days == None) or (exp_days == NO_EXPIRATION):
                continue
            # delete overdue objects
            pool.queueTask(Del_Objects_ByTime, (bucket_name,exp_days))
        # BUG FIX: joinAll()/sleep previously ran outside this branch and
        # raised NameError when the cluster had no buckets ('pool' was
        # never created).  Join only when a pool exists.
        pool.joinAll()
        time.sleep(2)
    else:
        log.info("There's no bucket in the cluster!")
    log.info("COMPLETED Deletion")
def Initialize():
    """Load resume state from RESUME_FILE, creating the file when absent.

    Populates the module-level RESUME_DATA list with one bucket name per
    line; exits with status 2 when the resume file cannot be created.
    """
    if os.path.isfile(RESUME_FILE):
        log.info("The resume file %s was existing." %RESUME_FILE)
        with open(RESUME_FILE, 'r') as resume_fh:
            for record in resume_fh:
                RESUME_DATA.append(record.strip('\n').strip('\r'))
        if len(RESUME_DATA) == 0:
            log.warning("The resume file %s is empty." %RESUME_FILE)
        log.debug("The resume data list: %s" %RESUME_DATA)
    else:
        (status, output) = commands.getstatusoutput('touch %s' %RESUME_FILE)
        if status != 0:
            log.error("Creation of resume file %s is FAILED - %s" %(RESUME_FILE,output))
            sys.exit(2)
        else:
            log.info("Creation of resume file %s is successfully." %RESUME_FILE)
def Finalize():
    """Remove the resume file after a fully successful run.

    The file is only deleted here, never mid-run, so that a crash leaves
    the resume data intact for the next invocation.
    """
    # delete resume file
    (status, output) = commands.getstatusoutput('rm %s' %RESUME_FILE)
    if status != 0:
        log.error("Deletion of resume file %s is FAILED - %s" %(RESUME_FILE,output))
    else:
        # typo fix: 'reaume' -> 'resume' in the success message
        log.info("The resume file %s was deleted." %RESUME_FILE)
def main():
    """Entry point: load resume state, run the deletion pass, then clean up."""
    Initialize()
    Deletion()
    Finalize()
if __name__ == '__main__':
    # Log the fully-qualified script path around the run for traceability.
    script_name = "%s/%s" %(sys.path[0], os.path.basename(sys.argv[0]))
    log.info("START %s" %script_name)
    main()
log.info("EXIT %s" %script_name) | {
"content_hash": "06e8bbda177882f08a0acc1c36e15f35",
"timestamp": "",
"source": "github",
"line_count": 299,
"max_line_length": 144,
"avg_line_length": 39.49832775919732,
"alnum_prop": 0.6668924640135478,
"repo_name": "heroanxiaobo/Cloud-Tools",
"id": "328b1ad830ce74bf1fbf4a7a531eb17f60e18402",
"size": "11833",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "del_s3_obj/del_s3_objs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17174"
}
],
"symlink_target": ""
} |
import copy
from functools import wraps
import json
import sys
import django
from django.contrib.admin.helpers import AdminForm
from django.conf import settings
from django.conf.urls import url
from django.contrib import admin, messages
from django.contrib.admin.models import LogEntry, CHANGE
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.utils import get_deleted_objects, quote
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import (MultipleObjectsReturned, ObjectDoesNotExist,
PermissionDenied, ValidationError)
from django.db import router, transaction
from django.db.models import Q
from django.http import HttpResponseRedirect, HttpResponse, Http404, HttpResponseBadRequest, HttpResponseForbidden
from django.shortcuts import render, get_object_or_404
from django.template.defaultfilters import escape
from django.utils.encoding import force_text
from django.utils.formats import localize
from django.utils.six.moves.urllib.parse import unquote
from django.utils.translation import ugettext_lazy as _, get_language
from django.utils.decorators import method_decorator
from django.views.decorators.http import require_POST
from cms.admin.change_list import CMSChangeList
from cms.admin.dialog.views import get_copy_dialog
from cms.admin.forms import (
PageForm, AdvancedSettingsForm, PagePermissionForm, PublicationDatesForm
)
from cms.admin.permissionadmin import (
PERMISSION_ADMIN_INLINES, PagePermissionInlineAdmin, ViewRestrictionInlineAdmin
)
from cms.admin.placeholderadmin import PlaceholderAdminMixin
from cms.admin.views import revert_plugins
from cms.constants import (
PAGE_TYPES_ID,
PUBLISHER_STATE_PENDING,
REVISION_INITIAL_COMMENT,
)
from cms.models import Page, Title, CMSPlugin, PagePermission, GlobalPagePermission, StaticPlaceholder
from cms.models.managers import PagePermissionsPermissionManager
from cms.plugin_pool import plugin_pool
from cms.toolbar_pool import toolbar_pool
from cms.utils import helpers, permissions, get_language_from_request, admin as admin_utils, copy_plugins
from cms.utils.i18n import get_language_list, get_language_tuple, get_language_object, force_language
from cms.utils.admin import jsonify_request
from cms.utils.compat.dj import is_installed
from cms.utils.conf import get_cms_setting
from cms.utils.helpers import find_placeholder_relation, current_site
from cms.utils.permissions import has_global_page_permission, has_generic_permission
from cms.utils.urlutils import add_url_parameters, admin_reverse
require_POST = method_decorator(require_POST)
# When django-reversion is available, use the CMS reversion-aware admin
# machinery; otherwise fall back to Django's ModelAdmin plus no-op stubs
# so the @create_revision() decorators below still work.
if is_installed('reversion'):
    from cms.utils.reversion_hacks import ModelAdmin, create_revision, Version, RollBackRevisionView
else: # pragma: no cover
    from django.contrib.admin import ModelAdmin

    class ReversionContext(object):
        """No-op stand-in for reversion's revision context manager."""
        def __enter__(self):
            yield

        def __exit__(self, exc_type, exc_val, exc_tb):
            pass

        def __call__(self, func):
            """Allows this revision context to be used as a decorator."""
            @wraps(func)
            def do_revision_context(*args, **kwargs):
                self.__enter__()
                exception = False
                try:
                    try:
                        return func(*args, **kwargs)
                    except:
                        exception = True
                        if not self.__exit__(*sys.exc_info()):
                            raise
                finally:
                    # Only close the context here when no exception occurred;
                    # the except branch above already called __exit__.
                    if not exception:
                        self.__exit__(None, None, None)
            return do_revision_context

    def create_revision():
        # Mirrors reversion's create_revision() API with a no-op context.
        return ReversionContext()
PUBLISH_COMMENT = "Publish"
class PageAdmin(PlaceholderAdminMixin, ModelAdmin):
    """Admin for CMS pages: tree change list, per-language titles,
    permissions, publishing and (optional) reversion integration."""
    # Form for the basic-settings view; sub-views swap it via get_form_class().
    form = PageForm
    search_fields = ('=id', 'title_set__slug', 'title_set__title', 'reverse_id')
    # Templates for the reversion history/recover header panels.
    revision_form_template = "admin/cms/page/history/revision_header.html"
    recover_form_template = "admin/cms/page/history/recover_header.html"
    add_general_fields = ['title', 'slug', 'language', 'template']
    # The change list renders the page tree instead of a flat table.
    change_list_template = "admin/cms/page/tree/base.html"
    list_filter = ['in_navigation', 'template', 'changed_by', 'soft_root']
    # Title fields editable directly from the frontend toolbar.
    title_frontend_editable_fields = ['title', 'menu_title', 'page_title']
    inlines = PERMISSION_ADMIN_INLINES
def get_urls(self):
    """Return the admin URL patterns for page sub-views, plugin patterns
    and the default ModelAdmin urls (in that precedence order)."""
    info = "%s_%s" % (self.model._meta.app_label, self.model._meta.model_name)
    # Helper: wrap the view in admin_view() and name it '<app>_<model>_<fn>'.
    pat = lambda regex, fn: url(regex, self.admin_site.admin_view(fn), name='%s_%s' % (info, fn.__name__))
    url_patterns = [
        pat(r'^([0-9]+)/advanced-settings/$', self.advanced),
        pat(r'^([0-9]+)/dates/$', self.dates),
        pat(r'^([0-9]+)/permission-settings/$', self.permissions),
        pat(r'^([0-9]+)/delete-translation/$', self.delete_translation),
        pat(r'^([0-9]+)/move-page/$', self.move_page),
        pat(r'^([0-9]+)/copy-page/$', self.copy_page),
        pat(r'^([0-9]+)/copy-language/$', self.copy_language),
        pat(r'^([0-9]+)/dialog/copy/$', get_copy_dialog),  # copy dialog
        pat(r'^([0-9]+)/change-navigation/$', self.change_innavigation),
        pat(r'^([0-9]+)/permissions/$', self.get_permissions),
        pat(r'^([0-9]+)/undo/$', self.undo),
        pat(r'^([0-9]+)/redo/$', self.redo),
        # Deprecated in 3.2.1, please use ".../change-template/..." instead
        pat(r'^([0-9]+)/change_template/$', self.change_template),
        pat(r'^([0-9]+)/change-template/$', self.change_template),
        pat(r'^([0-9]+)/([a-z\-]+)/edit-field/$', self.edit_title_fields),
        pat(r'^([0-9]+)/([a-z\-]+)/publish/$', self.publish_page),
        pat(r'^([0-9]+)/([a-z\-]+)/unpublish/$', self.unpublish),
        pat(r'^([0-9]+)/([a-z\-]+)/revert/$', self.revert_page),
        pat(r'^([0-9]+)/([a-z\-]+)/preview/$', self.preview_page),
        pat(r'^add-page-type/$', self.add_page_type),
        pat(r'^published-pages/$', self.get_published_pagelist),
        url(r'^resolve/$', self.resolve, name="cms_page_resolve"),
        url(r'^get-tree/$', self.get_tree, name="get_tree"),
    ]
    if plugin_pool.get_all_plugins():
        url_patterns += plugin_pool.get_patterns()
    # Default ModelAdmin urls go last so the custom patterns win.
    url_patterns += super(PageAdmin, self).get_urls()
    return url_patterns
def get_revision_instances(self, request, object):
    """Returns all the instances to be used in the object's revision.

    Normalizes Title/public-page input to the draft Page, then collects
    the page, all its plugins (base + concrete instance) and its titles.
    """
    if isinstance(object, Title):
        object = object.page
    if isinstance(object, Page) and not object.publisher_is_draft:
        # Revisions are always taken against the public counterpart here.
        object = object.publisher_public
    placeholder_relation = find_placeholder_relation(object)
    data = [object]
    filters = {'placeholder__%s' % placeholder_relation: object}
    for plugin in CMSPlugin.objects.filter(**filters):
        data.append(plugin)
        plugin_instance, admin = plugin.get_plugin_instance()
        if plugin_instance:
            # Concrete plugin model (subclass of CMSPlugin) is versioned too.
            data.append(plugin_instance)
    if isinstance(object, Page):
        titles = object.title_set.all()
        for title in titles:
            # Detach the public link so the stored title is draft-only.
            title.publisher_public = None
            data.append(title)
    return data
def save_model(self, request, obj, form, change):
    """
    Move the page in the tree if necessary and save every placeholder
    Content object.

    Handles four admin flows distinguished by the request path/query:
    recover (reversion undelete), history (revert to revision), moving
    under a target node, and copying from a page type / copy target.
    """
    from cms.extensions import extension_pool
    target = request.GET.get('target', None)
    position = request.GET.get('position', None)
    if 'recover' in request.path_info:
        tmp_page = Page(
            path=None,
            numchild=0,
            depth=0,
            site_id=obj.site_id,
        )
        # It's necessary to create a temporary page
        # in order to calculate the tree attributes.
        if obj.parent_id:
            tmp_page = obj.parent.add_child(instance=tmp_page)
        else:
            tmp_page = obj.add_root(instance=tmp_page)
        obj.path = tmp_page.path
        obj.numchild = tmp_page.numchild
        obj.depth = tmp_page.depth
        # Remove temporary page.
        tmp_page.delete()
    else:
        if 'history' in request.path_info:
            # Reverting: keep the page's current tree position, the stored
            # revision must not move it.
            old_obj = self.model.objects.get(pk=obj.pk)
            obj.depth = old_obj.depth
            obj.parent_id = old_obj.parent_id
            obj.path = old_obj.path
            obj.numchild = old_obj.numchild
    new = False
    if not obj.pk:
        new = True
    obj.save()
    if 'recover' in request.path_info or 'history' in request.path_info:
        revert_plugins(request, obj.version.pk, obj)
    if target is not None and position is not None:
        try:
            target = self.model.objects.get(pk=target)
        except self.model.DoesNotExist:
            # Invalid target: silently keep the page where it is.
            pass
        else:
            if position == 'last-child' or position == 'first-child':
                obj.parent_id = target.pk
            else:
                obj.parent_id = target.parent_id
            obj.save()
            obj = obj.move(target, pos=position)
    page_type_id = form.cleaned_data.get('page_type')
    copy_target_id = request.GET.get('copy_target')
    copy_target = None
    if copy_target_id or page_type_id:
        if page_type_id:
            # A page type acts as the copy source.
            copy_target_id = page_type_id
        copy_target = self.model.objects.get(pk=copy_target_id)
        if not copy_target.has_view_permission(request):
            raise PermissionDenied()
        obj = obj.reload()
        copy_target._copy_attributes(obj, clean=True)
        obj.save()
        for lang in copy_target.get_languages():
            copy_target._copy_contents(obj, lang)
    if 'permission' not in request.path_info:
        language = form.cleaned_data['language']
        Title.objects.set_or_create(
            request,
            obj,
            form,
            language,
        )
    if copy_target:
        extension_pool.copy_extensions(copy_target, obj)
    # is it home? publish it right away
    # NOTE(review): 'language' is only bound in the non-permission branch
    # above; presumably a brand-new page can never come through the
    # permission view — confirm.
    if new and Page.objects.filter(site_id=obj.site_id).count() == 1:
        obj.publish(language)
def get_fieldsets(self, request, obj=None):
    """Use the form's own fieldsets when it defines them; otherwise build
    a single unnamed fieldset from its fields plus read-only fields."""
    form = self.get_form(request, obj, fields=None)
    fieldsets = getattr(form, 'fieldsets', None)
    if fieldsets is not None:
        return fieldsets
    field_names = list(form.base_fields)
    field_names += list(self.get_readonly_fields(request, obj))
    return [(None, {'fields': field_names})]
def get_inline_classes(self, request, obj=None, **kwargs):
    """Show the permission inlines only on an existing page's
    permission-settings sub-view; no inlines anywhere else."""
    on_permission_view = bool(obj) and 'permission' in request.path_info
    return PERMISSION_ADMIN_INLINES if on_permission_view else []
def get_form_class(self, request, obj=None, **kwargs):
    """Pick the form matching the admin sub-view encoded in the URL path.

    Checked in order: advanced settings, permissions, publication dates;
    defaults to the basic PageForm.
    """
    special_forms = (
        ('advanced', AdvancedSettingsForm),
        ('permission', PagePermissionForm),
        ('dates', PublicationDatesForm),
    )
    for marker, form_cls in special_forms:
        if marker in request.path_info:
            return form_cls
    return self.form
def get_form(self, request, obj=None, **kwargs):
    """
    Get PageForm for the Page model and modify its fields depending on
    the request.

    Tweaks per-request: sets language/site/parent initials, removes the
    page_type field when inapplicable, strips meta fields when adding a
    page type, and seeds title-field initials from the current Title.
    """
    language = get_language_from_request(request, obj)
    form_cls = self.get_form_class(request, obj)
    form = super(PageAdmin, self).get_form(request, obj, form=form_cls, **kwargs)
    # get_form method operates by overriding initial fields value which
    # may persist across invocation. Code below deepcopies fields definition
    # to avoid leaks
    for field in form.base_fields.keys():
        form.base_fields[field] = copy.deepcopy(form.base_fields[field])
    if 'language' in form.base_fields:
        form.base_fields['language'].initial = language
    if 'page_type' in form.base_fields:
        if 'copy_target' in request.GET or 'add_page_type' in request.GET or obj:
            # Editing or copying: choosing a page type makes no sense.
            del form.base_fields['page_type']
        elif not Title.objects.filter(page__parent__reverse_id=PAGE_TYPES_ID, language=language).exists():
            # No page types defined for this language.
            del form.base_fields['page_type']
    if 'add_page_type' in request.GET:
        # Page types carry no SEO/menu metadata.
        del form.base_fields['menu_title']
        del form.base_fields['meta_description']
        del form.base_fields['page_title']
    self.inlines = self.get_inline_classes(request, obj, **kwargs)
    if obj:
        if 'history' in request.path_info or 'recover' in request.path_info:
            # The revision pk is the second-to-last URL path segment.
            version_id = request.path_info.split('/')[-2]
        else:
            version_id = None
        title_obj = obj.get_title_obj(language=language, fallback=False, version_id=version_id, force_reload=True)
        if 'site' in form.base_fields and form.base_fields['site'].initial is None:
            form.base_fields['site'].initial = obj.site
        for name in ('slug', 'title', 'meta_description', 'menu_title', 'page_title', 'redirect'):
            if name in form.base_fields:
                form.base_fields[name].initial = getattr(title_obj, name)
        if 'overwrite_url' in form.base_fields:
            if title_obj.has_url_overwrite:
                form.base_fields['overwrite_url'].initial = title_obj.path
            else:
                form.base_fields['overwrite_url'].initial = ''
    else:
        # Adding a page: blank title fields, seed parent from target/copy_target.
        for name in ('slug', 'title'):
            form.base_fields[name].initial = u''
        if 'target' in request.GET or 'copy_target' in request.GET:
            target = request.GET.get('copy_target') or request.GET.get('target')
            if 'position' in request.GET:
                position = request.GET['position']
                if position == 'last-child' or position == 'first-child':
                    form.base_fields['parent'].initial = request.GET.get('target', None)
                else:
                    # Sibling insert: the new page shares the target's parent.
                    sibling = self.model.objects.get(pk=target)
                    form.base_fields['parent'].initial = sibling.parent_id
            else:
                form.base_fields['parent'].initial = request.GET.get('target', None)
        form.base_fields['site'].initial = request.session.get('cms_admin_site', None)
    return form
def advanced(self, request, object_id):
    """Advanced-settings sub-view; needs the matching page permission."""
    page = get_object_or_404(self.model, pk=object_id)
    if not page.has_advanced_settings_permission(request):
        raise PermissionDenied("No permission for editing advanced settings")
    context = {'advanced_settings': True, 'title': _("Advanced Settings")}
    return self.change_view(request, object_id, extra_context=context)
def dates(self, request, object_id):
    """Publication-dates sub-view (no extra permission check required)."""
    context = {'publishing_dates': True, 'title': _("Publishing dates")}
    return self.change_view(request, object_id, extra_context=context)
def permissions(self, request, object_id):
    """Permission-settings sub-view for a page.

    Requires the change-permissions permission on the page.
    """
    page = get_object_or_404(self.model, pk=object_id)
    if not page.has_change_permissions_permission(request):
        # BUG FIX: the denial message said "advanced settings"
        # (copy-pasted from advanced()); report the actual view.
        raise PermissionDenied("No permission for editing page permissions")
    return self.change_view(request, object_id, extra_context={'show_permissions': True, 'title': _("Change Permissions")})
def get_inline_instances(self, request, obj=None):
    """Filter permission inlines down to the ones the user may see.

    Only applies when CMS_PERMISSION is enabled and an object exists;
    view-restriction inlines are always kept.
    """
    inlines = super(PageAdmin, self).get_inline_instances(request, obj)
    if get_cms_setting('PERMISSION') and obj:
        filtered_inlines = []
        for inline in inlines:
            if (isinstance(inline, PagePermissionInlineAdmin)
                and not isinstance(inline, ViewRestrictionInlineAdmin)):
                if "recover" in request.path or "history" in request.path:
                    # do not display permissions in recover mode
                    continue
                if not obj.has_change_permissions_permission(request):
                    continue
            filtered_inlines.append(inline)
        inlines = filtered_inlines
    return inlines
def get_unihandecode_context(self, language):
    """Build the template context for the unihandecode JS slug helper.

    Falls back to the configured default decoder when *language* has no
    dedicated decoder; returns empty urls when any setting is missing.
    """
    lang_code = language[:2]
    if lang_code not in get_cms_setting('UNIHANDECODE_DECODERS'):
        lang_code = get_cms_setting('UNIHANDECODE_DEFAULT_DECODER')
    host = get_cms_setting('UNIHANDECODE_HOST')
    version = get_cms_setting('UNIHANDECODE_VERSION')
    script_urls = []
    if lang_code and host and version:
        script_urls = [
            '%sunihandecode-%s.core.min.js' % (host, version),
            '%sunihandecode-%s.%s.min.js' % (host, version, lang_code),
        ]
    return {'unihandecode_lang': lang_code, 'unihandecode_urls': script_urls}
@create_revision()
def add_view(self, request, form_url='', extra_context=None):
    """Add view wrapped in a revision; adapts the title and language-tab
    context for the page-type and copy flows."""
    extra_context = extra_context or {}
    language = get_language_from_request(request)
    extra_context.update({
        'language': language,
    })
    if not request.GET.get('add_page_type') is None:
        extra_context.update({
            'add_page_type': True,
            'title': _("Add Page Type"),
        })
    elif 'copy_target' in request.GET:
        extra_context.update({
            'title': _("Add Page Copy"),
        })
    else:
        # Plain add: show the language tabs.
        extra_context = self.update_language_tab_context(request, context=extra_context)
    extra_context.update(self.get_unihandecode_context(language))
    return super(PageAdmin, self).add_view(request, form_url, extra_context=extra_context)
def change_view(self, request, object_id, form_url='', extra_context=None):
    """
    The 'change' admin view for the Page model.

    Adds page/permission context and language tabs, preserves the chosen
    language across the post-save redirect, and redirects back to the
    plain change view after a history/recover POST.
    """
    if extra_context is None:
        extra_context = {'basic_info': True}
    try:
        obj = self.model.objects.get(pk=object_id)
    except self.model.DoesNotExist:
        # Don't raise Http404 just yet, because we haven't checked
        # permissions yet. We don't want an unauthenticated user to be able
        # to determine whether a given object exists.
        obj = None
    else:
        context = {
            'page': obj,
            'CMS_PERMISSION': get_cms_setting('PERMISSION'),
            'ADMIN_MEDIA_URL': settings.STATIC_URL,
            'can_change': obj.has_change_permission(request),
            'can_change_permissions': obj.has_change_permissions_permission(request),
            'current_site_id': settings.SITE_ID,
        }
        context.update(extra_context or {})
        extra_context = self.update_language_tab_context(request, obj, context)
    tab_language = get_language_from_request(request)
    extra_context.update(self.get_unihandecode_context(tab_language))
    response = super(PageAdmin, self).change_view(
        request, object_id, form_url=form_url, extra_context=extra_context)
    # Keep the active language tab after a redirect back to this same view.
    # NOTE(review): response._headers is a Django-internal structure
    # (pre-2.2); this relies on that implementation detail.
    if tab_language and response.status_code == 302 and response._headers['location'][1] == request.path_info:
        location = response._headers['location']
        response._headers['location'] = (location[0], "%s?language=%s" % (location[1], tab_language))
    if request.method == "POST" and response.status_code in (200, 302):
        if 'history' in request.path_info:
            return HttpResponseRedirect(admin_reverse('cms_page_change', args=(quote(object_id),)))
        elif 'recover' in request.path_info:
            return HttpResponseRedirect(admin_reverse('cms_page_change', args=(quote(object_id),)))
    return response
def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
    """Inject 'filled_languages': languages that already have a title for
    *obj*, restricted to the languages enabled for its site."""
    present = []
    if obj:
        titles = obj.title_set.filter(title__isnull=False)
        present = [row[0] for row in titles.values_list('language')]
    allowed = {code for code, _name in self._get_site_languages(obj)}
    context.update({
        'filled_languages': [code for code in present if code in allowed],
    })
    return super(PageAdmin, self).render_change_form(request, context, add, change, form_url, obj)
def _get_site_languages(self, obj=None):
    """Language (code, name) choices for *obj*'s site, or for the current
    site when no object is given."""
    if obj is None:
        site_id = Site.objects.get_current().pk
    else:
        site_id = obj.site_id
    return get_language_tuple(site_id)
def update_language_tab_context(self, request, obj=None, context=None):
    """Add the language-tab variables used by the change-form templates."""
    context = context or {}
    languages = self._get_site_languages(obj)
    has_multiple = len(list(languages)) > 1
    context.update({
        'language': get_language_from_request(request, obj),
        'language_tabs': languages,
        # Dates are not language dependent, thus we hide the language
        # selection bar: the language is forced through the form class
        'show_language_tabs': has_multiple and not context.get('publishing_dates', False),
    })
    return context
def response_change(self, request, obj):
    """Hook run after every successful page change.

    The page is saved one more time so that related changes collected
    during the request are published together with the page itself.
    """
    obj.save()
    return super(PageAdmin, self).response_change(request, obj)
def has_add_permission(self, request):
    """
    Return true if the current user has permission to add a new page.
    """
    if not get_cms_setting('PERMISSION'):
        return super(PageAdmin, self).has_add_permission(request)
    return permissions.has_page_add_permission_from_request(request)
def has_change_permission(self, request, obj=None):
    """
    Return true if the current user has permission on the page.
    Return the string 'All' if the user has all rights.
    """
    if not get_cms_setting('PERMISSION'):
        return super(PageAdmin, self).has_change_permission(request, obj)
    if obj is not None:
        return obj.has_change_permission(request)
    return permissions.has_page_change_permission(request)
def has_delete_permission(self, request, obj=None):
    """Delete permission, honouring per-object CMS permissions when the
    CMS_PERMISSION setting is enabled and an object is given."""
    if get_cms_setting('PERMISSION') and obj is not None:
        return obj.has_delete_permission(request)
    return super(PageAdmin, self).has_delete_permission(request, obj)
def has_recover_permission(self, request):
    """
    Returns True if the user has the right to recover (undelete) pages.

    Requires django-reversion; superusers always may, otherwise a global
    page permission with can_recover_page is needed.
    """
    if not is_installed('reversion'):
        return False
    user = request.user
    if user.is_superuser:
        return True
    try:
        if has_global_page_permission(request, can_recover_page=True):
            return True
    except Exception:
        # BUG FIX: was a bare 'except:' which also swallowed SystemExit
        # and KeyboardInterrupt.  Any lookup failure still means
        # "no permission" rather than a 500.
        pass
    return False
def has_add_plugin_permission(self, request, placeholder, plugin_type):
    """Allow adding *plugin_type* only when the user holds the plugin
    'add' permission and the owning page (if any) is an editable draft."""
    user_may_add = permissions.has_plugin_permission(request.user, plugin_type, "add")
    if not user_may_add:
        return False
    page = placeholder.page
    if page:
        if not page.has_change_permission(request):
            return False
        if not page.publisher_is_draft:
            return False
    return True
def has_copy_plugin_permission(self, request, source_placeholder, target_placeholder, plugins):
    """Copying needs change rights on both pages, a draft target page and
    the 'add' permission for every plugin type being copied."""
    for placeholder in (source_placeholder, target_placeholder):
        page = placeholder.page
        if page and not page.has_change_permission(request):
            return False
    target_page = target_placeholder.page
    if target_page and not target_page.publisher_is_draft:
        return False
    return all(
        permissions.has_plugin_permission(request.user, plugin.plugin_type, "add")
        for plugin in plugins
    )
def has_change_plugin_permission(self, request, plugin):
    """A plugin may be edited only on a draft page the user can change,
    and only with the plugin-type 'change' permission."""
    placeholder = plugin.placeholder
    page = placeholder.page if placeholder else None
    if page:
        if not page.has_change_permission(request):
            return False
        if not page.publisher_is_draft:
            return False
    return permissions.has_plugin_permission(request.user, plugin.plugin_type, "change")
def has_move_plugin_permission(self, request, plugin, target_placeholder):
    """Moving needs the plugin-type 'change' permission and, when the
    plugin sits on a page, an editable draft page."""
    user_may_change = permissions.has_plugin_permission(request.user, plugin.plugin_type, "change")
    if not user_may_change:
        return False
    page = plugin.placeholder.page
    if not page:
        return True
    if not page.has_change_permission(request):
        return False
    return bool(page.publisher_is_draft)
def has_delete_plugin_permission(self, request, plugin):
    """Deletion needs the plugin-type 'delete' permission and, when the
    plugin sits on a page, a draft page the user can change."""
    user_may_delete = permissions.has_plugin_permission(request.user, plugin.plugin_type, "delete")
    if not user_may_delete:
        return False
    page = plugin.placeholder.page
    if not page:
        return True
    return bool(page.publisher_is_draft and page.has_change_permission(request))
def has_clear_placeholder_permission(self, request, placeholder):
    """Clearing a placeholder follows the same draft/change-permission
    rules as plugin deletion; always allowed off-page."""
    page = placeholder.page if placeholder else None
    if not page:
        return True
    return bool(page.publisher_is_draft and page.has_change_permission(request))
@create_revision()
def post_add_plugin(self, request, placeholder, plugin):
    """After a plugin is added: record a page revision (reversion only)."""
    if is_installed('reversion') and placeholder.page:
        plugin_name = force_text(plugin_pool.get_plugin(plugin.plugin_type).name)
        message = _(u"%(plugin_name)s plugin added to %(placeholder)s") % {
            'plugin_name': plugin_name, 'placeholder': placeholder}
        self.cleanup_history(placeholder.page)
        helpers.make_revision_with_plugins(placeholder.page, request.user, message)
@create_revision()
def post_copy_plugins(self, request, source_placeholder, target_placeholder, plugins):
    """After plugins are copied: record a revision on the target page."""
    page = target_placeholder.page
    if page and is_installed('reversion'):
        message = _(u"Copied plugins to %(placeholder)s") % {'placeholder': target_placeholder}
        self.cleanup_history(page)
        helpers.make_revision_with_plugins(page, request.user, message)
@create_revision()
def post_edit_plugin(self, request, plugin):
    """After a plugin edit: record a page revision (reversion only)."""
    page = plugin.placeholder.page
    # if reversion is installed, save version of the page plugins
    if page and is_installed('reversion'):
        plugin_name = force_text(plugin_pool.get_plugin(plugin.plugin_type).name)
        message = _(
            u"%(plugin_name)s plugin edited at position %(position)s in %(placeholder)s") % {
            'plugin_name': plugin_name,
            'position': plugin.position,
            'placeholder': plugin.placeholder.slot
        }
        self.cleanup_history(page)
        helpers.make_revision_with_plugins(page, request.user, message)
@create_revision()
def post_move_plugin(self, request, source_placeholder, target_placeholder, plugin):
    """After a plugin move: record a revision on the affected page."""
    # order matters.
    # We give priority to the target page but fallback to the source.
    # This comes into play when moving plugins between static placeholders
    # and non static placeholders.
    page = target_placeholder.page or source_placeholder.page
    if page and is_installed('reversion'):
        message = _(u"Moved plugins to %(placeholder)s") % {'placeholder': target_placeholder}
        self.cleanup_history(page)
        helpers.make_revision_with_plugins(page, request.user, message)
@create_revision()
def post_delete_plugin(self, request, plugin):
    """After a plugin deletion: re-save the page and record a revision."""
    plugin_name = force_text(plugin_pool.get_plugin(plugin.plugin_type).name)
    page = plugin.placeholder.page
    if page:
        # Saving refreshes the page's publisher state after the deletion.
        page.save()
        comment = _("%(plugin_name)s plugin at position %(position)s in %(placeholder)s was deleted.") % {
            'plugin_name': plugin_name,
            'position': plugin.position,
            'placeholder': plugin.placeholder,
        }
        if is_installed('reversion'):
            self.cleanup_history(page)
            helpers.make_revision_with_plugins(page, request.user, comment)
@create_revision()
def post_clear_placeholder(self, request, placeholder):
    """After a placeholder is emptied: re-save the page and record a revision."""
    page = placeholder.page
    if page:
        # Saving refreshes the page's publisher state after clearing.
        page.save()
        comment = _('All plugins in the placeholder "%(name)s" were deleted.') % {
            'name': force_text(placeholder)
        }
        if is_installed('reversion'):
            self.cleanup_history(page)
            helpers.make_revision_with_plugins(page, request.user, comment)
def get_placeholder_template(self, request, placeholder):
    """Template of the page owning *placeholder*; None for placeholders
    that are not attached to a page."""
    page = placeholder.page
    return page.get_template() if page else None
    def changelist_view(self, request, extra_context=None):
        """The 'change list' admin view for this model.

        Renders the CMS page tree: builds a ``CMSChangeList``, restores which
        tree nodes the user had open (from the ``djangocms_nodes_open``
        cookie) and passes site/language switching data to the template.
        """
        from django.contrib.admin.views.main import ERROR_FLAG
        opts = self.model._meta
        app_label = opts.app_label
        if not self.has_change_permission(request, None):
            return HttpResponseForbidden(force_text(_("You do not have permission to change pages.")))
        try:
            cl = CMSChangeList(request, self.model, self.list_display, self.list_display_links, self.list_filter,
                               self.date_hierarchy, self.search_fields, self.list_select_related, self.list_per_page,
                               self.list_max_show_all, self.list_editable, self)
        except IncorrectLookupParameters:
            # Wacky lookup parameters were given, so redirect to the main
            # changelist page, without parameters, and pass an 'invalid=1'
            # parameter via the query string. If wacky parameters were given and
            # the 'invalid=1' parameter was already in the query string, something
            # is screwed up with the database, so display an error page.
            if ERROR_FLAG in request.GET.keys():
                return render(request, 'admin/invalid_setup.html', {'title': _('Database error')})
            return HttpResponseRedirect(request.path_info + '?' + ERROR_FLAG + '=1')
        cl.set_items(request)
        # Site filter from the query string, defaulting to the current site.
        site_id = request.GET.get('site__exact', None)
        if site_id is None:
            site_id = current_site(request).pk
        site_id = int(site_id)
        # languages
        languages = get_language_list(site_id)
        # parse the cookie that saves which page trees have
        # been opened already and extracts the page ID
        djangocms_nodes_open = request.COOKIES.get('djangocms_nodes_open', '')
        raw_nodes = unquote(djangocms_nodes_open).split(',')
        try:
            open_menu_trees = [int(c.split('page_', 1)[1]) for c in raw_nodes]
        except IndexError:
            open_menu_trees = []
        # Language may be present in the GET dictionary but empty
        language = request.GET.get('language', get_language())
        if not language:
            language = get_language()
        context = {
            'title': cl.title,
            'is_popup': cl.is_popup,
            'cl': cl,
            'opts': opts,
            'has_add_permission': self.has_add_permission(request),
            'root_path': admin_reverse('index'),
            'app_label': app_label,
            'preview_language': language,
            'CMS_MEDIA_URL': get_cms_setting('MEDIA_URL'),
            'CMS_PERMISSION': get_cms_setting('PERMISSION'),
            'DEBUG': settings.DEBUG,
            'site_languages': languages,
            'open_menu_trees': open_menu_trees,
        }
        # Extra flags only make sense when django-reversion is available.
        if is_installed('reversion'):
            context['has_recover_permission'] = self.has_recover_permission(request)
            context['has_change_permission'] = self.has_change_permission(request)
        context.update(extra_context or {})
        return render(request, self.change_list_template or [
            'admin/%s/%s/change_list.html' % (app_label, opts.object_name.lower()),
            'admin/%s/change_list.html' % app_label,
            'admin/change_list.html'
        ], context)
def recoverlist_view(self, request, extra_context=None):
if not self.has_recover_permission(request):
raise PermissionDenied
return super(PageAdmin, self).recoverlist_view(request, extra_context)
def recover_view(self, request, version_id, extra_context=None):
if not self.has_recover_permission(request):
raise PermissionDenied
extra_context = self.update_language_tab_context(request, None, extra_context)
request.original_version_id = version_id
return super(PageAdmin, self).recover_view(request, version_id, extra_context)
def revision_view(self, request, object_id, version_id, extra_context=None):
if not is_installed('reversion'):
return HttpResponseBadRequest('django reversion not installed')
if not self.has_change_permission(request, Page.objects.get(pk=object_id)):
raise PermissionDenied
page = get_object_or_404(self.model, pk=object_id)
if not page.publisher_is_draft:
page = page.publisher_draft
if not page.has_change_permission(request):
return HttpResponseForbidden(force_text(_("You do not have permission to change this page")))
try:
version = Version.objects.get(pk=version_id)
clean = page._apply_revision(version.revision, set_dirty=True)
if not clean:
messages.error(request, _("Page reverted but slug stays the same because of url collisions."))
with create_revision():
adapter = self.revision_manager.get_adapter(page.__class__)
self.revision_context_manager.add_to_context(self.revision_manager, page, adapter.get_version_data(page))
self.revision_context_manager.set_comment(_("Reverted to previous version, saved on %(datetime)s") % {"datetime": localize(version.revision.date_created)})
except IndexError as e:
return HttpResponseBadRequest(e.message)
return HttpResponseRedirect(admin_reverse('cms_page_change', args=(quote(object_id),)))
def history_view(self, request, object_id, extra_context=None):
if not self.has_change_permission(request, Page.objects.get(pk=object_id)):
raise PermissionDenied
extra_context = self.update_language_tab_context(request, None, extra_context)
return super(PageAdmin, self).history_view(request, object_id, extra_context)
    def get_object(self, request, object_id, from_field=None):
        """Fetch the admin object; during a reversion recover/revert the
        object's parent may be reset (see ``_reset_parent_during_reversion``).
        """
        if from_field:
            obj = super(PageAdmin, self).get_object(request, object_id, from_field)
        else:
            # This is for DJANGO_16 (older signature without ``from_field``).
            obj = super(PageAdmin, self).get_object(request, object_id)
        # ``original_version_id`` is set by recover_view(); when present we
        # are rendering a reversion form and may need to drop a parent that
        # no longer exists.
        if is_installed('reversion') and getattr(request, 'original_version_id', None):
            version = get_object_or_404(Version, pk=getattr(request, 'original_version_id', None))
            recover = 'recover' in request.path_info
            revert = 'history' in request.path_info
            obj, version = self._reset_parent_during_reversion(obj, version, revert, recover)
        return obj
def _reset_parent_during_reversion(self, obj, version, revert=False, recover=False):
if version.field_dict['parent']:
try:
Page.objects.get(pk=version.field_dict['parent'])
except:
if revert and obj.parent_id != int(version.field_dict['parent']):
version.field_dict['parent'] = obj.parent_id
if recover:
obj.parent = None
obj.parent_id = None
version.field_dict['parent'] = None
obj.version = version
return obj, version
    # Reversion 1.9+ no longer uses these two methods to save revision, but we still need them
    # as we do not use signals
    def log_addition(self, request, object, message=None):
        """Sets the version meta information."""
        # On reversion < 1.9 (no ``get_revision_data`` attribute) the revision
        # context must be populated by hand before logging.
        if is_installed('reversion') and not hasattr(self, 'get_revision_data'):
            adapter = self.revision_manager.get_adapter(object.__class__)
            self.revision_context_manager.add_to_context(self.revision_manager, object, adapter.get_version_data(object))
            self.revision_context_manager.set_comment(REVISION_INITIAL_COMMENT)
        # Same code as reversion 1.9
        try:
            super(PageAdmin, self).log_addition(request, object, REVISION_INITIAL_COMMENT)
        except TypeError: # Django < 1.9 pragma: no cover
            super(PageAdmin, self).log_addition(request, object)
def log_change(self, request, object, message):
"""Sets the version meta information."""
if is_installed('reversion') and not hasattr(self, 'get_revision_data'):
adapter = self.revision_manager.get_adapter(object.__class__)
self.revision_context_manager.add_to_context(self.revision_manager, object, adapter.get_version_data(object))
self.revision_context_manager.set_comment(message)
if isinstance(object, Title):
page = object.page
if isinstance(object, Page):
page = object
helpers.make_revision_with_plugins(page, request.user, message)
super(PageAdmin, self).log_change(request, object, message)
# This is just for Django 1.6 / reversion 1.8 compatibility
# The handling of recover / revision in 3.3 can be simplified
# by using the new reversion semantic and django changeform_view
def revisionform_view(self, request, version, template_name, extra_context=None):
try:
with transaction.atomic():
# Revert the revision.
version.revision.revert(delete=True)
# Run the normal change_view view.
with self._create_revision(request):
response = self.change_view(request, version.object_id, request.path, extra_context)
# Decide on whether the keep the changes.
if request.method == "POST" and response.status_code == 302:
self.revision_context_manager.set_comment(_("Reverted to previous version, saved on %(datetime)s") % {"datetime": localize(version.revision.date_created)})
else:
response.template_name = template_name
response.render()
raise RollBackRevisionView
except RollBackRevisionView:
pass
return response
def render_revision_form(self, request, obj, version, context, revert=False, recover=False):
# reset parent to null if parent is not found
obj, version = self._reset_parent_during_reversion(obj, version, revert, recover)
return super(PageAdmin, self).render_revision_form(request, obj, version, context, revert, recover)
@require_POST
def undo(self, request, object_id):
if not is_installed('reversion'):
return HttpResponseBadRequest('django reversion not installed')
page = get_object_or_404(self.model, pk=object_id)
if not page.publisher_is_draft:
page = page.publisher_draft
if not page.has_change_permission(request):
return HttpResponseForbidden(force_text(_("You do not have permission to change this page")))
try:
reverted, clean = page.undo()
if not clean:
messages.error(request, _("Page reverted but slug stays the same because of url collisions."))
except IndexError as e:
return HttpResponseBadRequest(e.message)
return HttpResponse("ok")
@require_POST
def redo(self, request, object_id):
if not is_installed('reversion'):
return HttpResponseBadRequest('django reversion not installed')
page = get_object_or_404(self.model, pk=object_id)
if not page.publisher_is_draft:
page = page.publisher_draft
if not page.has_change_permission(request):
return HttpResponseForbidden(force_text(_("You do not have permission to change this page")))
try:
reverted, clean = page.redo()
if not clean:
messages.error(request, _("Page reverted but slug stays the same because of url collisions."))
except IndexError as e:
return HttpResponseBadRequest(e.message)
return HttpResponse("ok")
@require_POST
@create_revision()
def change_template(self, request, object_id):
page = get_object_or_404(self.model, pk=object_id)
if not page.has_change_permission(request):
return HttpResponseForbidden(force_text(_("You do not have permission to change the template")))
to_template = request.POST.get("template", None)
if to_template not in dict(get_cms_setting('TEMPLATES')):
return HttpResponseBadRequest(force_text(_("Template not valid")))
page.template = to_template
page.save()
if is_installed('reversion'):
message = _("Template changed to %s") % dict(get_cms_setting('TEMPLATES'))[to_template]
self.cleanup_history(page)
helpers.make_revision_with_plugins(page, request.user, message)
return HttpResponse(force_text(_("The template was successfully changed")))
    @require_POST
    @transaction.atomic
    def move_page(self, request, page_id, extra_context=None):
        """
        Move the page to the requested target, at the given position.

        NOTE: We have to change from one "coordinate system" to another to
        adapt JSTree to Django Treebeard.

        If the Tree looks like this:
            <root>
               - ...
               - ...
               - Page 4
                  - Page 5 (position 0)
                  - ...
        For example,
            target=4, position=1 => target=5, position="right"
            target=4, position=0 => target=4, position="first-child"

        POST params: ``target`` (pk of the new parent; absent means root
        level), ``position`` (index among the target's children) and
        ``site``. Responds with the re-rendered admin tree row via
        ``jsonify_request``.
        """
        target = request.POST.get('target', None)
        position = request.POST.get('position', 0)
        site_id = request.POST.get('site', None)
        try:
            position = int(position)
        except (TypeError, ValueError):
            position = 0
        try:
            page = self.model.objects.get(pk=page_id)
        except self.model.DoesNotExist:
            return jsonify_request(HttpResponseBadRequest("error"))
        try:
            site = Site.objects.get(id=int(site_id))
        except (TypeError, ValueError, MultipleObjectsReturned,
                ObjectDoesNotExist):
            site = get_current_site(request)
        if target is None:
            # Special case: If «target» is not provided, it means to let the
            # page become a new root node.
            try:
                tb_target = Page.get_root_nodes().filter(
                    publisher_is_draft=True, site=site)[position]
                # NOTE(review): when the page already precedes the target among
                # the same siblings, insert to the target's right — presumably
                # to compensate for the slot freed by the move; confirm against
                # treebeard's move semantics.
                if page.is_sibling_of(tb_target) and page.path < tb_target.path:
                    tb_position = "right"
                else:
                    tb_position = "left"
            except IndexError:
                # Move page to become the last root node.
                tb_target = Page.get_last_root_node()
                tb_position = "right"
        else:
            try:
                target = tb_target = self.model.objects.get(pk=int(target), site=site)
            except (TypeError, ValueError, self.model.DoesNotExist):
                return jsonify_request(HttpResponseBadRequest("error"))
            if position == 0:
                tb_position = "first-child"
            else:
                try:
                    tb_target = target.get_children().filter(
                        publisher_is_draft=True, site=site)[position]
                    if page.is_sibling_of(tb_target) and page.path < tb_target.path:
                        tb_position = "right"
                    else:
                        tb_position = "left"
                except IndexError:
                    tb_position = "last-child"
        # Does the user have permissions to do this...?
        if not page.has_move_page_permission(request) or (
                target and not target.has_add_permission(request)):
            return jsonify_request(
                HttpResponseForbidden(
                    force_text(_("Error! You don't have permissions to move "
                                 "this page. Please reload the page"))))
        page.move_page(tb_target, tb_position)
        if is_installed('reversion'):
            self.cleanup_history(page)
            helpers.make_revision_with_plugins(
                page, request.user, _("Page moved"))
        return jsonify_request(
            HttpResponse(admin_utils.render_admin_menu_item(request, page)))
def get_permissions(self, request, page_id):
page = get_object_or_404(self.model, id=page_id)
can_change_list = Page.permissions.get_change_id_list(request.user, page.site_id)
global_page_permissions = GlobalPagePermission.objects.filter(sites__in=[page.site_id])
page_permissions = PagePermission.objects.for_page(page)
all_permissions = list(global_page_permissions) + list(page_permissions)
# does he can change global permissions ?
has_global = permissions.has_global_change_permissions_permission(request)
permission_set = []
for permission in all_permissions:
if isinstance(permission, GlobalPagePermission):
if has_global:
permission_set.append([(True, True), permission])
else:
permission_set.append([(True, False), permission])
else:
if can_change_list == PagePermissionsPermissionManager.GRANT_ALL:
can_change = True
else:
can_change = permission.page_id in can_change_list
permission_set.append([(False, can_change), permission])
context = {
'page': page,
'permission_set': permission_set,
}
return render(request, 'admin/cms/page/permissions.html', context)
@require_POST
@transaction.atomic
def copy_language(self, request, page_id):
with create_revision():
source_language = request.POST.get('source_language')
target_language = request.POST.get('target_language')
page = Page.objects.get(pk=page_id)
placeholders = page.get_placeholders()
if not target_language or not target_language in get_language_list():
return HttpResponseBadRequest(force_text(_("Language must be set to a supported language!")))
for placeholder in placeholders:
plugins = list(
placeholder.cmsplugin_set.filter(language=source_language).order_by('path'))
if not self.has_copy_plugin_permission(request, placeholder, placeholder, plugins):
return HttpResponseForbidden(force_text(_('You do not have permission to copy these plugins.')))
copy_plugins.copy_plugins_to(plugins, placeholder, target_language)
if page and is_installed('reversion'):
message = _(u"Copied plugins from %(source_language)s to %(target_language)s") % {
'source_language': source_language, 'target_language': target_language}
self.cleanup_history(page)
helpers.make_revision_with_plugins(page, request.user, message)
return HttpResponse("ok")
    @require_POST
    @transaction.atomic
    def copy_page(self, request, page_id, extra_context=None):
        """
        Copy the page and all its plugins and descendants to the requested
        target, at the given position.

        NOTE: We have to change from one "coordinate system" to another to
        adapt JSTree to Django Treebeard. See comments in move_page().

        NOTE: This code handles more cases then are *currently* supported in
        the UI, specifically, the target should never be None and the position
        should never be non-zero. These are implemented, however, because we
        intend to support these cases later.

        Responds with ``{"id": <new page pk>}`` as JSON on success.
        """
        target = request.POST.get('target', None)
        position = request.POST.get('position', None)
        site_id = request.POST.get('site', None)
        copy_permissions = request.POST.get('copy_permissions', False)
        try:
            page = self.model.objects.get(pk=page_id)
        except self.model.DoesNotExist:
            return jsonify_request(HttpResponseBadRequest("Error"))
        try:
            position = int(position)
        except (TypeError, ValueError):
            position = 0
        try:
            site = Site.objects.get(id=int(site_id))
        except (TypeError, ValueError, MultipleObjectsReturned,
                ObjectDoesNotExist):
            site = get_current_site(request)
        if target is None:
            # Special case: If «target» is not provided, it means to create the
            # new page as a root node.
            try:
                tb_target = Page.get_root_nodes().filter(
                    publisher_is_draft=True, site=site)[position]
                tb_position = "left"
            except IndexError:
                # New page to become the last root node.
                tb_target = Page.get_last_root_node()
                tb_position = "right"
        else:
            try:
                tb_target = self.model.objects.get(pk=int(target), site=site)
                assert tb_target.has_add_permission(request)
            except (TypeError, ValueError, self.model.DoesNotExist,
                    AssertionError):
                return jsonify_request(HttpResponseBadRequest("Error"))
            if position == 0:
                # This is really the only possible value for position.
                tb_position = "first-child"
            else:
                # But, just in case...
                try:
                    tb_target = tb_target.get_children().filter(
                        publisher_is_draft=True, site=site)[position]
                    tb_position = "left"
                except IndexError:
                    tb_position = "last-child"
        try:
            new_page = page.copy_page(tb_target, site, tb_position,
                                      copy_permissions=copy_permissions)
            results = {"id": new_page.pk}
            return HttpResponse(
                json.dumps(results), content_type='application/json')
        except ValidationError:
            exc = sys.exc_info()[1]
            return jsonify_request(HttpResponseBadRequest(exc.messages))
    @require_POST
    @transaction.atomic
    @create_revision()
    def publish_page(self, request, page_id, language):
        """Publish a page draft and/or the static placeholders listed in the
        ``?statics=`` query parameter, then redirect to a suitable location.
        """
        try:
            page = Page.objects.get(id=page_id, publisher_is_draft=True)
        except Page.DoesNotExist:
            page = None
        # ensure user has permissions to publish this page
        all_published = True
        if page:
            if not page.has_publish_permission(request):
                return HttpResponseForbidden(force_text(_("You do not have permission to publish this page")))
            published = page.publish(language)
            if not published:
                all_published = False
        statics = request.GET.get('statics', '')
        if not statics and not page:
            raise Http404("No page or stack found for publishing.")
        if statics:
            # ``statics`` is a comma-separated list of StaticPlaceholder pks.
            static_ids = statics .split(',')
            for pk in static_ids:
                static_placeholder = StaticPlaceholder.objects.get(pk=pk)
                published = static_placeholder.publish(request, language)
                if not published:
                    all_published = False
        if page:
            if all_published:
                if page.get_publisher_state(language) == PUBLISHER_STATE_PENDING:
                    messages.warning(request, _("Page not published! A parent page is not published yet."))
                else:
                    messages.info(request, _('The content was successfully published.'))
                LogEntry.objects.log_action(
                    user_id=request.user.id,
                    content_type_id=ContentType.objects.get_for_model(Page).pk,
                    object_id=page_id,
                    object_repr=page.get_title(language),
                    action_flag=CHANGE,
                )
            else:
                if page.get_publisher_state(language) == PUBLISHER_STATE_PENDING:
                    messages.warning(request, _("Page not published! A parent page is not published yet."))
                else:
                    messages.warning(request, _("There was a problem publishing your content"))
        if is_installed('reversion') and page:
            self.cleanup_history(page, publish=True)
            helpers.make_revision_with_plugins(page, request.user, PUBLISH_COMMENT)
            # create a new publish reversion
        if 'node' in request.GET or 'node' in request.POST:
            # if request comes from tree..
            return HttpResponse(admin_utils.render_admin_menu_item(request, page))
        if 'redirect' in request.GET:
            return HttpResponseRedirect(request.GET['redirect'])
        referrer = request.META.get('HTTP_REFERER', '')
        path = admin_reverse("cms_page_changelist")
        if request.GET.get('redirect_language'):
            path = "%s?language=%s&page_id=%s" % (path, request.GET.get('redirect_language'), request.GET.get('redirect_page_id'))
        # When the user did not come from the admin, send them back to the
        # frontend with the toolbar's edit mode switched off.
        if admin_reverse('index') not in referrer:
            if all_published:
                if page:
                    if page.get_publisher_state(language) == PUBLISHER_STATE_PENDING:
                        path = page.get_absolute_url(language, fallback=True)
                    else:
                        public_page = Page.objects.get(publisher_public=page.pk)
                        path = '%s?%s' % (public_page.get_absolute_url(language, fallback=True), get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF'))
                else:
                    path = '%s?%s' % (referrer, get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF'))
            else:
                path = '/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF')
        return HttpResponseRedirect(path)
def cleanup_history(self, page, publish=False):
if is_installed('reversion') and page:
# delete revisions that are not publish revisions
from cms.utils.reversion_hacks import Version
content_type = ContentType.objects.get_for_model(Page)
# reversion 1.8+ removes type field, revision filtering must be based on comments
versions_qs = Version.objects.filter(content_type=content_type, object_id_int=page.pk)
history_limit = get_cms_setting("MAX_PAGE_HISTORY_REVERSIONS")
deleted = []
for version in versions_qs.exclude(revision__comment__in=(REVISION_INITIAL_COMMENT, PUBLISH_COMMENT)).order_by(
'-revision__pk')[history_limit - 1:]:
if not version.revision_id in deleted:
revision = version.revision
revision.delete()
deleted.append(revision.pk)
# delete all publish revisions that are more then MAX_PAGE_PUBLISH_REVERSIONS
publish_limit = get_cms_setting("MAX_PAGE_PUBLISH_REVERSIONS")
if publish_limit and publish:
deleted = []
for version in versions_qs.filter(revision__comment__exact=PUBLISH_COMMENT).order_by(
'-revision__pk')[publish_limit - 1:]:
if not version.revision_id in deleted:
revision = version.revision
revision.delete()
deleted.append(revision.pk)
@require_POST
@transaction.atomic
def unpublish(self, request, page_id, language):
"""
Publish or unpublish a language of a page
"""
site = Site.objects.get_current()
page = get_object_or_404(self.model, pk=page_id)
if not page.has_publish_permission(request):
return HttpResponseForbidden(force_text(_("You do not have permission to unpublish this page")))
if not page.publisher_public_id:
return HttpResponseForbidden(force_text(_("This page was never published")))
try:
page.unpublish(language)
message = _('The %(language)s page "%(page)s" was successfully unpublished') % {
'language': get_language_object(language, site)['name'], 'page': page}
messages.info(request, message)
LogEntry.objects.log_action(
user_id=request.user.id,
content_type_id=ContentType.objects.get_for_model(Page).pk,
object_id=page_id,
object_repr=page.get_title(),
action_flag=CHANGE,
change_message=message,
)
except RuntimeError:
exc = sys.exc_info()[1]
messages.error(request, exc.message)
except ValidationError:
exc = sys.exc_info()[1]
messages.error(request, exc.message)
path = admin_reverse("cms_page_changelist")
if request.GET.get('redirect_language'):
path = "%s?language=%s&page_id=%s" % (path, request.GET.get('redirect_language'), request.GET.get('redirect_page_id'))
return HttpResponseRedirect(path)
@require_POST
@transaction.atomic
def revert_page(self, request, page_id, language):
page = get_object_or_404(self.model, id=page_id)
# ensure user has permissions to publish this page
if not page.has_change_permission(request):
return HttpResponseForbidden(force_text(_("You do not have permission to change this page")))
page.revert(language)
messages.info(request, _('The page "%s" was successfully reverted.') % page)
if 'node' in request.GET or 'node' in request.POST:
# if request comes from tree..
return HttpResponse(admin_utils.render_admin_menu_item(request, page))
# TODO: This should never fail, but it may be a POF
path = page.get_absolute_url(language=language)
path = '%s?%s' % (path, get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF'))
return HttpResponseRedirect(path)
    @create_revision()
    def delete_translation(self, request, object_id, extra_context=None):
        """Confirm and delete a single language translation (title + plugins)
        of a page, leaving the other translations intact.

        GET renders a confirmation page; POST performs the deletion and
        redirects back to the changelist (or admin index when the user lacks
        change permission).
        """
        if 'language' in request.GET:
            language = request.GET['language']
        else:
            language = get_language_from_request(request)
        opts = Page._meta
        titleopts = Title._meta
        app_label = titleopts.app_label
        pluginopts = CMSPlugin._meta
        try:
            obj = self.get_queryset(request).get(pk=unquote(object_id))
        except self.model.DoesNotExist:
            # Don't raise Http404 just yet, because we haven't checked
            # permissions yet. We don't want an unauthenticated user to be able
            # to determine whether a given object exists.
            obj = None
        if not self.has_delete_permission(request, obj):
            return HttpResponseForbidden(force_text(_("You do not have permission to change this page")))
        if obj is None:
            raise Http404(
                _('%(name)s object with primary key %(key)r does not exist.') % {
                    'name': force_text(opts.verbose_name),
                    'key': escape(object_id)
                })
        # Refuse to delete the page's only translation.
        if not len(list(obj.get_languages())) > 1:
            raise Http404(_('There only exists one translation for this page'))
        titleobj = get_object_or_404(Title, page__id=object_id, language=language)
        saved_plugins = CMSPlugin.objects.filter(placeholder__page__id=object_id, language=language)
        using = router.db_for_read(self.model)
        kwargs = {
            'admin_site': self.admin_site,
            'user': request.user,
            'using': using
        }
        # Collect what would be deleted plus any missing delete permissions,
        # for both the Title and its plugins.
        deleted_objects, __, perms_needed = get_deleted_objects(
            [titleobj],
            titleopts,
            **kwargs
        )[:3]
        to_delete_plugins, __, perms_needed_plugins = get_deleted_objects(
            saved_plugins,
            pluginopts,
            **kwargs
        )[:3]
        deleted_objects.append(to_delete_plugins)
        perms_needed = set(list(perms_needed) + list(perms_needed_plugins))
        if request.method == 'POST':
            if perms_needed:
                raise PermissionDenied
            message = _('Title and plugins with language %(language)s was deleted') % {
                'language': force_text(get_language_object(language)['name'])
            }
            self.log_change(request, titleobj, message)
            messages.info(request, message)
            titleobj.delete()
            for p in saved_plugins:
                p.delete()
            public = obj.publisher_public
            if public:
                public.save()
            if is_installed('reversion'):
                self.cleanup_history(obj)
                helpers.make_revision_with_plugins(obj, request.user, message)
            if not self.has_change_permission(request, None):
                return HttpResponseRedirect(admin_reverse('index'))
            return HttpResponseRedirect(admin_reverse('cms_page_changelist'))
        context = {
            "title": _("Are you sure?"),
            "object_name": force_text(titleopts.verbose_name),
            "object": titleobj,
            "deleted_objects": deleted_objects,
            "perms_lacking": perms_needed,
            "opts": opts,
            "root_path": admin_reverse('index'),
            "app_label": app_label,
        }
        context.update(extra_context or {})
        request.current_app = self.admin_site.name
        return render(request, self.delete_confirmation_template or [
            "admin/%s/%s/delete_confirmation.html" % (app_label, titleopts.object_name.lower()),
            "admin/%s/delete_confirmation.html" % app_label,
            "admin/delete_confirmation.html"
        ], context)
def preview_page(self, request, object_id, language):
"""Redirecting preview function based on draft_id
"""
page = get_object_or_404(self.model, id=object_id)
attrs = "?%s" % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')
attrs += "&language=" + language
with force_language(language):
url = page.get_absolute_url(language) + attrs
site = get_current_site(request)
if not site == page.site:
url = "http%s://%s%s" % ('s' if request.is_secure() else '',
page.site.domain, url)
return HttpResponseRedirect(url)
@require_POST
def change_innavigation(self, request, page_id):
"""
Switch the in_navigation of a page
"""
page = get_object_or_404(self.model, pk=page_id)
if page.has_change_permission(request):
page.toggle_in_navigation()
language = request.GET.get('language') or get_language_from_request(request)
return HttpResponse(admin_utils.render_admin_menu_item(request, page, language=language))
return HttpResponseForbidden(force_text(_("You do not have permission to change this page's in_navigation status")))
def get_tree(self, request):
"""
Get html for the descendants (only) of given page or if no page_id is
provided, all the root nodes.
Used for lazy loading pages in cms.pagetree.js
Permission checks is done in admin_utils.get_admin_menu_item_context
which is called by admin_utils.render_admin_menu_item.
"""
page_id = request.GET.get('pageId', None)
site_id = request.GET.get('site', None)
language = request.GET.get('language', None)
open_nodes = list(map(int, request.GET.getlist('openNodes[]')))
try:
site_id = int(site_id)
site = Site.objects.get(id=site_id)
except (TypeError, ValueError, MultipleObjectsReturned,
ObjectDoesNotExist):
site = get_current_site(request)
if language is None:
language = (request.GET.get('language') or
get_language_from_request(request))
if page_id:
page = get_object_or_404(self.model, pk=int(page_id))
pages = list(page.get_children())
else:
pages = Page.get_root_nodes().filter(site=site,
publisher_is_draft=True)
template = "admin/cms/page/tree/lazy_menu.html"
response = u""
for page in pages:
response += admin_utils.render_admin_menu_item(
request, page,
template=template,
language=language,
open_nodes=open_nodes,
)
return HttpResponse(response)
def add_page_type(self, request):
site = Site.objects.get_current()
language = request.GET.get('language') or get_language()
target = request.GET.get('copy_target')
type_root, created = self.model.objects.get_or_create(reverse_id=PAGE_TYPES_ID, publisher_is_draft=True, site=site,
defaults={'in_navigation': False})
type_title, created = Title.objects.get_or_create(page=type_root, language=language, slug=PAGE_TYPES_ID,
defaults={'title': _('Page Types')})
url = add_url_parameters(admin_reverse('cms_page_add'), target=type_root.pk, position='first-child',
add_page_type=1, copy_target=target, language=language)
return HttpResponseRedirect(url)
def resolve(self, request):
if not request.user.is_staff:
return HttpResponse('/', content_type='text/plain')
obj = False
url = False
if request.session.get('cms_log_latest', False):
log = LogEntry.objects.get(pk=request.session['cms_log_latest'])
try:
obj = log.get_edited_object()
except (ObjectDoesNotExist, ValueError):
obj = None
del request.session['cms_log_latest']
if obj and obj.__class__ in toolbar_pool.get_watch_models() and hasattr(obj, 'get_absolute_url'):
# This is a test if the object url can be retrieved
# In case it can't, object it's not taken into account
try:
force_text(obj.get_absolute_url())
except:
obj = None
else:
obj = None
if not obj:
pk = request.GET.get('pk', False) or request.POST.get('pk', False)
full_model = request.GET.get('model') or request.POST.get('model', False)
if pk and full_model:
app_label, model = full_model.split('.')
if pk and app_label:
ctype = ContentType.objects.get(app_label=app_label, model=model)
try:
obj = ctype.get_object_for_this_type(pk=pk)
except ctype.model_class().DoesNotExist:
obj = None
try:
force_text(obj.get_absolute_url())
except:
obj = None
if obj:
if not getattr(request, 'toolbar', False) or not getattr(request.toolbar, 'edit_mode', False):
if isinstance(obj, Page):
if obj.get_public_object():
url = obj.get_public_object().get_absolute_url()
else:
url = '%s?%s' % (
obj.get_draft_object().get_absolute_url(),
get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')
)
else:
url = obj.get_absolute_url()
else:
url = obj.get_absolute_url()
if url:
return HttpResponse(force_text(url), content_type='text/plain')
return HttpResponse('', content_type='text/plain')
def lookup_allowed(self, key, *args, **kwargs):
if key == 'site__exact':
return True
return super(PageAdmin, self).lookup_allowed(key, *args, **kwargs)
    def edit_title_fields(self, request, page_id, language):
        """Frontend-editing endpoint: edit a restricted set of Title fields
        (filtered against ``title_frontend_editable_fields``) in a popup form.

        GET renders the form; POST saves it; ``_cancel`` in POST aborts.
        Successful saves and cancels render a confirmation template that
        closes the popup.
        """
        title = Title.objects.get(page_id=page_id, language=language)
        saved_successfully = False
        raw_fields = request.GET.get("edit_fields", 'title')
        # Only fields explicitly whitelisted on the admin may be edited.
        edit_fields = [field for field in raw_fields.split(",") if field in self.title_frontend_editable_fields]
        cancel_clicked = request.POST.get("_cancel", False)
        opts = Title._meta
        if not edit_fields:
            # Defaults to title
            edit_fields = ('title',)
        if not has_generic_permission(title.page.pk, request.user, "change",
                                      title.page.site.pk):
            return HttpResponseForbidden(force_text(_("You do not have permission to edit this page")))
        class PageTitleForm(django.forms.ModelForm):
            """
            Dynamic form showing only the fields to be edited
            """
            class Meta:
                model = Title
                fields = edit_fields
        if not cancel_clicked and request.method == 'POST':
            form = PageTitleForm(instance=title, data=request.POST)
            if form.is_valid():
                form.save()
                saved_successfully = True
        else:
            form = PageTitleForm(instance=title)
        admin_form = AdminForm(form, fieldsets=[(None, {'fields': edit_fields})], prepopulated_fields={},
                               model_admin=self)
        media = self.media + admin_form.media
        context = {
            'CMS_MEDIA_URL': get_cms_setting('MEDIA_URL'),
            'title': 'Title',
            'plugin': title.page,
            'plugin_id': title.page.id,
            'adminform': admin_form,
            'add': False,
            'is_popup': True,
            'media': media,
            'opts': opts,
            'change': True,
            'save_as': False,
            'has_add_permission': False,
            'window_close_timeout': 10,
        }
        if cancel_clicked:
            # cancel button was clicked
            context.update({
                'cancel': True,
            })
            return render(request, 'admin/cms/page/plugin/confirm_form.html', context)
        if not cancel_clicked and request.method == 'POST' and saved_successfully:
            return render(request, 'admin/cms/page/plugin/confirm_form.html', context)
        return render(request, 'admin/cms/page/plugin/change_form.html', context)
def get_published_pagelist(self, *args, **kwargs):
"""
This view is used by the PageSmartLinkWidget as the user type to feed the autocomplete drop-down.
"""
request = args[0]
if request.is_ajax():
query_term = request.GET.get('q','').strip('/')
language_code = request.GET.get('language_code', settings.LANGUAGE_CODE)
matching_published_pages = self.model.objects.published().public().filter(
Q(title_set__title__icontains=query_term, title_set__language=language_code)
| Q(title_set__path__icontains=query_term, title_set__language=language_code)
| Q(title_set__menu_title__icontains=query_term, title_set__language=language_code)
| Q(title_set__page_title__icontains=query_term, title_set__language=language_code)
).distinct()
results = []
for page in matching_published_pages:
results.append(
{
'path': page.get_path(language=language_code),
'title': page.get_title(language=language_code),
'redirect_url': page.get_absolute_url(language=language_code)
}
)
return HttpResponse(json.dumps(results), content_type='application/json')
else:
return HttpResponseForbidden()
# Expose the Page model in the Django admin through the customised PageAdmin.
admin.site.register(Page, PageAdmin)
| {
"content_hash": "a81caf3dcc0fd8d4dcebe2ca30fa1395",
"timestamp": "",
"source": "github",
"line_count": 1657,
"max_line_length": 179,
"avg_line_length": 44.85817742908871,
"alnum_prop": 0.5897484192116238,
"repo_name": "vxsx/django-cms",
"id": "b18c0f5336f53a78aac2e98d46e9a3231307d94b",
"size": "74374",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cms/admin/pageadmin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "133419"
},
{
"name": "HTML",
"bytes": "154109"
},
{
"name": "JavaScript",
"bytes": "1172445"
},
{
"name": "Python",
"bytes": "1996894"
},
{
"name": "Shell",
"bytes": "28"
}
],
"symlink_target": ""
} |
__author__ = ['markshao']

# Identifiers for the LXC VM provider plugin.
provider_name = "lxc"
# NOTE(review): "privider_summary" is a misspelling, but it is kept because
# external code may already import it by this name. "provider_summary" is the
# correctly spelled alias; prefer it in new code.
privider_summary = "linux container"
provider_summary = privider_summary
| {
"content_hash": "8249fd174cd5fc3cef7e259ea872f71f",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 36,
"avg_line_length": 21.5,
"alnum_prop": 0.6744186046511628,
"repo_name": "markshao/pagrant",
"id": "0afed9bd42e4e9b0b87e528424b07ab2c0034527",
"size": "118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pagrant/vmproviders/lxc/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "85425"
}
],
"symlink_target": ""
} |
"""
==================
Visualize surfaces
==================
Here is a simple tutorial that shows how to visualize surfaces using dipy_. It
also shows how to load/save, get/set and update ``vtkPolyData`` and show
surfaces.
``vtkPolyData`` is a structure used by VTK to represent surfaces and other data
structures. Here we show how to visualize a simple cube but the same idea
should apply for any surface.
"""
import numpy as np
"""
Import useful functions from ``dipy.viz.utils``
"""
import dipy.io.vtk as io_vtk
import dipy.viz.utils as ut_vtk
from dipy.viz import window
# Conditional import machinery for vtk
# Allow import, but disable doctests if we don't have vtk
from dipy.utils.optpkg import optional_package
vtk, have_vtk, setup_module = optional_package('vtk')
"""
Create an empty ``vtkPolyData``
"""
my_polydata = vtk.vtkPolyData()
"""
Create a cube with vertices and triangles as numpy arrays
"""
my_vertices = np.array([[0.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 1.0],
[1.0, 0.0, 0.0],
[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[1.0, 1.0, 1.0]])
# the data type for vtk is needed to mention here, numpy.int64
my_triangles = np.array([[0, 6, 4],
[0, 2, 6],
[0, 3, 2],
[0, 1, 3],
[2, 7, 6],
[2, 3, 7],
[4, 6, 7],
[4, 7, 5],
[0, 4, 5],
[0, 5, 1],
[1, 5, 7],
[1, 7, 3]],dtype='i8')
"""
Set vertices and triangles in the ``vtkPolyData``
"""
ut_vtk.set_polydata_vertices(my_polydata, my_vertices)
ut_vtk.set_polydata_triangles(my_polydata, my_triangles)
"""
Save the ``vtkPolyData``
"""
file_name = "my_cube.vtk"
io_vtk.save_polydata(my_polydata, file_name)
print("Surface saved in " + file_name)
"""
Load the ``vtkPolyData``
"""
cube_polydata = io_vtk.load_polydata(file_name)
"""
add color based on vertices position
"""
cube_vertices = ut_vtk.get_polydata_vertices(cube_polydata)
colors = cube_vertices * 255
ut_vtk.set_polydata_colors(cube_polydata, colors)
print("new surface colors")
print(ut_vtk.get_polydata_colors(cube_polydata))
"""
Visualize surfaces
"""
# get vtkActor
cube_actor = ut_vtk.get_actor_from_polydata(cube_polydata)
# renderer and scene
renderer = window.Renderer()
renderer.add(cube_actor)
renderer.set_camera(position=(10, 5, 7), focal_point=(0.5, 0.5, 0.5))
renderer.zoom(3)
# display
# window.show(renderer, size=(600, 600), reset_camera=False)
window.record(renderer, out_path='cube.png', size=(600, 600))
"""
.. figure:: cube.png
:align: center
An example of a simple surface visualized with DIPY.
.. include:: ../links_names.inc
"""
| {
"content_hash": "b2c4a47a32421084a3777852d1df8a49",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 79,
"avg_line_length": 24.95,
"alnum_prop": 0.564128256513026,
"repo_name": "nilgoyyou/dipy",
"id": "c0bfbaf004fd88205ed4895d5e8d448573465740",
"size": "2994",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "doc/examples/viz_surfaces.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2844"
},
{
"name": "Makefile",
"bytes": "3639"
},
{
"name": "Python",
"bytes": "2968209"
}
],
"symlink_target": ""
} |
from django.contrib.sitemaps import Sitemap
from .models import Post
class PostSitemap(Sitemap):
    """Sitemap section covering all published blog posts."""

    # Crawler hints: posts change roughly weekly and rank highly.
    changefreq = 'weekly'
    priority = 0.9

    def items(self):
        """Return the queryset of posts to include in the sitemap."""
        published_posts = Post.published.all()
        return published_posts

    def lastmod(self, obj):
        """Use a post's publication timestamp as its last-modified date."""
        return obj.publish
| {
"content_hash": "35a6c3f06d523f20452163656e438c68",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 43,
"avg_line_length": 19.416666666666668,
"alnum_prop": 0.7467811158798283,
"repo_name": "SlouchyBeanie/blog",
"id": "65b41e8ad0c4ee8814fc220fa99872c13fe0758d",
"size": "233",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/sitemaps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1316"
},
{
"name": "HTML",
"bytes": "3973"
},
{
"name": "Python",
"bytes": "15143"
}
],
"symlink_target": ""
} |
"""LCM type definitions
This file automatically generated by lcm.
DO NOT MODIFY BY HAND!!!!
"""
import cStringIO as StringIO
import struct
import header
class piemos_health(object):
    """LCM message carrying PiEMOS health/status flags.

    NOTE: this file is generated by lcm ("DO NOT MODIFY BY HAND") --
    regenerate from the message definition rather than editing the logic;
    the comments below were added for review purposes only.
    """
    # Payload: an embedded ``header`` message, four boolean flags and the
    # remaining game time, wire-packed as ">bbbbi" (8 bytes, big-endian).
    __slots__ = ["header", "auton", "enabled", "robot_connection", "xbox_controller", "game_time"]
    def __init__(self):
        self.header = None
        self.auton = False
        self.enabled = False
        self.robot_connection = False
        self.xbox_controller = False
        self.game_time = 0
    def encode(self):
        # Serialise: 8-byte type fingerprint followed by the payload.
        buf = StringIO.StringIO()
        buf.write(piemos_health._get_packed_fingerprint())
        self._encode_one(buf)
        return buf.getvalue()
    def _encode_one(self, buf):
        # The embedded header must be the expected generated header type.
        assert self.header._get_packed_fingerprint() == header.header._get_packed_fingerprint()
        self.header._encode_one(buf)
        # Big-endian: 4 signed bytes (flags) + 32-bit signed game time.
        buf.write(struct.pack(">bbbbi", self.auton, self.enabled, self.robot_connection, self.xbox_controller, self.game_time))
    def decode(data):
        # Accept raw bytes or a file-like object; reject payloads whose
        # leading fingerprint does not match this message type.
        if hasattr(data, 'read'):
            buf = data
        else:
            buf = StringIO.StringIO(data)
        if buf.read(8) != piemos_health._get_packed_fingerprint():
            raise ValueError("Decode error")
        return piemos_health._decode_one(buf)
    decode = staticmethod(decode)
    def _decode_one(buf):
        # Build an instance from an already-fingerprint-checked buffer.
        self = piemos_health()
        self.header = header.header._decode_one(buf)
        self.auton, self.enabled, self.robot_connection, self.xbox_controller, self.game_time = struct.unpack(">bbbbi", buf.read(8))
        return self
    _decode_one = staticmethod(_decode_one)
    _hash = None
    def _get_hash_recursive(parents):
        # Combine this type's seed constant with the header type's hash;
        # ``parents`` guards against infinite recursion on cyclic types.
        if piemos_health in parents: return 0
        newparents = parents + [piemos_health]
        tmphash = (0x684c6c42b9e14cf1+ header.header._get_hash_recursive(newparents)) & 0xffffffffffffffff
        # Rotate left by one bit within 64 bits.
        tmphash = (((tmphash<<1)&0xffffffffffffffff) + (tmphash>>63)) & 0xffffffffffffffff
        return tmphash
    _get_hash_recursive = staticmethod(_get_hash_recursive)
    _packed_fingerprint = None
    def _get_packed_fingerprint():
        # Lazily compute and cache the 8-byte big-endian fingerprint.
        if piemos_health._packed_fingerprint is None:
            piemos_health._packed_fingerprint = struct.pack(">Q", piemos_health._get_hash_recursive([]))
        return piemos_health._packed_fingerprint
    _get_packed_fingerprint = staticmethod(_get_packed_fingerprint)
| {
"content_hash": "54ec3b27cc53d960f8f8d106d66d36f9",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 132,
"avg_line_length": 36.55384615384615,
"alnum_prop": 0.6460437710437711,
"repo_name": "pioneers/topgear",
"id": "1fdf67ea6308372c87a2253dbe5ffd9535ea1762",
"size": "2376",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/forseti2/piemos_health.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "65922"
},
{
"name": "Java",
"bytes": "78468"
},
{
"name": "JavaScript",
"bytes": "24075"
},
{
"name": "Python",
"bytes": "230356"
},
{
"name": "Shell",
"bytes": "4595"
}
],
"symlink_target": ""
} |
"""
type lookahead: some operations need to be done knowing the resulting type
"""
# Somewhat easy: the type checker knows that l1 has type [str] from the start
l1 = [None]
l1[0] = "hello"
print l1
# Hard: the list literal correctly has type [None], but [None] * 1 needs to return
# an object of type [str], a case of things needing to be upconverted from _get
l2 = [None] * 1
l2[0] = ""
print l2
| {
"content_hash": "b046e53e093168ff3a30699f6541028e",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 82,
"avg_line_length": 28.5,
"alnum_prop": 0.6942355889724311,
"repo_name": "kmod/icbd",
"id": "7ea88dbb747f5c1398d0a0e2296d5061d3aae674",
"size": "399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "icbd/compiler/tests/71.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "33042"
},
{
"name": "C++",
"bytes": "35981"
},
{
"name": "CSS",
"bytes": "8888"
},
{
"name": "JavaScript",
"bytes": "3602"
},
{
"name": "Makefile",
"bytes": "48655"
},
{
"name": "Objective-C",
"bytes": "88"
},
{
"name": "Python",
"bytes": "10340340"
},
{
"name": "Shell",
"bytes": "18865"
}
],
"symlink_target": ""
} |
import re
# Python 2 script (``print`` statement): clean a scraped address dump so its
# output can be redirected to a CSV, e.g. ``python readFile.py > test.csv``.
with open ("file.txt", "r") as myfile:
    # Strip the leading 'moreaddress(' wrapper as the file is read.
    data=myfile.read().replace('moreaddress(', '')
# NOTE(review): the steps below assume every record ends with '"Bangalore");'
# and that removing *all* spaces (not just padding) is intended -- confirm
# against a sample of file.txt.
data = data.replace ('"Bangalore");', '')
data = data.replace ('"', '')
data = data.replace (' ', '')
# Collapse the blank line left between consecutive records.
data = data.replace(',\n\n','\n')
# Drop ',Pin-<digits>' postal-code suffixes.
data = re.sub(r',Pin-\d*', "", data)
print data
'''Write output to file
#python readFile.py > test.csv
'''
| {
"content_hash": "2e5fcc99536d8a1485d66e492accd0f6",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 50,
"avg_line_length": 28.384615384615383,
"alnum_prop": 0.5501355013550135,
"repo_name": "sudikrt/costproML",
"id": "15c0443fa3d62baefb6c35efaf10ec9469334fe7",
"size": "369",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "testproject/temp/readFile.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "85031"
}
],
"symlink_target": ""
} |
from random import sample
from discoutils.thesaurus_loader import Thesaurus
class DummyThesaurus(Thesaurus):
    """Thesaurus stand-in that claims ``b/N`` is the single nearest
    neighbour of every conceivable entry."""

    name = 'Constant'

    def __init__(self):
        pass

    def get_nearest_neighbours(self, feature):
        # Every lookup yields the same lone neighbour at full similarity.
        constant_neighbour = ('b/N', 1.0)
        return [constant_neighbour]

    def get_vector(self):
        pass

    def to_shelf(self, *args, **kwargs):
        pass

    def __len__(self):
        # Pretend to be (practically) unbounded.
        return 9999999

    def __contains__(self, feature):
        # Every feature is "in" this thesaurus.
        return True
class RandomThesaurus(DummyThesaurus):
    """
    A thesaurus-like object which returns ``k`` random neighbours for every
    possible entry. Neighbours are drawn from the vocabulary passed in at
    construction time (e.g. a dict ``{feature: index}`` -- only the keys are
    used).
    """
    name = 'Random'

    def __init__(self, vocab=None, k=1):
        self.vocab = vocab
        self.k = k

    def get_nearest_neighbours(self, item):
        if not self.vocab:
            raise ValueError('You need to provide a set of values to choose from first.')
        # random.sample requires a sequence: materialise the vocabulary so
        # that dicts and sets (whose direct sampling is unsupported or
        # removed in modern Python) also work.
        return [(str(foo), 1.) for foo in sample(list(self.vocab), self.k)]
| {
"content_hash": "7c74000f2cfecb7dbcb6be847fb10353",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 108,
"avg_line_length": 26,
"alnum_prop": 0.6258741258741258,
"repo_name": "mbatchkarov/dc_evaluation",
"id": "446aa1c4eab4be19b015cdfffd5ec1ee3ac8f12b",
"size": "1144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eval/pipeline/thesauri.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "410897"
}
],
"symlink_target": ""
} |
# Main blueprint package: wires routes, the REST API and a background job.
from flask import Blueprint
main = Blueprint('main', __name__)
# Imported *after* ``main`` exists because views/errors import it back
# (standard Flask circular-import workaround).
from . import views, errors
from flask_restful import Api
from .api.gfcard import GiftCard
# REST endpoint for gift-card requests.
api = Api(main)
api.add_resource(GiftCard, '/giftcard/request')
from apscheduler.schedulers.background import BackgroundScheduler
from .jobs.giftCardSchedule import GiftCardSchedule
# NOTE(review): the scheduler is started as an import-time side effect and
# fires ``GiftCardSchedule.test`` every 3 seconds -- confirm this is intended
# (it will also run under test imports and multiple workers).
giftCArdSchedule = GiftCardSchedule()
scheduler = BackgroundScheduler()
scheduler.add_job(giftCArdSchedule.test, 'interval', seconds=3)
scheduler.start()
| {
"content_hash": "964c097cfa5be111a35e93bf62c98e11",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 65,
"avg_line_length": 26.105263157894736,
"alnum_prop": 0.7983870967741935,
"repo_name": "simonqiang/gftest",
"id": "90fcc0ce09da7fcf0724d26f0283968ac640f2d9",
"size": "496",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/main/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2346"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "31261"
}
],
"symlink_target": ""
} |
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HMM2_if_ConnectedLHS(HimesisPreConditionPatternLHS):
    """Compiled himesis pre-condition pattern (LHS) for the MM2 rule.

    NOTE(review): this class is generated ("compiled") from an AToM3 model;
    prefer regenerating over hand-editing. Comments added for review only.
    """
    def __init__(self):
        """
        Creates the himesis graph representing the AToM3 model HMM2_if_ConnectedLHS
        """
        # Flag this instance as compiled now
        self.is_compiled = True
        super(HMM2_if_ConnectedLHS, self).__init__(name='HMM2_if_ConnectedLHS', num_nodes=0, edges=[])
        # Add the edges
        self.add_edges([])
        # Set the graph attributes (meta-model tags, constraint code, GUID).
        self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
        self["MT_constraint__"] = """return True"""
        self["name"] = """"""
        self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'HMM2_if_ConnectedLHS')
        self["equations"] = []
        # Set the node attributes
        # Add the edges
        self.add_edges([
            ])
        # define evaluation methods for each match class.
        # define evaluation methods for each match association.
    def constraint(self, PreNode, graph):
        # This pattern imposes no extra condition beyond structural matching.
        return True
| {
"content_hash": "853651767d2b3a16a94b3766a580762b",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 96,
"avg_line_length": 24.973684210526315,
"alnum_prop": 0.6933614330874605,
"repo_name": "levilucio/SyVOLT",
"id": "32e3bdca849d00ae4c0ec71e5f2ea94adaf0a45d",
"size": "949",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UMLRT2Kiltera_MM/Properties/from_MPS/HMM2_if_ConnectedLHS.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
import re
import numpy as np
from sklearn.preprocessing import normalize
from sklearn.feature_extraction.text import HashingVectorizer
def preprocess_string(text):
    """Normalise raw text for hashing: drop special characters (keeping
    e.g. "$", "," and "-") and replace each digit run with ``^number^``.
    """
    without_specials = re.sub("[!.;?^*()_{}|]", "", text)
    # Every run of digits becomes a single placeholder word, padded with
    # spaces so it tokenises as its own term.
    return re.sub(r"\d+", " ^number^ ", without_specials)
def my_tokenizer(s):
    """Whitespace tokeniser handed to the HashingVectorizer."""
    tokens = s.split()
    return tokens
def get_text_nodes(leaf_nodes, n_features):
    """Collect DOM text nodes as ``(position, hashed_text, [area])`` tuples,
    ordered by bounding-box area, largest first.

    Nodes whose hashed representation has no non-zero entries are dropped.
    ``n_features`` sets the HashingVectorizer dimensionality.
    """
    vectorizer = HashingVectorizer(n_features=n_features, tokenizer=my_tokenizer, non_negative=True, preprocessor=preprocess_string, norm=None)
    collected = []
    for node in leaf_nodes:
        # DOM node type 3 == text node; skip anything else or valueless nodes.
        if node['type'] != 3 or 'value' not in node:
            continue
        bb = node['position']
        area = [(bb[2] - bb[0]) * (bb[3] - bb[1])]
        # Collapse whitespace and lowercase before hashing.
        normalised = ' '.join(node['value'].lower().split())
        encoded = vectorizer.transform([normalised])
        if len(encoded.nonzero()[0]) > 0:
            collected.append((bb, encoded, area))
    # Largest text nodes first (sort key is the single-element [area] list).
    collected.sort(key=lambda entry: entry[2], reverse=True)
    return collected
def get_text_maps(text_nodes, n_features, spatial_shape, text_map_scale):
    """Rasterise hashed text nodes into a scaled-down feature map.

    Returns an array of shape
    ``(round(spatial_shape[0]*scale), round(spatial_shape[1]*scale), n_features)``
    where each node's L2-normalised hashed-text vector, multiplied by 255 to
    move to image scale, is painted over its scaled bounding box. Because
    ``text_nodes`` arrives sorted largest-first, smaller (later) nodes
    overwrite larger ones.

    NOTE(review): indexing uses ``[y, x]``, so ``spatial_shape`` is assumed
    to be (height, width) -- confirm against callers.
    """
    # scale down spatial dimensions
    # np.float64 == builtin float; the bare ``np.float`` alias used before
    # was deprecated in NumPy 1.20 and removed in 1.24.
    features = np.zeros((round(spatial_shape[0] * text_map_scale),
                         round(spatial_shape[1] * text_map_scale),
                         n_features), dtype=np.float64)
    for node in text_nodes:
        bb = node[0]
        bb_scaled = [int(round(x * text_map_scale)) for x in bb]
        # L2-normalise the sparse hashed vector, then move to image scale.
        encoded_text = normalize(node[1], axis=1, norm='l2')
        encoded_text = encoded_text * 255
        vector = np.asarray(encoded_text.todense())[0]
        features[bb_scaled[1]:bb_scaled[3], bb_scaled[0]:bb_scaled[2], :] = vector
    return features
| {
"content_hash": "16cb8003ab30323dc9184eb5752a77b0",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 143,
"avg_line_length": 40.84313725490196,
"alnum_prop": 0.628420547287566,
"repo_name": "gogartom/TextMaps",
"id": "b49a3e821a62480610f0fdd181135b1725b3de28",
"size": "2083",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "custom_layers/web_data_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "10501"
},
{
"name": "Jupyter Notebook",
"bytes": "632436"
},
{
"name": "Python",
"bytes": "44394"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.