| repo_name (string, 5-100) | path (string, 4-231) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k) | middle (string, 3-512) | suffix (string, 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
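The schema above describes fill-in-the-middle style samples: each row carries a repository reference plus one source file split into `prefix`, `middle`, and `suffix` text cells. Below is a minimal sketch of how such rows might be consumed, assuming the table is available as plain Python dicts; the `rows` list and the way it is loaded are hypothetical, the sample values are copied from the first row of the table, and joining the three text cells in order is assumed to reconstruct the original file.

```python
# Hypothetical in-memory representation of one table row (values taken from the
# first row below); in practice the rows would come from whatever loader is used.
rows = [
    {
        "repo_name": "xke/nash",
        "path": "app/__init__.py",
        "language": "Python",
        "license": "bsd-2-clause",
        "size": 145,
        "score": 0.006897,
        "prefix": "# __init__.py is a special Python file that allow",
        "middle": "s a directory to become\n# a Python package so it can be "
                  "accessed using the 'import' statement.",
        "suffix": "",
    },
]

for row in rows:
    # Assumption: prefix + middle + suffix concatenated in order gives the full file text.
    full_text = row["prefix"] + row["middle"] + row["suffix"]
    print(f"{row['repo_name']}:{row['path']} ({row['size']} bytes, score {row['score']})")
    print(full_text)
```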
xke/nash
|
app/__init__.py
|
Python
|
bsd-2-clause
| 145
| 0.006897
|
# __init__.py is a special Python file that allows a directory to become
# a Python package so it can be accessed using the 'import' statement.
|
|
yunojuno/django-onfido
|
tests/conftest.py
|
Python
|
mit
| 8,575
| 0.0007
|
"""Shared pytest fixtures and test data."""
import copy
import uuid
import pytest
from django.contrib.auth import get_user_model
from onfido.models import Applicant, Check, Event, Report
APPLICANT_ID = str(uuid.uuid4())
CHECK_ID = str(uuid.uuid4())
IDENTITY_REPORT_ID = str(uuid.uuid4())
DOCUMENT_REPORT_ID = str(uuid.uuid4())
DOCUMENT_ID = str(uuid.uuid4())
User = get_user_model()
@pytest.fixture
def user():
return User.objects.create_user(
"fred", first_name="Fred", last_name="Flinstone", email="fred@example.com"
)
@pytest.fixture
def applicant(user):
data = copy.deepcopy(TEST_APPLICANT)
return Applicant.objects.create_applicant(user=user, raw=data)
@pytest.fixture
def check(applicant):
data = copy.deepcopy(TEST_CHECK)
return Check.objects.create_check(applicant, raw=data)
@pytest.fixture
def identity_report(check):
data = copy.deepcopy(TEST_REPORT_IDENTITY_ENHANCED)
return Report.objects.create_report(check, raw=data)
@pytest.fixture
def document_report(check):
data = copy.deepcopy(TEST_REPORT_DOCUMENT)
return Report.objects.create_report(check, raw=data)
@pytest.fixture
def report(identity_report):
return identity_report
@pytest.fixture
def event(check):
data = copy.deepcopy(TEST_EVENT)
return Event().parse(data)
# Test data taken from Onfido v3 API docs.
# https://documentation.onfido.com/#applicant-object
TEST_APPLICANT = {
"id": APPLICANT_ID,
"created_at": "2019-10-09T16:52:42Z",
"sandbox": True,
"first_name": "Jane",
"last_name": "Doe",
"email": None,
"dob": "1990-01-01",
"delete_at": None,
"href": f"/v3/applicants/{APPLICANT_ID}",
"id_numbers": [],
"address": {
"flat_number": None,
"building_number": None,
"building_name": None,
"street": "Second Street",
"sub_street": None,
"town": "London",
"state": None,
"postcode": "S2 2DF",
"country": "GBR",
"line1": None,
"line2": None,
"line3": None,
},
}
# https://documentation.onfido.com/#check-object
TEST_CHECK = {
"id": CHECK_ID,
"created_at": "2019-10-09T17:01:59Z",
"status": "in_progress",
"redirect_uri": None,
"result": None,
"sandbox": True,
"tags": [],
"results_uri": f"https://onfido.com/checks/{CHECK_ID}/reports",
"form_uri": None,
"paused": False,
"version": "3.0",
"report_ids": [IDENTITY_REPORT_ID],
"href": f"/v3/checks/{CHECK_ID}",
"applicant_id": APPLICANT_ID,
"applicant_provides_data": False,
}
# https://documentation.onfido.com/#identity-enhanced-report
TEST_REPORT_IDENTITY_ENHANCED = {
"created_at": "2019-10-03T15:54:20Z",
"href": f"/v3/reports/{IDENTITY_REPORT_ID}",
"id": IDENTITY_REPORT_ID,
"name": "identity_enhanced",
"properties": {
"matched_address": 19099121,
"matched_addresses": [
{"id": 19099121, "match_types": ["credit_agencies", "voting_register"]}
],
},
"result": "clear",
"status": "complete",
"sub_result": None,
"breakdown": {
"sources": {
"result": "clear",
"breakdown": {
"total_sources": {
"result": "clear",
"properties": {"total_number_of_sources": "3"},
}
},
},
"address": {
"result": "clear",
"breakdown": {
"credit_agencies": {
"result": "clear",
"properties": {"number_of_matches": "1"},
},
"telephone_database": {"result": "cl
|
ear", "properties": {}},
"voting_register": {"result": "clear", "properties": {}},
},
},
"date_of_birth": {
"result": "clear",
"breakdown": {
"credit_agencies": {"result":
|
"clear", "properties": {}},
"voting_register": {"result": "clear", "properties": {}},
},
},
"mortality": {"result": "clear"},
},
"check_id": CHECK_ID,
"documents": [],
}
TEST_REPORT_DOCUMENT = {
"created_at": "2019-10-03T14:05:48Z",
"documents": [{"id": DOCUMENT_ID}],
"href": f"/v3/reports/{DOCUMENT_REPORT_ID}",
"id": DOCUMENT_REPORT_ID,
"name": "document",
"properties": {
"nationality": "",
"last_name": "Names",
"issuing_country": "GBR",
"gender": "",
"first_name": "Report",
"document_type": "passport",
"document_numbers": [{"value": "123456789", "type": "document_number"}],
"date_of_expiry": "2030-01-01",
"date_of_birth": "1990-01-01",
},
"result": "clear",
"status": "complete",
"sub_result": "clear",
"breakdown": {
"data_comparison": {
"result": "clear",
"breakdown": {
"issuing_country": {"result": "clear", "properties": {}},
"gender": {"result": "clear", "properties": {}},
"date_of_expiry": {"result": "clear", "properties": {}},
"last_name": {"result": "clear", "properties": {}},
"document_type": {"result": "clear", "properties": {}},
"document_numbers": {"result": "clear", "properties": {}},
"first_name": {"result": "clear", "properties": {}},
"date_of_birth": {"result": "clear", "properties": {}},
},
},
"data_validation": {
"result": "clear",
"breakdown": {
"gender": {"result": "clear", "properties": {}},
"date_of_birth": {"result": "clear", "properties": {}},
"document_numbers": {"result": "clear", "properties": {}},
"document_expiration": {"result": "clear", "properties": {}},
"expiry_date": {"result": "clear", "properties": {}},
"mrz": {"result": "clear", "properties": {}},
},
},
"age_validation": {
"result": "clear",
"breakdown": {
"minimum_accepted_age": {"result": "clear", "properties": {}}
},
},
"image_integrity": {
"result": "clear",
"breakdown": {
"image_quality": {"result": "clear", "properties": {}},
"conclusive_document_quality": {"result": "clear", "properties": {}},
"supported_document": {"result": "clear", "properties": {}},
"colour_picture": {"result": "clear", "properties": {}},
},
},
"visual_authenticity": {
"result": "clear",
"breakdown": {
"fonts": {"result": "clear", "properties": {}},
"picture_face_integrity": {"result": "clear", "properties": {}},
"template": {"result": "clear", "properties": {}},
"security_features": {"result": "clear", "properties": {}},
"original_document_present": {"result": "clear", "properties": {}},
"digital_tampering": {"result": "clear", "properties": {}},
"other": {"result": "clear", "properties": {}},
"face_detection": {"result": "clear", "properties": {}},
},
},
"data_consistency": {
"result": "clear",
"breakdown": {
"date_of_expiry": {"result": "clear", "properties": {}},
"document_numbers": {"result": "clear", "properties": {}},
"issuing_country": {"result": "clear", "properties": {}},
"document_type": {"result": "clear", "properties": {}},
"date_of_birth": {"result": "clear", "properties": {}},
"gender": {"result": "clear", "properties": {}},
"first_name": {"result": "clear", "properties": {}},
"last_name": {"result": "clear", "properties": {}},
"nationality": {"result": "clear", "properties": {}},
},
},
"police_record": {"result": "clear"},
"compromised_documen
|
xycfree/py_spider
|
baidu/__init__.py
|
Python
|
gpl-3.0
| 154
| 0.006494
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2016/11/23 16:15
# @Author : xycfree
# @Link : http://example.org
# @Version : $
import os
|
sayan801/indivo_server
|
indivo/tests/data/reports/allergy.py
|
Python
|
gpl-3.0
| 2,324
| 0.003442
|
from base import report_content_to_test_docs
_TEST_ALLERGIES_INVALID = [
# an allergy with the wrong schema, should trigger a validation error
"""
<Models xmlns='http://indivo.org/vocab/xml/documents#'>
<Model name="Allergy">
<Field name="allergic_reaction_title">Anaphylaxis</Field>
<Field name="allergic_reaction_system">http://purl.bioontology.org/ontology/SNOMEDCT/</Field>
<Field name="allergic_reaction_identifier">39579001</Field>
<Field name="category_title">Drug allergy</Field>
<Field name="category_system">http://purl.bioontology.org/ontology/SNOMEDCT/</Field>
<Field name="category_identifier">416098002</Field>
<Field name="drug_class_allergen_title">Sulfonamide Antibacterial</Field>
<Field name="drug_class_allergen_system">http://purl.bioontology.org/ontology/NDFRT/</Field>
<Field name="drug_class_allergen_identifier">N0000175503</Field>
<Field name="severity_title">Severe</Field>
<Field name="severity_system">http://purl.bioontology.org/ontology/SNOMEDCT/</Field>
<Field name="severity_identifier">24484000</Field>
<Monkey name="woahthere">THIS SHOULDN'T BE THERE</Monkey>
</Model>
</Models>
""",
]
_TEST_ALLERGIES = [
"""
<Models xmlns='http://indivo.org/vocab/xml/documents#'>
<Model name="Allergy">
<Field name="allergic_reaction_title">Anaphylaxis</Field>
<Field name="allergic_reaction_system">http://purl.bioontology.org/ontology/SNOMEDCT/</Field>
<Field name="allergic_reaction_identifier">39579001</Field>
<Field name="category_title">Drug allergy</Field>
<Field name="category_system">http://purl.bioontology.org/ontology/SNOMEDCT/</Field
|
>
<Field name="category_identifier">416098002</Field>
<Field name="drug_class_allergen_title">Sulfonamide Antibacterial</Field>
<Field name="drug_class_allergen_system">http://purl.bioontology.org/ontology/NDFRT/</Fiel
|
d>
<Field name="drug_class_allergen_identifier">N0000175503</Field>
<Field name="severity_title">Severe</Field>
<Field name="severity_system">http://purl.bioontology.org/ontology/SNOMEDCT/</Field>
<Field name="severity_identifier">24484000</Field>
</Model>
</Models>
""",
]
TEST_ALLERGIES_INVALID = report_content_to_test_docs(_TEST_ALLERGIES_INVALID)
TEST_ALLERGIES = report_content_to_test_docs(_TEST_ALLERGIES)
|
graphql-python/graphql-core
|
tests/test_star_wars_introspection.py
|
Python
|
mit
| 11,566
| 0.000865
|
from typing import Any
from graphql import graphql_sync
from .star_wars_schema import star_wars_schema
def query_star_wars(source: str) -> Any:
result = graphql_sync(star_wars_schema, source)
assert result.errors is None
return result.data
def describe_star_wars_introspection_tests():
def describe_basic_introspection():
def allows_querying_the_schema_for_types():
data = query_star_wars(
"""
{
__schema {
types {
name
}
}
}
"""
)
# Include all types used by StarWars schema, introspection types and
# standard directives. For example, `Boolean` is used in `@skip`,
# `@include` and also inside introspection types.
assert data == {
"__schema": {
"types": [
{"name": "Human"},
{"name": "Character"},
{"name": "String"},
{"name": "Episode"},
{"name": "Droid"},
{"name": "Query"},
{"name": "Boolean"},
{"name": "__Schema"},
{"name": "__Type"},
{"name": "__TypeKind"},
{"name": "__Field"},
{"name": "__InputValue"},
{"name": "__EnumValue"},
{"name": "__Directive"},
{"name": "__DirectiveLocation"},
]
}
}
def allows_querying_the_schema_for_query_type():
data = query_star_wars(
"""
{
__schema {
queryType {
name
}
}
}
"""
)
assert data == {"__schema": {"queryType": {"name": "Query"}}}
def allows_querying_the_schema_for_a_specific_type():
data = query_star_wars(
"""
{
__type(name: "Droid") {
name
}
}
"""
)
assert data == {"__type": {"name": "Droid"}}
def allows_querying_the_schema_for_an_object_kind():
data = query_star_wars(
"""
{
__type(name: "Droid") {
name
kind
}
}
"""
)
assert data == {"__type": {"name": "Droid", "kind": "OBJECT"}}
def allows_querying_the_schema_for_an_interface_kind():
data = query_star_wars(
"""
{
__type(name: "Character") {
name
kind
}
}
"""
)
assert data == {"__type": {"name": "Character", "kind": "INTERFACE"}}
def allows_querying_the_schema_for_object_fields():
data = query_star_wars(
"""
{
__type(name: "Droid") {
name
fields {
name
type {
name
kind
}
}
}
}
"""
)
assert data == {
"__type": {
"name": "Droid",
"fields": [
{"name": "id", "type": {"name": None, "kind": "NON_NULL"}},
{"name": "name", "type": {"name": "String", "kind": "SCALAR"}},
{"name": "friends", "type": {"name": None, "kind": "LIST"}},
{"name": "appearsIn", "type": {"name": None, "kind": "LIST"}},
{
"name": "secretBackstory",
"type": {"name": "String", "kind": "SCALAR"},
},
{
"name": "primaryFunction",
"type": {"name": "String", "kind": "SCALAR"},
},
],
}
}
def allows_querying_the_schema_for_nested_object_fields():
data = query_star_wars(
"""
{
__type(name: "Droid") {
name
fields {
name
type {
name
kind
ofType {
name
kind
}
}
}
}
}
"""
)
assert data == {
"__type": {
"name": "Droid",
"fields": [
{
"name": "id",
"type": {
"name": None,
"kind": "NON_NULL",
"ofType": {"name": "String", "kind": "SCALAR"},
},
},
{
"name": "name",
"type": {
"name": "String",
"kind": "SCALAR",
"ofType": None,
},
},
{
"name": "friends",
"type": {
"name": None,
"kind": "LIST",
"ofType": {"name": "Character", "kind": "INTERFACE"},
},
},
{
"name": "appearsIn",
"type": {
"name": None,
"kind": "LIST",
"ofType": {"name": "Episode", "kin
|
d": "ENUM"},
},
},
{
"name": "secretBackstory",
"type": {
"name": "String",
"kind": "SCALAR",
"ofType": None,
},
},
{
"name": "primaryFunction",
"type": {
"name": "String",
"kind": "SCALAR",
"ofType": None,
},
},
],
}
}
def allows_querying_the_schema_for_field_args():
data = query_star_wars(
"""
{
__schema {
queryType {
fields {
name
args {
name
description
type {
name
kind
ofType {
name
kind
}
}
defaultValue
}
}
}
}
}
"""
)
assert data == {
"__schema": {
|
sigmapi-gammaiota/sigmapi-web
|
sigmapiweb/apps/PartyListV2/migrations/0012_auto_20181022_0058.py
|
Python
|
mit
| 444
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-10-22 00:58
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("PartyListV2", "0011_auto_20181021_2203"),
]
operations = [
migrations.RenameField(
model_name="restrictedguest",
old_name="addedBy",
new_name="added_by",
),
]
|
ict-felix/stack
|
vt_manager_kvm/src/python/vt_manager_kvm/communication/utils/ZabbixHelper.py
|
Python
|
apache-2.0
| 2,427
| 0.030902
|
# -*- coding: utf-8 -*-
'''
Created on 2015/04/17
@author: 2015 AIST
'''
import time
from django.conf import settings
from zbxsend import Metric, send_to_zabbix
import logging
import json
from vt_manager_kvm.communication.geni.v3.configurators.handlerconfigurator import HandlerConfigurator
class ZabbixHelper():
logger = logging.getLogger("ZabbixHelper")
@staticmethod
def sendAgentStatus(server, available):
if available == True:
status = 1 # UP
else:
status = 2 # DOWN
timestamp = int(time.time())
driver = HandlerConfigurator.get_vt_am_driver()
server_urn = driver.generate_component_id(server)
itemname = settings.ZBX_ITEM_HOSTSTATUS + '[' + str(server_urn) + ']'
metric = Metric(server.name, str(itemname), status, timestamp)
ZabbixHelper.sendZabbix(metric)
return
@staticmethod
def sendVMDiscovery(server, vms):
timestamp = int(time.time())
discoveryList = []
for vm in vms:
discovery = {"{#USERVM.NAME}": vm.name}
discoveryList.append(discovery)
tmpobj = {"data": discoveryList}
discoveryStr = json.dumps(tmpobj)
metric = Metric(server.name, settings.ZBX_ITEM_DISCOVERY_USERVM,
str(discoveryStr), timestamp)
ZabbixHelper.sendZabbix(metric)
return
@staticmethod
def sendVMStatusDiscovery(vms):
timestamp = int(time.time())
driver = HandlerConfigurator.get_vt_am_driver()
for vm in vms:
discoveryList = []
vm_urn = driver.generate_sliver_urn(vm)
discovery = {"{#USERVM.URN}": vm_urn}
discoveryList.append(discovery)
tmpobj = {"data": discoveryList}
discoveryStr = json.dumps(tmpobj)
metric = Metric(vm.name, settings.ZBX_ITEM_DISCOVERY_USERVMSTATUS, str(discoveryStr), timestamp)
ZabbixHelper.sendZabbix(metric)
return
@staticmethod
def sendVMStatus(vm, isUp):
if isUp == True:
status = 1 # UP
else:
status = 2 # DOWN
driver = HandlerConfigurator.get_vt_am_driver()
vm_urn = driver.generate_sliver_urn(vm)
timestamp = int(time.time())
itemname = settings.ZBX_ITEM_USERVMSTATUS + '[' + str(vm_urn) + ']'
metric = Metric(vm.name, str(itemname), status, timestamp)
ZabbixHelper.sendZabbix(metric)
return
@staticmethod
def sendZabbix(metric):
ZabbixHelper.logger.debug("send Zabbix " + str(metric))
result = send_to_zabbix([metric], settings.ZBX_SERVER_IP, settings.ZBX_SERVER_PORT)
if(result == False):
ZabbixHelper.logger.warn("cannot send VM status to Zabbix, continue anyway")
return
|
Byron/bcore
|
src/python/bsemantic/tests/test_generators.py
|
Python
|
lgpl-3.0
| 4,860
| 0.004527
|
#-*-coding:utf-8-*-
"""
@package bsemantic.tests.test_generators
@brief tests for bsemantic.generators
@author Sebastian Thiel
@copyright [GNU Lesser General Public License](https://www.gnu.org/licenses/lgpl.html)
"""
from __future__ import unicode_literals
from butility.future import str
__all__ = []
# W0614 unused wildcard import - its okay here
# pylint: disable-msg=W0614
# test from x import *
from bsemantic import ElementNodeTree
from bsemantic.generators import *
from bsemantic.tests.base import Test
from butility import DictObject
class TestGenerators(Test):
__slots__ = ()
def test_formatting(self):
"""Test basic node formatting"""
tree = ElementNodeTree.new('root', self.path_rule_data, element_node_type=StringFormatNode)
root = tree.root_node()
assert isinstance(root, StringFormatNode)
assert isinstance(root.format_string(), str)
assert len(root.format_keys()) == 1
assert root.format_keys()[0].count('.') == 2
assert len(root.format_data()) == 0
assert root.format_result is None, "default result should be None"
assert not root.format_data()
assert root.apply_format(dict()).format_result is None, "Should have no result if substitution failed"
assert not root.format_data()
data = dict(project='something')
assert root.apply_format(data).format_result is None, "It should not throw if it cannot resolve the key"
data = self.base_data()
assert root.apply_format(DictObject(data)).format_result == self.fs_root
fmt_data = root.format_data()
assert len(fmt_data) == 1 and isinstance(fmt_data['project'], dict) # check nesting
# if we assign with some incomplete dict once again, the result is reset
assert root.apply_format(dict()).format_result is None
assert not root.format_data()
# it is possible to reformat the node with the returned data
assert root.apply_format(DictObject(fmt_data)).format_result is not None
assert root.format_data() == fmt_data
# TREE ITERATION
################
string_tree = StringFormatNodeTree.new('root', self.path_rule_data)
assert len(list(string_tree)) == len(
list(tree)), "default iteration should be the same for both implementations"
assert len(list(string_tree.iterate_formatted_nodes(dict()))) == 0, "Without any data, nothing is returned"
snode_lists = list(string_tree.iterate_formatted_nodes(data))
assert len(snode_lists) == 1, "Got more/less than the eixpected single list: %s" % str(snode_lists)
node_list = snode_lists[0]
assert len(node_list) == 1, "Should have only one node"
assert node_list[0] is string_tree.root_node(), "the only contained string node should be the root"
# assert concatenation with different separators works. Note that we work on node-references here
new_sep = '#'
assert new_sep not in node_list.to_string()
node_list.extend(node_list)
# this affects one and the same root, the first node in a list will not contribute its parent separator
node_list[-1].child_separator = '#'
assert node_list.to_string().count(new_sep) == 1
assert len(list(string_tree.iterate(predicate=lambda nlist: False))) == 0, "iterate ignored the predicate"
# add more data to get more paths
data['project'].code = 'my_project'
snode_lists = list(string_tree.iterate_formatted_nodes(data, prune=lambda nlist: len(nlist) > 2))
assert len(snode_lists) == 1, "expected one longer node lists, got %i" % len(snode_lists)
assert len(snode_lists[0]) == 2, "Should have two nodes in it"
snode_lists = list(string_tree.iterate_formatted_nodes(data, predicate=lambda nlist: False))
assert len(snode_lists) == 0, "predicate has no effect"
# without the prune method, many more would be valid, as we have many nodes that don't need any data
snode_lists = list(string_tree.iterate_formatted_nodes(data))
assert len(snode_lists) > 15, "should have many more lists"
# STRING NODE LISTS ASSERTIONS
##############################
names = set()
for nlist in snode_lists:
nlist_string = nlist.to_string()
names.add(nlist_string)
# should have the same result, as the data is exactly the same
assert nlist.apply_format(data).to_string() == nlist_string
# also we assume that we don't get short lists, as we return the longest ones only
assert len(nlist) > 2, "Expected long lists, got one of length %i" % len(nlist)
# end for each nlist
assert len(names) == len(snode_lists), "expected unique path names"
# end class TestGenerators
|
banebg/embedded
|
webdimovanjeitemp/blog/django_thermometer/settings.py
|
Python
|
gpl-3.0
| 58
| 0
|
BASE_PATH = '/sys/bus/w1/devices/'
TAIL_PATH = "w1_slave"
|
auspbro/CodeSnippets
|
Python/LPTHW/ex26.py
|
Python
|
gpl-3.0
| 2,393
| 0.004597
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Ryan'
import ex25
def break_words(stuff):
"""This function will break up words for us."""
words = stuff.split(' ')
return words
def sort_words(words):
"""Sorts the words."""
return sorted(words)
def print_first_word(words):
"""Prints the first word after popping it off."""
word = words.pop(0)
print word
def print_last_word(words):
"""Prints the last word after popping it off."""
word = words.pop(-1)
print word
def sort_sentence(sentence):
"""Takes in a full sentence and returns the sorted words."""
words = break_words(sentence)
return sort_words(words)
def print_first_and_last(sentence):
"""Prints the first and last words of the sentence."""
words = break_words(sentence)
print_first_word(words)
print_last_word(words)
def print_first_and_last_sorted(sentence):
"""Sorts the words then prints the first and last one."""
words = sort_sentence(sentence)
print_first_word(words)
print_last_word(words)
print "Let's practice everything."
print 'You\'d need to know \'bout escapes with \\ that do \n newlines and \t tabs.'
poem = """
\tThe lovely world
with logic so firmly planted
cannot discern \n the needs of love
nor comprehend passion from intuition
and requires an explantion
\n\t\twhere there is none.
"""
print "--------------"
print poem
print "--------------"
five = 10 - 2 + 3 - 5
print "This should be five: %s" % five
def secret_formula(started):
jelly_beans = started * 500
jars = jelly_beans / 1000
crates = jars / 100
return jelly_beans, jars, crates
start_point = 10000
jelly_beans, jars, crates = secret_formula(start_point)
print "With a starting point of: %d" % start_point
print "We'd have %d jeans, %d jars, and %d crates." % (jelly_beans, jars, crates)
start_point = start_point / 10
print "We can also do that this way:"
print "We'd have %d beans, %d jars, and %d crabapples." % secret_formula(start_point)
sentence = "All god\tthings come to those who weight."
words = ex25.break_words(sentence)
sorted_words = ex25.sort_words(words)
print_first_word(words)
print_last_word(words)
print_first_word(sorted_words)
print_last_word(sorted_words)
sorted_words = ex25.sort_sentence(sentence)
print sorted_words
print_first_and_last(sentence)
print_first_and_last_sorted(sentence)
|
johnkerl/sack
|
uniqc_m.py
|
Python
|
bsd-2-clause
| 1,891
| 0.002644
|
#!/usr/bin/python -Wall
# ================================================================
# Given a list, returns a list of pairs of elements and repetition counts.
# Example (with commas elided for legibility):
#
# Input: [ 1 1 1 2 2 3 3 3 3 5 5 1 1 ]
# Output: [ [3 1] [2 2] [4 3] [2 5] [2 1] ]
#
# I.e. there is a run of 3 1's, then a run of 2 2's, then a run of 4 3's, then
# 2 5's, then 2 1's. This similar to the output of the Unix "uniq -c" command,
# if the input were one number per line. However, uniq -c puts the columns in
# reverse order from what I do here.
# ================================================================
# John Kerl
# kerl.john.r@gmail.com
# 2008-01-22
# ================================================================
def uniqc(list):
rv = []
n = len(list)
if (n == 0):
return []
curri = 0
nexti = 1
head = list[curri]
count = 1
while (curri < n):
if (nexti == n): # Last element in the list
if (list[curri] == head):
rv.append([head, count])
else:
rv.append([list[curri], 1])
elif (list[curri] == list[nexti]):
count += 1
else:
rv.append([head, count])
head = list[nexti]
count = 1
curri += 1
nexti += 1
return rv
# ----------------------------------------------------------------
# Test cases:
#def test1(list):
# #print list
# #print uniqc(list)
# #print
#
# # Pipe the output to, say, expand -20.
# print list, "\t", uniqc(list)
#
#def test_uniqc():
# test1([])
# test1([8])
# test1([8, 8])
# test1([8, 9])
# test1([9, 8])
# test1([9, 9])
# test1([8, 8, 8])
# test1([8, 8, 9])
# test1([8, 9, 8])
# test1([8, 9, 9])
# test1([9, 8, 8])
# test1([9, 8, 9])
# test1([9, 9, 8])
# test1([9, 9, 9])
#
#test_uniqc()
|
gentledevil/ansible
|
lib/ansible/plugins/shell/sh.py
|
Python
|
gpl-3.0
| 6,399
| 0.002813
|
# (c) 2014, Chris Church <chris@ninemoreminutes.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import pipes
import ansible.constants as C
import time
import random
_USER_HOME_PATH_RE = re.compile(r'^~[_.A-Za-z0-9][-_.A-Za-z0-9]*$')
class ShellModule(object):
# How to end lines in a python script one-liner
_SHELL_EMBEDDED_PY_EOL = '\n'
_SHELL_REDIRECT_ALLNULL = '> /dev/null 2>&1'
def env_prefix(self, **kwargs):
'''Build command prefix with environment variables.'''
env = dict(
LANG = C.DEFAULT_MODULE_LANG,
LC_CTYPE = C.DEFAULT_MODULE_LANG,
LC_MESSAGES = C.DEFAULT_MODULE_LANG,
)
env.update(kwargs)
return ' '.join(['%s=%s' % (k, pipes.quote(unicode(v))) for k,v in env.items()])
def join_path(self, *args):
return os.path.join(*args)
def path_has_trailing_slash(self, path):
return path.endswith('/')
def chmod(self, mode, path):
path = pipes.quote(path)
return 'chmod %s %s' % (mode, path)
def remove(self, path, recurse=False):
path = pipes.quote(path)
cmd = 'rm -f '
if recurse:
cmd += '-r '
return cmd + "%s %s" % (path, self._SHELL_REDIRECT_ALLNULL)
def mkdtemp(self, basefile=None, system=False, mode=None):
if not basefile:
basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48))
basetmp = self.join_path(C.DEFAULT_REMOTE_TMP, basefile)
if system and (basetmp.startswith('$HOME') or basetmp.startswith('~/')):
basetmp = self.join_path('/tmp', basefile)
cmd = 'mkdir -p "%s"' % basetmp
cmd += ' && echo "%s"' % basetmp
# change the umask in a subshell to achieve the desired mode
# also for directories created with `mkdir -p`
if mode:
tmp_umask = 0o777 & ~mode
cmd = '(umask %o && %s)' % (tmp_umask, cmd)
return cmd
def expand_user(self, user_home_path):
''' Return a command to expand tildes in a path
It can be either "~" or "~username". We use the POSIX definition of
a username:
http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_426
http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_276
'''
# Check that the user_path to expand is safe
if user_home_path != '~':
if not _USER_HOME_PATH_RE.match(user_home_path):
# pipes.quote will make the shell return the string verbatim
user_home_path = pipes.quote(user_home_path)
return 'echo %s' % user_home_path
def checksum(self, path, python_interp):
# The following test needs to be SH-compliant. BASH-isms will
# not work if /bin/sh points to a non-BASH shell.
#
# In the following test, each condition is a check and logical
# comparison (|| or &&) that sets the rc value. Every check is run so
# the last check in the series to fail will be the rc that is
# returned.
#
# If a check fails we error before invoking the hash functions because
# hash functions may successfully take the hash of a directory on BSDs
# (UFS filesystem?) which is not what the rest of the ansible code
# expects
#
# If all of the available hashing methods fail we fail with an rc of
# 0. This logic is added to the end of the cmd at the bottom of this
# function.
# Return codes:
# checksum: success!
# 0: Unknown error
# 1: Remote file does not exist
# 2: No read permissions on the file
# 3: File is a directory
# 4: No python interpreter
# Quoting gets complex here. We're writing a python string that's
# used by a variety of shells on the remote host to invoke a python
# "one-liner".
shell_escaped_path = pipes.quote(path)
test = "rc=flag; [ -r %(p)s ] || rc=2; [ -f %(p)s ] || rc=1; [ -d %(p)s ] && rc=3; %(i)s -V 2>/dev/null || rc=4; [ x\"$rc\" != \"xflag\" ] && echo \"${rc} \"%(p)s && exit 0" % dict(p=shell_escaped_path, i=python_interp)
csums = [
"({0} -c 'import hashlib; BLOCKSIZE = 65536; hasher = hashlib.sha1();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # Python > 2.4 (including python3)
"({0} -c 'import sha; BLOCKSIZE = 65536; hasher = sha.sha();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # Python == 2.4
]
cmd = " || ".join(csums)
cmd = "%s; %s || (echo \'0 \'%s)" % (test, cmd, shell_escaped_path)
return cmd
def build_module_command(self, env_string, shebang, cmd, rm_tmp=None):
# don't quote the cmd if it's an empty string, because this will
# break pipelining mode
if cmd.strip() != '':
cmd = pipes.quote(cmd)
cmd_parts = [env_string.strip(), shebang.replace("#!", "").strip(), cmd]
new_cmd = " ".join(cmd_parts)
if rm_tmp:
new_cmd = '%s; rm -rf "%s" %s' % (new_cmd, rm_tmp, self._SHELL_REDIRECT_ALLNULL)
return new_cmd
|
RoboJackets/robocup-software
|
launch/config_server.launch.py
|
Python
|
apache-2.0
| 388
| 0
|
import launch
from launch import LaunchDescription
import launch_ros.actions
def generate_launch_description():
return LaunchDescription(
[
launch_ros.actions.Node(
package="rj_robocup",
executable="config_server",
output="screen",
on_exit=launch.actions.Shutdown(),
)
]
)
|
befeltingu/UdacityFinalProject4
|
conference.py
|
Python
|
apache-2.0
| 33,399
| 0.006467
|
#!/usr/bin/env python
"""
conference.py -- Udacity conference server-side Python App Engine API;
uses Google Cloud Endpoints
$Id: conference.py,v 1.25 2014/05/24 23:42:19 wesc Exp wesc $
created by wesc on 2014 apr 21
"""
__author__ = 'wesc+api@google.com (Wesley Chun)'
from datetime import datetime
import endpoints
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
from models import ConflictException
from models import Profile
from models import ProfileMiniForm
from models import ProfileForm
from models import StringMessage
from models import BooleanMessage
from models import Conference
from models import ConferenceForm
from models import ConferenceForms
from models import ConferenceQueryForm
from models import ConferenceQueryForms
from models import TeeShirtSize
#Added
from models import Session
from models import SessionForm
from models import SessionForms
from settings import WEB_CLIENT_ID
from settings import ANDROID_CLIENT_ID
from settings import IOS_CLIENT_ID
from settings import ANDROID_AUDIENCE
from utils import getUserId
EMAIL_SCOPE = endpoints.EMAIL_SCOPE
API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID
MEMCACHE_ANNOUNCEMENTS_KEY = "RECENT_ANNOUNCEMENTS"
ANNOUNCEMENT_TPL = ('Last chance to attend! The following conferences '
'are nearly sold out: %s')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
DEFAULTS = {
"city": "Default City",
"maxAttendees": 0,
"seatsAvailable": 0,
"topics": [ "Default", "Topic" ],
}
SESSION_DEFAULTS = {
"description": '',
"highlights": ["Default"],
"duration": 0.0,
"users": []
}
OPERATORS = {
'EQ': '=',
'GT': '>',
'GTEQ': '>=',
'LT': '<',
'LTEQ': '<=',
'NE': '!='
}
FIELDS = {
'CITY': 'city',
'TOPIC': 'topics',
'MONTH': 'month',
'MAX_ATTENDEES': 'maxAttendees',
}
CONF_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeConferenceKey=messages.StringField(1),
)
CONF_POST_REQUEST = endpoints.ResourceContainer(
ConferenceForm,
websafeConferenceKey=messages.StringField(1),
)
## create Resource container for post request with Sessions
SESSION_POST_REQUEST = endpoints.ResourceContainer(
SessionForm,
websafeConferenceKey=messages.StringField(1)
)
## and for a GET Session request
SESSION_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeConferenceKey=messages.StringField(1)
)
SESSION_GETBYNAME = endpoints.ResourceContainer(
message_types.VoidMessage,
speaker=messages.StringField(1)
)
SESSION_GETBYTYPE = endpoints.ResourceContainer(
message_types.VoidMessage,
sessionType=messages.StringField(1),
websafeConferenceKey=messages.StringField(2)
)
USERWISHLIST = endpoints.ResourceContainer(
message_types.VoidMessage,
sessionKey = messages.StringField(1)
)
GET_FEATURED_SPEAKER = endpoints.ResourceContainer(
speaker = messages.StringField(1)
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@endpoints.api(name='conference', version='v1', audiences=[ANDROID_AUDIENCE],
allowed_client_ids=[WEB_CLIENT_ID, API_EXPLORER_CLIENT_ID, ANDROID_CLIENT_ID, IOS_CLIENT_ID],
scopes=[EMAIL_SCOPE])
class ConferenceApi(remote.Service):
"""Conference API v0.1"""
# Task 1.)
# Sessions
# - - - Conference objects - - - - - - - - - - - - - - - - -
def _copySessionToForm(self, session):
"""Copy relevant fields from Conference to ConferenceForm."""
sf = SessionForm()
for field in sf.all_fields():
if hasattr(session, field.name):
if field.name == 'date':
setattr(sf, field.name, str(getattr(session, field.name)))
else:
setattr(sf, field.name, getattr(session, field.name))
sf.check_initialized()
return sf
def _createSessionObject(self, request):
"""Create or update Session
|
object, returning SessionForm/request."""
# preload necessary data items
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
if not request.name:
raise endpoints.BadRequestException("Session 'name' field required")
# get the conf that the session should be added to
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
if not conf:
raise endpoints.UnauthorizedException("There must be a valid conference to add the sessions to")
if not request.speaker:
raise endpoints.BadRequestException("Session 'speaker' field required")
if not request.speaker:
raise endpoints.BadRequestException("Session 'type' field required")
# copy SessionForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
del data["websafeConferenceKey"]
## Check to see if valid start time. Must be between 1-12 am and 1-12 pm
## The format should be 00:xx ex: 09:am
if data['startTime']:
hour = int(data['startTime'][0:2])
ampm = data['startTime'][3:]
print ampm
if not (hour <= 12 and hour >= 1):
raise endpoints.BadRequestException("Start time must be between 1 and 12")
if not (ampm == 'am' or ampm == 'AM' or ampm == 'pm' or ampm == 'PM'):
raise endpoints.BadRequestException("Start time must be either am or pm")
else:
raise endpoints.BadRequestException("We need to know the start time of the session")
# add default values for those missing (both data model & outbound Message)
# convert dates from strings to Date objects; set month based on start_date
if data['date']:
data['date'] = datetime.strptime(data['date'][:10], "%Y-%m-%d").date()
else:
raise endpoints.BadRequestException("Session start date required")
for df in SESSION_DEFAULTS:
if data[df] in (None, []):
data[df] = SESSION_DEFAULTS[df]
setattr(request, df, SESSION_DEFAULTS[df])
# if there is a refrence to the Conference that the session is for then
# make the session a child of that Conference.
# creating the session key
s_id = Session.allocate_ids(size=1, parent=conf.key)[0]
s_key = ndb.Key(Session, s_id, parent=conf.key)
data["key"] = s_key
Session(**data).put()
## Additions for Task 4
## first get current featured speaker
curr_speaker = data["speaker"]
taskqueue.add(params={'speaker':curr_speaker, 'websafeConferenceKey': conf.key.urlsafe()},
url='/tasks/setFeaturedSpeaker')
return self._copySessionToForm(request)
# Task 4 Endpoint for getting the current featured speaker
@endpoints.method(message_types.VoidMessage,StringMessage,path='featuredspeaker',
http_method='GET', name='getFeaturedSpeaker')
def getFeaturedSpeaker(self,request):
"""Return the featured speaker for the session """
featured_speaker = memcache.get("featured_speaker")
# if there is not speaker then tell the 'user' there is no speaker
if featured_speaker == None:
featured_speaker = "There is no current featured speaker"
# using the string message class from models.py
string_message = StringMessage()
setattr(string_message,"data",featured_speaker)
return string_message
# Task 1 Enpoint for creating a session
@endpoints.method(SESSION_POST_REQUEST,SessionForm,path='session/{websafeConferenceKey}',
http_method='POST', name='createSession')
def createSession(self,request):
"""Create new
|
iLoop2/ResInsight
|
ThirdParty/Ert/devel/python/python/ert/enkf/obs_data.py
|
Python
|
gpl-3.0
| 2,908
| 0.016162
|
from types import NoneType
from ert.cwrap import BaseCClass, CWrapper
from ert.enkf import ENKF_LIB
from ert.util import Matrix
class ObsData(BaseCClass):
def __init__(self):
c_pointer = ObsData.cNamespace().alloc()
super(ObsData, self).__init__(c_pointer)
def __len__(self):
""" @rtype: int """
return ObsData.cNamespace().active_size(self)
def addBlock(self , obs_key , obs_size):
error_covar = None
error_covar_owner = False
return ObsData.cNamespace().add_block(self , obs_key , obs_size , error_covar , error_covar_owner)
def createDObs(self):
""" @rtype: Matrix """
return ObsData.cNamespace().allocdObs(self)
def createR(self):
""" @rtype: Matrix """
return ObsData.cNamespace().allocR(self)
def createD(self , E , S):
""" @rtype: Matrix """
return ObsData.cNamespace().allocD(self , E , S)
def createE( self , rng , active_ens_size):
""" @rtype: Matrix """
return ObsData.cNamespace().allocE(self , rng , active_ens_size)
def scaleMatrix(self, m):
ObsData.cNamespace().scale_matrix(self , m )
def scaleRMatrix(self, R):
ObsData.cNamespace().scale_Rmatrix(self , R )
def scale(self, S, E=None, D=None, R=None, D_obs=None):
assert isinstance(S, Matrix)
assert isinstance(E, (Matrix, NoneType))
assert isinstance(D, (Matrix, NoneType))
assert isinstance(R, (Matrix, NoneType))
assert isinstance(D_obs, (Matrix, NoneType))
ObsData.cNamespace().scale(self, S, E, D, R, D_obs)
def free(self):
ObsData.cNamespace().free(self)
cwrapper = CWrapper(ENKF_LIB)
cwrapper.registerObjectType("obs_data", ObsData)
ObsData.cNamespace().alloc = cwrapper.prototype("c_void_p obs_data_alloc()")
ObsData.cNamespace().free = cwrapper.prototype("void obs_data_free(obs_data)")
ObsData.cNamespace().active_size = cwrapper.prototype("int obs_data_get_active_size(obs_data)")
ObsData.cNamespace().add_block = cwrapper.prototype("obs_block_ref obs_data_add_block(obs_data , char* , int , matrix , bool)")
ObsData.cNamespace().allocdObs = cwrapper.prototype("matrix_obj obs_data_allocdObs(obs_data)")
ObsData.cNamespace().allocR = cwrapper.prototype("matrix_obj obs_data_allocR(obs_data)")
ObsData.cNamespace().allocD = cwrapper.prototype("matrix_obj obs_data_allocD(obs_data , matrix , matrix)")
ObsData.cNamespace().allocE = cwrapper.prototype("matrix_obj obs_data_allocE(obs_data , rng , int)")
ObsData.cNamespace().scale = cwrapper.prototype("void obs_data_scale(obs_data, matrix, matrix, matrix, matrix, matrix)")
ObsData.cNamespace().scale_matrix = cwrapper.prototype("void obs_data_scale_matrix(obs_data, matrix)")
ObsData.cNamespace().scale_Rmatrix = cwrapper.prototype("void obs_data_scale_Rmatrix(obs_data, matrix)")
|
shantilabs/django-smarturlfield
|
tests/test_modelfield.py
|
Python
|
mit
| 1,155
| 0.001732
|
from django import forms
from example.models import OneUrlModel, ManyUrlsModel
def test_one_url(db):
class F(forms.ModelForm):
class Meta:
model = OneUrlModel
fields = '__all__'
form = F({'url': 'ya.RU'})
instance = form.save()
assert instance.url == 'http://ya.ru'
def test_many_urls(db):
class F(forms.ModelForm):
class Meta:
model = ManyUrlsModel
fields = '__all__'
form = F({'urls': 'ya.RU, xx.com '
'httP://zzz.ff'})
assert form.is_valid()
instance = form.save()
assert instance.urls == [
'http://xx.com',
'http://ya.ru',
'http://zzz.ff',
]
form = F(instance=instance)
assert bool(form.errors) == False
def test_model(db):
instance = ManyUrlsModel.objects.create(
urls=['http://ya.ru', 'http://xx.com'],
)
assert ManyUrlsModel.objects.get(id=instance.id).urls == ['http://ya.ru', 'http://xx.com']
instance = ManyUrlsModel.objects.create(
urls='http://ya.ru',
)
assert ManyUrlsModel.objects.get(id=instance.id).urls == ['http://ya.ru']
|
saleswise/talon
|
talon/signature/learning/featurespace.py
|
Python
|
apache-2.0
| 2,878
| 0
|
# -*- coding: utf-8 -*-
""" The module provides functions for conversion of a message body/body lines
into classifiers features space.
The body and the message sender string are converted into unicode before
applying features to them.
"""
from talon.signature.constants import SIGNATURE_MAX_LINES
from talon.signature.learning.helpers import *
def features(sender=''):
'''Returns a list of signature features.'''
return [
# This one isn't from paper.
# Meant to match companies names, sender's names, address.
many_capitalized_words,
# This one is not from paper.
# Line is too long.
# This one is less aggressive than `Line is too short`
lambda line: 1 if len(line) > 60 else 0,
# Line contains email pattern.
binary_regex_search(RE_EMAIL),
# Line contains url.
binary_regex_search(RE_URL),
# Line contains phone number pattern.
binary_regex_search(RE_RELAX_PHONE),
# Line matches the regular expression "^[\s]*---*[\s]*$".
binary_regex_match(RE_SEPARATOR),
# Line has a sequence of 10 or more special characters.
binary_regex_search(RE_SPECIAL_CHARS),
# Line contains any typical signature words.
binary_regex_search(RE_SIGNATURE_WORDS),
# Line contains a pattern like Vitor R. Carvalho or William W. Cohen.
binary_regex_search(RE_NAME),
# Percentage of punctuation symbols in the line is larger than 50%
lambda line: 1 if punctuation_percent(line) > 50 else 0,
# Percentage of punctuation symbols in the line is larger than 90%
lambda line: 1 if punctuation_percent(line) > 90 else 0,
contains_sender_names(sender)
]
def apply_features(body, features):
'''Applies features to message body lines.
Returns list of lists. Each of the lists corresponds to the body line
and is constituted by the numbers of features occurrences (0 or 1).
E.g. if element j of list i equals 1 this means that
feature j occurred in line i (counting from the last line of the body).
'''
# collect all non empty lines
lines = [line for line in body.splitlines() if line.strip()]
# take the last SIGNATURE_MAX_LINES
last_lines = lines[-SIGNATURE_MAX_LINES:]
# apply features, fallback to zeros
return ([[f(line) for f in features] for line in last_lines] or
[[0 for f in features]])
def build_pattern(body, features):
'''Converts body into a pattern i.e. a point in the features space.
Applies features to the body lines and sums up the results.
Elements of the pattern indicate how many times a certain feature occurred
in the last lines of the body.
'''
line_patterns = apply_features(body, features)
return reduce(lambda x, y: [i + j for i, j in zip(x, y)], line_patterns)
|
freevo/freevo1
|
src/helpers/__init__.py
|
Python
|
gpl-2.0
| 34
| 0
|
"""
Helper and server modules
"""
| |
unioslo/cerebrum
|
Cerebrum/rest/api/__init__.py
|
Python
|
gpl-2.0
| 2,970
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2016-2018 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Application bootstrap"""
from __future__ import absolute_import, unicode_literals
import time
from flask import Flask, g, request
from werkzeug.middleware.proxy_fix import ProxyFix
from six import text_type
from . import database as _database
from . import auth as _auth
from .routing import NormalizedUnicodeConverter
db = _database.DatabaseContext()
proxy_auth = _auth.ProxyAuth()
auth = _auth.Authentication()
def create_app(config=None):
app = Flask(__name__)
app.config.from_object('Cerebrum.rest.default_config')
trusted_hosts = []
if config:
app.config.from_object(config)
trusted_hosts = app.config.get('TRUSTED_HOSTS', [])
app.config['RESTFUL_JSON'] = {'ensure_ascii': False, 'encoding': 'utf-8'}
app.wsgi_app = ProxyFix(app.wsgi_app, x_host=1)
# Replace builtin URL rule converters. Must be done before rules are added.
app.url_map.converters.update({
'default': NormalizedUnicodeConverter,
'string': NormalizedUnicodeConverter,
})
from Cerebrum.rest.api import v1
app.register_blueprint(v1.blueprint, url_prefix='/v1')
@app.before_request
def register_request_start():
g.request_start = time.time()
db.init_app(app)
proxy_auth.init_app(app, db)
auth.init_app(app, db)
@app.after_request
def log_request_data(response):
req_time = time.time() - g.request_start
req_time_millis = int(round(req_time * 1000))
ip_log = list(request.access_route)
for ip in ip_log:
if ip in trusted_hosts:
ip_log.pop(ip_log.index(ip))
app.logger.info('"{method} {path}" - {code} - {req_time}ms - {auth} - '
'{ip} - "{ua}"'.format(
method=request.method,
path=request.full_path,
code=response.status_code,
auth=text_type(auth.ctx.module),
ip=ip_log,
ua=request.user_agent,
req_time=req_time_millis))
return response
return app
|
cecilulysess/MinerLite
|
main.py
|
Python
|
gpl-3.0
| 1,792
| 0.016183
|
#!/usr/bin/python
# MinerLite - A client side miner controller.
# This will launch cgminer with a few delay seconds and
# retrieve the local data and post it into somewhere!
#
# Author: Yanxiang Wu
# Release Under GPL 3
# Used code from cgminer python API example
import socket
import json
import sys
import subprocess
import time
import os
path = "/home/ltcminer/mining/cgminer/cgminer"
log_file = "/home/ltcminer/mining/minerlite.log"
def linesplit(socket):
buffer = socket.recv(4096)
done = False
while not done:
more = socket.recv(4096)
if not more:
done = True
else:
buffer = buffer+more
if buffer:
return buffer
def retrieve_cgminer_info(command, parameter):
"""retrieve status of devices from cgminer
"""
api_ip = '127.0.0.1'
api_port = 4028
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect((api_ip,int(api_port)))
if not parameter:
s.send(json.dumps({"command":command,"parameter":parameter}))
else:
s.send(json.dumps({"command":command}))
response = linesplit(s)
response = response.replace('\x00','')
return_val = response
response = json.loads(response)
# print response
s.close()
return return_val
def run_cgminer(path):
subprocess.Popen([path, "--api-listen"])
print "Starting cgminer in 2 seconds"
time.sleep(2)
print "Running cgminer ..."
run_cgminer(path)
time.sleep(15)
with open(log_file, 'a') as logfile:
try:
logfile.write( retrieve_cgminer_info("devs", None) )
except socket.error:
pass
|
philkroos/tinkervision
|
src/test/colormatch/pick_color.py
|
Python
|
gpl-2.0
| 2,616
| 0.003058
|
import cv2
import numpy as np
import sys
from time import sleep
class Hue:
h = 0
s = 0
v = 0
class HueAverage:
def __init__(self, wname, wwidth, wheight, avgSize):
self.avgSize = avgSize
self.x = self.y = 0
self.at = Hue();
self.avg = Hue();
self.clicked = False
self.width = wwidth
self.height = wheight
def mouseHandler(self, event, x, y, flag, param):
if self.hsv == None:
return
if (event == cv2.EVENT_LBUTTONDOWN):
self.x = x
self.y = y
xmin = int(max(0, x - self.avgSize))
xmax = int(min(self.width, x + self.avgSize))
ymin = int(max(0, y - self.avgSize))
ymax = int(min(self.height, y + self.avgSize))
self.at.h = self.hsv[y, x][0]
self.at.s = self.hsv[y, x][1]
self.at.v = self.hsv[y, x][2]
h = s = v = 0
for x in range(xmin, xmax):
for y in range(ymin, ymax):
h += self.hsv[y, x][0]
s += self.hsv[y, x][1]
v += self.hsv[y, x][2]
values = len(range(xmin, xmax)) * len(range(ymin, ymax))
self.avg.h = h / values
self.avg.s = s / values
self.avg.v = v / values
wname = 'frame'
face = cv2.FONT_HERSHEY_SIMPLEX
scale = .5
thickness =2
color = 255
size, base = cv2.getTextSize("HSV at %d/%d: 255-255-255. Avg around: 255-255-255" %
(111, 111), face, scale, thickness)
width = size[0]
height = size[1]
def run(cam_id, distance):
print "Using cam", cam_id
cap = cv2.VideoCapture(cam_id)
cv2.namedWindow(wname)
framewidth = cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)
frameheight = cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)
hp = HueAverage(wname, framewidth, frameheight, distance)
cv2.setMouseCallback(wname, hp.mouseHandler)
while True:
ret, frame = cap.read()
if ret != True:
continue
hp.hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
rows, cols, _ = frame.shape
texty = rows - 10 - height
textx = 10
cv2.putText(frame, "HSV at %d/%d: %d-%d-%d. Avg around: %d-%d-%d" %
(hp.x, hp.y, hp.at.h, hp.at.s, hp.at.v,
hp.avg.h, hp.avg.s, hp.avg.v),
(textx, texty), face, scale, color, thickness)
cv2.imshow(wname, frame)
if cv2.waitKey(1) & 0xFF == ord('q') or hp.clicked:
break
cap.release()
return [hp.avg.h, hp.avg.s, hp.avg.v]
|
yowmamasita/social-listener-exam
|
ferris/core/controller.py
|
Python
|
mit
| 13,807
| 0.002825
|
import webapp2
import re
import weakref
from webapp2 import cached_property
from webapp2_extras import sessions
from google.appengine.api import users
from ferris.core.ndb import encode_key, decode_key
from ferris.core.uri import Uri
from ferris.core import inflector, auth, events, views, request_parsers, response_handlers, routing
from ferris.core.json_util import parse as json_parse, stringify as json_stringify
from bunch import Bunch
_temporary_route_storage = []
def route(f):
"""
Marks a method for automatically routing and accessible via HTTP.
See :mod:`~ferris.core.routing` for more details on how methods are auto-routed.
This decorator should always be the outermost decorator.
For example::
@route
def exterminate(self):
return 'EXTERMINAAATE!'
"""
global _temporary_route_storage
_temporary_route_storage.append((f, (), {}))
return f
def route_with(*args, **kwargs):
"""
Marks a class method to be routed similar to :func:`route` and passes and additional arguments to the webapp2.Route
constructor.
:param template: Sets the URL template for this action
For example::
@route_with(template='/posts/archive/<year>')
def archive_by_year(self, year):
pass
"""
def inner(f):
_temporary_route_storage.append((f, args, kwargs))
return f
return inner
def add_authorizations(*args):
"""
Adds additional authorization chains to a particular action. These are executed after the
chains set in Controller.Meta.
"""
def inner(f):
setattr(f, 'authorizations', args)
return f
return inner
class Controller(webapp2.RequestHandler, Uri):
"""
Controllers allows grouping of common actions and provides them with
automatic routing, reusable components, request data parsering, and
view rendering.
"""
_controllers = []
class __metaclass__(type):
def __new__(meta, name, bases, dict):
global _temporary_route_storage
cls = type.__new__(meta, name, bases, dict)
if name != 'Controller':
# Add to the controller registry
if not cls in Controller._controllers:
Controller._controllers.append(cls)
# Make sure the metaclass as a proper inheritence chain
if not issubclass(cls.Meta, Controller.Meta):
cls.Meta = type('Meta', (cls.Meta, Controller.Meta), {})
cls._route_list = _temporary_route_storage
_temporary_route_storage = []
return cls
# The name of this class, lowercase (automatically determined)
name = 'controller'
#: The current user as determined by ``google.appengine.api.users.get_current_user()``.
user = None
#: View Context, all these variables will be passed to the view.
context = property(lambda self: self.meta.view.context)
class Meta(object):
"""
The Meta class stores configuration information for a Controller. This class is constructed
into an instance and made available at ``self.meta``. This class is optional, Controllers that
do not specify it will receive the default configuration. Additionally, you need not inherit from
this class as Controller's metaclass will ensure it.
For example::
def Posts(Controller):
class Meta: # no inheritance
prefixes = ('admin', )
# all other properties inherited from default.
"""
#: List of components.
#: When declaring a controller, this must be a list or tuple of classes.
#: When the controller is constructed, ``controller.components`` will
#: be populated with instances of these classes.
components = tuple()
#: Prefixes are added in from of controller (like admin_list) and will cause routing
#: to produce a url such as '/admin/name/list' and a name such as 'admin:name:list'
prefixes = tuple()
#: Authorizations control access to the controller. Each authorization is a callable.
#: Authorizations are called in order and all must return True for the request to be
#: processed. If they return False or a tuple like (False, 'message'), the request will
#: be rejected.
#: You should **always** have ``auth.require_admin_for_prefix(prefix=('admin',))`` in your
#: authorization chain.
authorizations = (auth.require_admin_for_prefix(prefix=('admin',)),)
#: Which :class:`~ferris.core.views.View` class to use by default. use :meth:`change_view` to switch views.
View = views.TemplateView
#: Which :class:`RequestParser` class to use by default. See :meth:`Controller.parse_request`.
Parser = 'Form'
def __init__(self, controller):
self._controller = controller
self.view = None
self.change_view(self.View)
def change_view(self, view, persist_context=True):
"""
Swaps the view, and by default keeps context between the two views.
:param view: View class or name.
"""
context = self.view.context if self.view else None
self.View = view if not isinstance(view, basestring) else views.factory(view)
self.view = self.View(self._controller, context)
class Util(object):
"""
Provides some basic utility functions. This class is constructed into an instance
and made available at ``controller.util``.
"""
def __init__(self, controller):
self._controller = controller
#: Decodes a urlsafe ``ndb.Key``.
decode_key = staticmethod(decode_key)
#: Encode an ``ndb.Key`` (or ``ndb.Model`` instance) into an urlsafe string.
encode_key = staticmethod(encode_key)
#: Decodes a json string.
parse_json = staticmethod(json_parse)
#: Encodes a json string.
stringify_json = staticmethod(json_stringify)
def __init__(self, *args, **kwargs):
super(Controller, self).__init__(*args, **kwargs)
self.name = inflector.underscore(self.__class__.__name__)
self.proper_name = self.__class__.__name__
self.util = self.Util(weakref.proxy(self))
self.route = None
def _build_components(self):
self.events.before_build_components(controller=self)
if hasattr(self.Meta, 'components'):
component_classes = self.Meta.components
self.components = Bunch()
for cls in component_classes:
if hasattr(cls, 'name'):
name = cls.name
else:
name = inflector.underscore(cls.__name__)
self.components[name] = cls(weakref.proxy(self))
else:
self.components = Bunch()
self.events.after_build_components(controller=self)
def _init_route(self):
action = self.request.route.handler_method
prefix = None
for possible_prefix in self.Meta.prefixes:
if action.startswith(possible_prefix):
prefix = possible_prefix
action = action.replace(prefix + '_', '')
break
self.route = Bunch(
prefix=prefix,
controller=self.name,
action=action,
name=self.request.route.name,
args=self.request.route_args,
kwargs=self.request.route_kwargs)
def _init_meta(self):
self.user = users.get_current_user()
self._init_route()
self.events = events.NamedBroadcastEvents(prefix='controller_')
self.meta = self.Meta(weakref.proxy(self))
self._build_components()
@classmethod
def _build_routes(cls, router):
"""
Called in the main app router to get all of this controller's routes.
Override to add custom/additional routes.
"""
# Route the rest methods
router.add(routing.build_scaffold_routes_fo
|
yassineS/COSMOS-2.0
|
cosmos/web/views.py
|
Python
|
gpl-3.0
| 8,041
| 0.00398
|
import itertools as it
from operator import attrgetter
from flask import Markup, render_template, Blueprint, redirect, url_for, flash, abort, request
from sqlalchemy import desc
from .. import Execution, Stage, Task, TaskStatus
from ..job.JobManager import JobManager
from . import filters
from ..graph.draw import draw_task_graph, draw_stage_graph
def gen_bprint(cosmos_app):
session = cosmos_app.session
def get_execution(id):
return session.query(Execution).filter_by(id=id).one()
bprint = Blueprint('cosmos', __name__, template_folder='templates', static_folder='static',
static_url_path='/cosmos/static')
filters.add_filters(bprint)
@bprint.route('/execution/delete/<int:id>')
def execution_delete(id):
e = get_execution(id)
e.delete(delete_files=True)
flash('Deleted %s' % e)
return redirect(url_for('cosmos.index'))
@bprint.route('/')
def index():
executions = session.query(Execution).order_by(desc(Execution.created_on)).all()
session.expire_all()
return render_template('cosmos/index.html', executions=executions)
@bprint.route('/')
def home():
return index()
@bprint.route('/execution/<name>/')
# @bprint.route('/execution/<int:id>/')
def execution(name):
execution = session.query(Execution).filter_by(name=name).one()
return render_template('cosmos/execution.html', execution=execution)
@bprint.route('/execution/<execution_name>/<stage_name>/')
def stage(execution_name, stage_name):
ex = session.query(Execution).filter_by(name=execution_name).one()
stage = session.query(Stage).filter_by(execution_id=ex.id, name=stage_name).one()
if stage is None:
return abort(404)
submitted = filter(lambda t: t.status == TaskStatus.submitted, stage.tasks)
jm = JobManager(cosmos_app.get_submit_args)
f = attrgetter('drm')
drm_statuses = {}
for drm, tasks in it.groupby(sorted(submitted, key=f), f):
drm_statuses.update(jm.drms[drm].drm_statuses(list(tasks)))
return render_template('cosmos/stage.html', stage=stage, drm_statuses=drm_statuses)
# x=filter(lambda t: t.status == TaskStatus.submitted, stage.tasks))
@bprint.route('/execution/<int:ex_id>/stage/<stage_name>/delete/')
def stage_delete(ex_id, stage_name):
s = session.query(Stage).filter(Stage.execution_id == ex_id, Stage.name == stage_name).one()
flash('Deleted %s' % s)
ex_url = s.execution.url
s.delete(delete_files=False)
return redirect(ex_url)
# @bprint.route('/task/<int:id>/')
# def task(id):
# task = session.query(Task).get(id)
# if task is None:
# return abort(404)
# return redirect(url_for('cosmos.task_friendly', ex_name=task.execution.name, stage_name=task.stage.
|
name, task_
|
id=task.id))
# @bprint.route('/execution/<ex_name>/<stage_name>/task/')
# def task(ex_name, stage_name):
# # resource_usage = [(category, field, getattr(task, field), profile_help[field]) for category, fields in
# # task.profile_fields for field in fields]
# assert request.method == 'GET'
# tags = request.args
# ex = session.query(Execution).filter_by(name=ex_name).one()
# stage = session.query(Stage).filter_by(execution=ex, name=stage_name).one()
# task = session.query(Task).filter_by(stage=stage, tags=tags).one()
# if task is None:
# return abort(404)
# resource_usage = [(field, getattr(task, field)) for field in task.profile_fields]
# return render_template('cosmos/task.html', task=task, resource_usage=resource_usage)
@bprint.route('/execution/<ex_name>/<stage_name>/task/<task_id>')
def task(ex_name, stage_name, task_id):
# resource_usage = [(category, field, getattr(task, field), profile_help[field]) for category, fields in
# task.profile_fields for field in fields]
task = session.query(Task).get(task_id)
if task is None:
return abort(404)
resource_usage = [(field, getattr(task, field)) for field in task.profile_fields]
return render_template('cosmos/task.html', task=task, resource_usage=resource_usage)
@bprint.route('/execution/<int:id>/taskgraph/<type>/')
def taskgraph(id, type):
from ..graph.draw import pygraphviz_available
ex = get_execution(id)
if pygraphviz_available:
if type == 'task':
svg = Markup(draw_task_graph(ex.task_graph(), url=True))
else:
svg = Markup(draw_stage_graph(ex.stage_graph(), url=True))
else:
svg = 'Pygraphviz not installed, cannot visualize. (Usually: apt-get install graphviz && pip install pygraphviz)'
return render_template('cosmos/taskgraph.html', execution=ex, type=type,
svg=svg)
# @bprint.route('/execution/<int:id>/taskgraph/svg/<type>/')
# def taskgraph_svg(id, type, ):
# e = get_execution(id)
#
# if type == 'task':
# return send_file(io.BytesIO(taskgraph_.tasks_to_image(e.tasks)), mimetype='image/svg+xml')
# else:
# return send_file(io.BytesIO(stages_to_image(e.stages)), mimetype='image/svg+xml')
#
return bprint
profile_help = dict(
# time
system_time='Amount of time that this process has been scheduled in kernel mode',
    user_time='Amount of time that this process has been scheduled in user mode. This includes guest time (time spent running a virtual CPU), so that applications that are not aware of the guest time field do not lose that time from their calculations',
cpu_time='system_time + user_time',
wall_time='Elapsed real (wall clock) time used by the process.',
percent_cpu='(cpu_time / wall_time) * 100',
# memory
avg_rss_mem='Average resident set size (Kb)',
max_rss_mem='Maximum resident set size (Kb)',
single_proc_max_peak_rss='Maximum single process rss used (Kb)',
avg_virtual_mem='Average virtual memory used (Kb)',
max_virtual_mem='Maximum virtual memory used (Kb)',
single_proc_max_peak_virtual_mem='Maximum single process virtual memory used (Kb)',
major_page_faults='The number of major faults the process has made which have required loading a memory page from disk',
minor_page_faults='The number of minor faults the process has made which have not required loading a memory page from disk',
avg_data_mem='Average size of data segments (Kb)',
max_data_mem='Maximum size of data segments (Kb)',
avg_lib_mem='Average library memory size (Kb)',
max_lib_mem='Maximum library memory size (Kb)',
avg_locked_mem='Average locked memory size (Kb)',
max_locked_mem='Maximum locked memory size (Kb)',
avg_num_threads='Average number of threads',
max_num_threads='Maximum number of threads',
avg_pte_mem='Average page table entries size (Kb)',
max_pte_mem='Maximum page table entries size (Kb)',
#io
nonvoluntary_context_switches='Number of non voluntary context switches',
voluntary_context_switches='Number of voluntary context switches',
block_io_delays='Aggregated block I/O delays',
avg_fdsize='Average number of file descriptor slots allocated',
max_fdsize='Maximum number of file descriptor slots allocated',
#misc
num_polls='Number of times the resource usage statistics were polled from /proc',
    names='Names of all descendant processes (there is always a python process for the profile_working.py script)',
num_processes='Total number of descendant processes that were spawned',
pids='Pids of all the descendant processes',
exit_status='Exit status of the primary process being profiled',
SC_CLK_TCK='sysconf(_SC_CLK_TCK), an operating system variable that is usually equal to 100, or centiseconds',
)
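# Hedged illustration (not part of the original module) of how the derived fields
# described above relate to the raw ones; the numbers are made up.
def _derived_profile_example(system_time=2.0, user_time=6.0, wall_time=10.0):
    cpu_time = system_time + user_time            # cpu_time = system_time + user_time
    percent_cpu = (cpu_time / wall_time) * 100    # percent_cpu = (cpu_time / wall_time) * 100
    return cpu_time, percent_cpu                  # -> (8.0, 80.0)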
|
scionrep/scioncc
|
src/ion/process/bootstrap/datastore_loader.py
|
Python
|
bsd-2-clause
| 2,396
| 0.004591
|
#!/usr/bin/env python
"""Process that loads the datastore"""
__author__ = 'Michael Meisinger, Thomas Lennan'
"""
Possible Features
- load objects into different datastores
- load from a directory of YML files in ion-definitions
- load from a ZIP of YMLs
- load an additional directory (not under GIT control)
- change timestamp for resources
- load a subset of objects by type, etc
"""
from pyon.public import CFG, log, ImmediateProcess, iex
from pyon.datastore import datastore_admin
from pyon.core import bootstrap
from pyon.core.bootstrap import get_sys_name
class DatastoreAdmin(ImmediateProcess):
"""
bin/pycc -x ion.process.bootstrap.datastore_loader.DatastoreLoader op=clear prefix=ion
bin/pycc -x ion.process.bootstrap.datastore_loader.DatastoreLoader op=dump path=res/preload/local/my_dump
bin/pycc -fc -x ion.process.bootstrap.datastore_loader.DatastoreLoader op=load path=res/preload/local/my_dump
bin/pycc -x ion.process.bootstrap.datastore_loader.DatastoreLoader op=dumpres
"""
def on_init(self):
pass
def on_start(self):
# print env temporarily to debug cei
import os
log.info('ENV vars: %s' % str(os.environ))
op = self.CFG.get("op", None)
datastore = self.CFG.get("datastore", None)
path = self.CFG.get("path", None)
prefix = self.CFG.get("prefix", get_sy
|
s_name()).lower()
log.info("DatastoreLoader: {op=%s, datastore=%s, path=%s, prefix=%s}" % (op, datastore, path, prefix))
self.da = datastore_admin.DatastoreAdmin()
if op:
if op == "load":
self.da.load_datastore(path, datastore, ignore_errors=False)
elif op == "dump":
self.da.dump_datastore(path, datastore)
elif op == "dumpres":
from ion.util.datastore.r
|
esources import ResourceRegistryHelper
rrh = ResourceRegistryHelper()
rrh.dump_resources_as_xlsx(path)
elif op == "blame":
# TODO make generic
self.da.get_blame_objects()
elif op == "clear":
self.da.clear_datastore(datastore, prefix)
else:
raise iex.BadRequest("Operation unknown")
else:
raise iex.BadRequest("No operation specified")
def on_quit(self):
pass
DatastoreLoader = DatastoreAdmin
|
infobloxopen/infoblox-netmri
|
infoblox_netmri/api/remote/models/if_vlan_remote.py
|
Python
|
apache-2.0
| 4,719
| 0.003179
|
from ..remote import RemoteModel
from infoblox_netmri.utils.utils import check_api_availability
class IfVlanRemote(RemoteModel):
"""
VLANs that an interface is in along with the STP state of the interface. Also includes SVIs (VlanInterfaceInd = 1) and peer interfaces attached to access ports (VlanExtensionInd = 1).
| ``DataSourceID:`` The internal NetMRI identifier for the collector NetMRI that collected this data record.
| ``attribute type:`` number
| ``DeviceID:`` The internal NetMRI identifier of the device to which the interface belongs.
| ``attribute type:`` number
| ``ifVlanChangedCols:`` The fields that changed between this revision of the record and the previous revision.
| ``attribute type:`` string
| ``ifVlanEndTime:`` The ending effective time of this revision of this record, or empty if still in effect.
| ``attribute type:`` datetime
| ``IfVlanID:`` The internal NetMRI identifier for the interface-in-VLAN record.
| ``attribute type:`` number
| ``ifVlanSource:`` Internal tracking information for NetMRI algorithms.
| ``attribute type:`` string
| ``ifVlanStartTime:`` The starting effective time of this revision of the record.
| ``attribute type:`` datetime
| ``ifVlanTimestamp:`` The date and time this recor
|
d was collected or calculated.
| ``attribute type:`` datetime
| ``InterfaceID:`` The internal NetMRI identifier of the interface participating in the VLAN.
| ``attribute type:`` number
| ``StpPortDesignatedBridge:`` The Spanning Tree Protocol designated bridge address of this interface for this VLAN.
| ``attribute type:`` string
| ``StpPortState:`` The Spa
|
nning Tree Protocol state of this interface for this VLAN.
| ``attribute type:`` string
| ``VlanExtensionInd:`` A flag indicating if this record represents an interface attached to an access port rather than on a participating bridge.
| ``attribute type:`` bool
| ``VlanID:`` The internal NetMRI identifier of the VLAN.
| ``attribute type:`` number
| ``VlanInterfaceInd:`` A flag indicating if this record represents the SVI for the VLAN on this device.
| ``attribute type:`` bool
| ``VlanMemberID:`` The internal NetMRI identifier for the VlanMember record of the device to which the interface belongs, for this VLAN.
| ``attribute type:`` number
"""
properties = ("DataSourceID",
"DeviceID",
"ifVlanChangedCols",
"ifVlanEndTime",
"IfVlanID",
"ifVlanSource",
"ifVlanStartTime",
"ifVlanTimestamp",
"InterfaceID",
"StpPortDesignatedBridge",
"StpPortState",
"VlanExtensionInd",
"VlanID",
"VlanInterfaceInd",
"VlanMemberID",
)
@property
@check_api_availability
def data_source(self):
"""
The NetMRI device that collected this record.
``attribute type:`` model
"""
return self.broker.data_source(**{"IfVlanID": self.IfVlanID})
@property
@check_api_availability
def device(self):
"""
The device to which the interface belongs.
``attribute type:`` model
"""
return self.broker.device(**{"IfVlanID": self.IfVlanID})
@property
@check_api_availability
def interface(self):
"""
The interface participating in the VLAN.
``attribute type:`` model
"""
return self.broker.interface(**{"IfVlanID": self.IfVlanID})
@property
@check_api_availability
def vlan(self):
"""
The VLAN to which this interface VLAN membership belongs.
``attribute type:`` model
"""
return self.broker.vlan(**{"IfVlanID": self.IfVlanID})
@property
@check_api_availability
def vlan_member(self):
"""
The VLAN membership record of the device to which the interface belongs, for this VLAN.
``attribute type:`` model
"""
return self.broker.vlan_member(**{"IfVlanID": self.IfVlanID})
@property
@check_api_availability
def infradevice(self):
"""
The device to which the interface belongs.
``attribute type:`` model
"""
return self.broker.infradevice(**{"IfVlanID": self.IfVlanID})
@property
@check_api_availability
def meta(self):
"""
User custom fields
``attribute type:`` model
"""
return self.broker.meta(**{"IfVlanID": self.IfVlanID})
|
fffy2366/image-processing
|
nudetest.py
|
Python
|
mit
| 7,025
| 0.005275
|
#!bin/evn python
# encoding:utf-8
from __future__ import print_function
import os
import sys
from nude import Nude
# from tests.python.nude import Nude
import time
import cv2
import cv2.cv as cv
from PIL import Image
ROOT = os.path.dirname(os.path.abspath(__file__))
# IMAGE_DIR = "/Users/fengxuting/Downloads/photo/photo_pass/photo_pass/"
IMAGE_DIR = "/Users/fengxuting//python/image-processing/public/uploads/nude/"
# IMAGE_DIR = "D:/photo/photo_pass/"
# IMAGE_DIR = "D:/python/image-processing/public/uploads/nude/"
class NudeTest:
    # Face detection
def face(self,file):
# Get user supplied values
oriImg = IMAGE_DIR + file
        # Resize/compress the image
# disImg = IMAGE_DIR +"ocrdis"+file
# newImg = resizeImg(ori_img=oriImg,dst_img=disImg,dst_w=2048,dst_h=2048,save_q=100)
# cascPath = "./data/haarcascades/haarcascade_frontalface_alt.xml"
cascPath = "./data/lbpcascades/lbpcascade_frontalface.xml"
        # Create the Haar cascade
facecascade = cv2.CascadeClassifier(cascPath)
# Read the image
image = cv2.imread(oriImg)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray, gray)  # Histogram equalization: enhances image contrast by stretching the range of pixel intensities.
        gray = cv2.medianBlur(gray, 3)  # Noise reduction?
(height, width, a) = image.shape
# Detect faces in the image
faces = facecascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=2,
minSize=(30, 30),
flags=cv2.cv.CV_HAAR_SCALE_IMAGE
)
        # 1. Faces smaller than 0.5% of the image are not treated as faces. 2. With multiple faces, compare each to the largest; if the ratio is below 50%, do not treat it as a face.
faces_area = []
face_count = 0
for (x, y, w, h) in faces:
face_area = w * h
            # Ratio of the face area to the whole image
face_scale = (face_area) / float(height * width) * 100
# print("name %s,scale %s,x %s,y %s,w %s,h %s,area %s" % (file,face_scale,x,y,w,h,face_area))
# if face_scale<0.5:
# continue
faces_area.append(face_area)
            # Display
# cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
# cv2.imshow("Faces found" ,image)
# cv2.waitKey(0)
        # Display
# cv2.destroyAllWindows()
faces_new = []
if(len(faces_area)>1):
face_max = max(faces_area)
for index,face in enumerate(faces) :
(x, y, w, h) = face
                # Ratio of this face to the largest face
scale = (w*h)/float(face_max) * 100
# print("sc
|
ale %s" % (scale))
if(scale<50):
# delete(faces,index,axis=0)
pass
else:
faces_new.append(face)
else:
faces_new = faces
return faces_new
    # Crop the part of the image below the face
def cropImg(self,file,faces):
oriImg = IMAGE_DIR + file
        # Crop below the face, up to five times the face height
# ipl_image = cv.LoadImage(or
|
iImg)
ipl_image = Image.open(oriImg)
# print(ipl_image.height)
if (len(faces) < 1):
print("no face")
return faces
(x, y, w, h) = faces[0]
yy = int(y + 1.5*h)
hh = h * 6
(width,height) = ipl_image.size
if (hh > height - y):
hh = height - y
if(yy>=height):
return False
dst = ipl_image.crop((x, yy, x + w, y + hh))
dst.save(IMAGE_DIR + file)
        # The following crops the image with cv; it raises an error when y is greater than 100, not sure why
# cv.SetImageROI(ipl_image,(x,y,w,h))
# cv.SetImageROI(ipl_image,(x,100,w,hh))
# dst = cv.CreateImage((w,h),ipl_image.depth,ipl_image.nChannels)
# dst = cv.CreateImage((w,hh),ipl_image.depth,ipl_image.nChannels)
# cv.Copy(ipl_image,dst)
# cv.ResetImageROI(ipl_image)
# cv.SaveImage(IMAGE_DIR + "roi_"+file,dst)
# print(dst)
# cv.ShowImage("Faces except" ,ipl_image)
# cv2.waitKey(0)
# cv.ShowImage("Faces except" ,dst)
# cv2.waitKey(0)
    # If the image width or height is greater than 300, scale it down proportionally
def resizeImg(self,**args):
args_key = {'ori_img': '', 'dst_img': '', 'dst_w': '', 'dst_h': '', 'save_q': 75}
arg = {}
for key in args_key:
if key in args:
arg[key] = args[key]
im = Image.open(arg['ori_img'])
ori_w, ori_h = im.size
widthRatio = heightRatio = None
ratio = 1
if (ori_w and ori_w > arg['dst_w']) or (ori_h and ori_h > arg['dst_h']):
if arg['dst_w'] and ori_w > arg['dst_w']:
                widthRatio = float(arg['dst_w']) / ori_w  # the correct way to get a float result
if arg['dst_h'] and ori_h > arg['dst_h']:
heightRatio = float(arg['dst_h']) / ori_h
if widthRatio and heightRatio:
if widthRatio < heightRatio:
ratio = widthRatio
else:
ratio = heightRatio
if widthRatio and not heightRatio:
ratio = widthRatio
if heightRatio and not widthRatio:
ratio = heightRatio
newWidth = int(ori_w * ratio)
newHeight = int(ori_h * ratio)
else:
newWidth = ori_w
newHeight = ori_h
im.resize((newWidth, newHeight), Image.ANTIALIAS).save(arg['dst_img'], quality=arg['save_q'])
return arg['dst_img']
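    # Worked illustration of the ratio logic above (not in the original code): a 600x400
    # image with dst_w=300 and dst_h=300 gives widthRatio=0.5 and heightRatio=0.75; the
    # smaller ratio (0.5) is used, so the image is saved at 300x200.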
    # Detect nude/explicit images
def isnude(self,file):
        # Resize/compress the image
imagePath = IMAGE_DIR + file
nudeImg = IMAGE_DIR +"nude_"+file
# disImg = IMAGE_DIR +file
self.resizeImg(ori_img=imagePath,dst_img=nudeImg,dst_w=300,dst_h=300,save_q=100)
# faces = self.face("dis"+file)
faces = self.face("nude_"+file)
if(len(faces)<1):
print("no face")
return -1
else:
self.cropImg("nude_"+file, faces)
n = Nude(nudeImg)
# n = Nude(newImg)
# n.setFaces(faces)
# n.resize(1000,1000)
n.parse()
print(n.result, n.inspect(), '\n<br/>')
# print n.result
return 1 if n.result else 0
if __name__ == '__main__':
nude_test = NudeTest()
# print (nude_test.isnude("1464318775245A552D29.jpg"))
# print (nude_test.isnude("1464320172441A29C28E.jpg"))
# print (nude_test.isnude("nude_3fcca160-50a5-11e6-8012-33ccd5ab34ad.jpg"))
# print (nude_test.isnude("nude_10b68460-50ad-11e6-8012-33ccd5ab34ad.jpeg"))
# print (nude_test.isnude("nude_fecedc00-50ae-11e6-8012-33ccd5ab34ad.jpg"))
# print (nude_test.isnude("1464319611254AF3E1F7.jpg"))
# print (nude_test.isnude("1464318026880ADE2A0B.jpg"))
print (nude_test.isnude(sys.argv[1]))
|
leobarros/use_cabeca_python
|
nester/setup.py
|
Python
|
apache-2.0
| 341
| 0.061765
|
from distutils.core import setup
setup(
|
name = 'nicknester',
version = '1.3.0',
py_modules = ['nester'],
    author = 'hfpython',
author_email = 'hfpython@headfirstlabs.com',
url = 'http://www.headfirstlabs.com',
description = 'A simple printer of nested
|
list',
)
|
FabienPean/sofa
|
applications/plugins/SofaPython/python/SofaPython/console.py
|
Python
|
lgpl-2.1
| 5,349
| 0.014208
|
"""a readline console module (unix only).
maxime.tournier@brain.riken.jp
the module starts a subprocess for the readline console and
communicates through pipes (prompt/cmd).
the console is polled through a timer, which depends on PySide.
"""
from select import select
import os
import sys
import signal
if __name__ == '__main__':
import readline
# prompt input stream
fd_in = int(sys.argv[1])
file_in = os.fdopen( fd_in )
# cmd output stream
fd_out = int(sys.argv[2])
file_out = os.fdopen( fd_out, 'w' )
# some helpers
def send(data):
file_out.write(data + '\n')
file_out.flush()
def recv():
while True:
res = file_in.readline().rstrip('\n')
read, _, _ = select([ file_in ], [], [], 0)
if not read: return res
class History:
"""readline history safe open/close"""
def __init__(self, filename):
self.filename = os.path.expanduser( filename )
def __enter__(self):
try:
readline.read_history_file(self.filename)
# print 'loaded console history from', self.filename
except IOError:
pass
return self
def __exit__(self, type, value, traceback):
readline.write_history_file( self.filename )
def cleanup(*args):
print('console cleanup')
os.system('stty sane')
for sig in [signal.SIGQUIT,
signal.SIGTERM,
signal.SIGILL,
signal.SIGSEGV]:
old = signal.getsignal(sig)
def new(*args):
cleanup()
signal.signal(sig, old)
os.kill(os.getpid(), sig)
signal.signal(sig, new)
# main loop
try:
with History( "~/.sofa-console" ):
print 'console started'
while True:
send( raw_input( recv() ) )
except KeyboardInterrupt:
print 'console exited (SIGINT)'
except EOFError:
ppid = os.getppid()
try:
os.kill(os.getppid(), signal.SIGTERM)
print 'console exited (EOF), terminating parent process'
|
except OSError:
pass
else:
import subprocess
import code
import atexit
_cleanup = None
def _register( c ):
global _cleanup
if _cleanup: _cleanup()
_cleanup = c
class Console(code.InteractiveConsole):
def __init__(self, locals = None, timeout = 100):
"""
python interpreter taking input from console subprocess
scope
|
is provided through 'locals' (usually: locals() or globals())
            'timeout' (in milliseconds) sets how often the console is polled.
"""
code.InteractiveConsole.__init__(self, locals)
if timeout >= 0:
def callback():
self.poll()
from PySide import QtCore
self.timer = QtCore.QTimer()
self.timer.timeout.connect( callback )
self.timer.start( timeout )
_register( lambda: self.timer.stop() )
# execute next command, blocks on console input
def next(self):
line = recv()
data = '>>> '
if self.push( line ):
data = '... '
send( data )
# convenience
def poll(self):
if ready(): self.next()
# send prompt to indicate we are ready
def send(data):
prompt_out.write(data + '\n')
prompt_out.flush()
# receive command line
def recv():
res = cmd_in.readline()
if res: return res.rstrip('\n')
return res
# is there any available command ?
def ready():
read, _, _ = select([ cmd_in ], [], [], 0)
return read
# communication pipes
prompt = os.pipe()
cmd = os.pipe()
# subprocess with in/out fd, and forwarding stdin
sub = subprocess.Popen(['python', __file__,
str(prompt[0]), str(cmd[1])],
stdin = sys.stdin)
# open the tubes !
prompt_out = os.fdopen(prompt[1], 'w')
cmd_in = os.fdopen(cmd[0], 'r')
# we're ready
send('>>> ')
# def cleanup(*args):
# print('console cleanup')
# os.system('stty sane')
# def exit(*args):
# print 'exit'
# cleanup()
# sys.exit(0) forces cleanup *from python* before the gui
# closes. otherwise pyside causes segfault on python finalize.
def handler(*args):
sub.terminate()
sub.wait()
sys.exit(0)
from PySide import QtCore
app = QtCore.QCoreApplication.instance()
app.aboutToQuit.connect( handler )
# import atexit
# atexit.register( handler )
# import atexit
# atexit.register( exit )
# for sig in [signal.SIGSEGV, signal.SIGILL]:
# old = signal.getsignal(sig)
# def h(*args):
# print args
# sub.terminate()
# signal.signal(sig, old)
# os.kill(os.getpid(), sig)
# signal.signal(sig, h)
|
google-research/google-research
|
ptopk_patch_selection/ntsnet.py
|
Python
|
apache-2.0
| 20,236
| 0.008747
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""NTS-Net adapted for perturbed top-k.
Based on the original PyTorch code
https://github.com/yangze0930/NTS-Net/blob/master/core/model.py
"""
import enum
import functools
import math
from typing import List, Tuple
from absl import app
from absl import flags
from absl import logging
import chex
from clu import platform
import einops
from flax.deprecated import nn
import jax
import jax.numpy as jnp
import ml_collections
import ml_collections.config_flags as config_flags
from off_the_grid.lib import data
from off_the_grid.lib import models
from off_the_grid.lib import utils
import off_the_grid.lib.classification_utils as classification_lib
from off_the_grid.lib.layers import sample_patches
from off_the_grid.lib.layers import transformer
import optax
import tensorflow as tf
FLAGS = flags.FLAGS
config_fl
|
ags.DEFINE_config_file(
"config", None, "Training configuration.", lock_config=True)
flags.DEFINE_string("workdir", None, "Work unit directory.")
NUM_CLASSES = 200
ANCHORS_SETTINGS = (
dict(
layer="p3",
stride=32,
size=48,
scale=[2**(1. / 3.), 2**(2. / 3.)],
aspect_ratio=[0.667, 1, 1.5]), # Anchors 0-5
dict(
layer="p4",
|
stride=64,
size=96,
scale=[2**(1. / 3.), 2**(2. / 3.)],
aspect_ratio=[0.667, 1, 1.5]), # Anchors 6-11
dict(
layer="p5",
stride=128,
size=192,
scale=[1, 2**(1. / 3.), 2**(2. / 3.)],
aspect_ratio=[0.667, 1, 1.5]), # Anchors 12-20
)
class Communication(str, enum.Enum):
NONE = "none"
SQUEEZE_EXCITE_D = "squeeze_excite_d"
SQUEEZE_EXCITE_X = "squeeze_excite_x"
TRANSFORMER = "transformer"
def zeroone(scores, x_min, x_max):
"""Normalize values to lie between [0, 1]."""
return [(x - x_min) / (x_max - x_min + 1e-5) for x in scores]
class ProposalNet(nn.Module):
"""FPN inspired scorer module."""
def apply(self, x,
communication = Communication.NONE,
train = True):
"""Forward pass."""
batch_size = x.shape[0]
if communication is Communication.SQUEEZE_EXCITE_X:
x = sample_patches.SqueezeExciteLayer(x)
# end if squeeze excite x
d1 = nn.relu(nn.Conv(
x, 128, kernel_size=(3, 3), strides=(1, 1), bias=True, name="down1"))
d2 = nn.relu(nn.Conv(
d1, 128, kernel_size=(3, 3), strides=(2, 2), bias=True, name="down2"))
d3 = nn.relu(nn.Conv(
d2, 128, kernel_size=(3, 3), strides=(2, 2), bias=True, name="down3"))
if communication is Communication.SQUEEZE_EXCITE_D:
d1_flatten = einops.rearrange(d1, "b h w c -> b (h w) c")
d2_flatten = einops.rearrange(d2, "b h w c -> b (h w) c")
d3_flatten = einops.rearrange(d3, "b h w c -> b (h w) c")
nd1 = d1_flatten.shape[1]
nd2 = d2_flatten.shape[1]
d_together = jnp.concatenate([d1_flatten, d2_flatten, d3_flatten], axis=1)
num_channels = d_together.shape[-1]
y = d_together.mean(axis=1)
y = nn.Dense(y, features=num_channels // 4, bias=False)
y = nn.relu(y)
y = nn.Dense(y, features=num_channels, bias=False)
y = nn.sigmoid(y)
d_together = d_together * y[:, None, :]
# split and reshape
d1 = d_together[:, :nd1].reshape(d1.shape)
d2 = d_together[:, nd1:nd1+nd2].reshape(d2.shape)
d3 = d_together[:, nd1+nd2:].reshape(d3.shape)
elif communication is Communication.TRANSFORMER:
d1_flatten = einops.rearrange(d1, "b h w c -> b (h w) c")
d2_flatten = einops.rearrange(d2, "b h w c -> b (h w) c")
d3_flatten = einops.rearrange(d3, "b h w c -> b (h w) c")
nd1 = d1_flatten.shape[1]
nd2 = d2_flatten.shape[1]
d_together = jnp.concatenate([d1_flatten, d2_flatten, d3_flatten], axis=1)
positional_encodings = self.param(
"scale_ratio_position_encodings",
shape=(1,) + d_together.shape[1:],
initializer=jax.nn.initializers.normal(1. / d_together.shape[-1]))
d_together = transformer.Transformer(
d_together + positional_encodings,
num_layers=2,
num_heads=8,
is_training=train)
# split and reshape
d1 = d_together[:, :nd1].reshape(d1.shape)
d2 = d_together[:, nd1:nd1+nd2].reshape(d2.shape)
d3 = d_together[:, nd1+nd2:].reshape(d3.shape)
t1 = nn.Conv(
d1, 6, kernel_size=(1, 1), strides=(1, 1), bias=True, name="tidy1")
t2 = nn.Conv(
d2, 6, kernel_size=(1, 1), strides=(1, 1), bias=True, name="tidy2")
t3 = nn.Conv(
d3, 9, kernel_size=(1, 1), strides=(1, 1), bias=True, name="tidy3")
raw_scores = (jnp.split(t1, 6, axis=-1) +
jnp.split(t2, 6, axis=-1) +
jnp.split(t3, 9, axis=-1))
# The following is for normalization.
t = jnp.concatenate((jnp.reshape(t1, [batch_size, -1]),
jnp.reshape(t2, [batch_size, -1]),
jnp.reshape(t3, [batch_size, -1])), axis=1)
t_min = jnp.reshape(jnp.min(t, axis=-1), [batch_size, 1, 1, 1])
t_max = jnp.reshape(jnp.max(t, axis=-1), [batch_size, 1, 1, 1])
normalized_scores = zeroone(raw_scores, t_min, t_max)
stats = {
"scores": normalized_scores,
"raw_scores": t,
}
# removes the split dimension. scores are now b x h' x w' shaped
normalized_scores = [s.squeeze(-1) for s in normalized_scores]
return normalized_scores, stats
def extract_weighted_patches(x,
weights,
kernel,
stride,
padding):
"""Weighted average of patches using jax.lax.scan."""
logging.info("recompiling for kernel=%s and stride=%s and padding=%s", kernel,
stride, padding)
x = jnp.pad(x, ((0, 0),
(padding[0], padding[0] + kernel[0]),
(padding[1], padding[1] + kernel[1]),
(0, 0)))
batch_size, _, _, channels = x.shape
_, k, weights_h, weights_w = weights.shape
def accumulate_patches(acc, index_i_j):
i, j = index_i_j
patch = jax.lax.dynamic_slice(
x,
(0, i * stride[0], j * stride[1], 0),
(batch_size, kernel[0], kernel[1], channels))
weight = weights[:, :, i, j]
weighted_patch = jnp.einsum("bk, bijc -> bkijc", weight, patch)
acc += weighted_patch
return acc, None
indices = jnp.stack(
jnp.meshgrid(jnp.arange(weights_h), jnp.arange(weights_w), indexing="ij"),
axis=-1)
indices = indices.reshape((-1, 2))
init_patches = jnp.zeros((batch_size, k, kernel[0], kernel[1], channels))
patches, _ = jax.lax.scan(accumulate_patches, init_patches, indices)
return patches
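# Hedged illustration (not part of the original model code): a shape-only sketch of
# extract_weighted_patches with made-up sizes, averaging k=4 patches of 48x48 pixels
# from 224x224 images with per-location weights over a 7x7 grid.
def _example_weighted_patches():
  """Hedged sketch only; the sizes below are assumptions made for illustration."""
  x = jnp.zeros((2, 224, 224, 3))             # (batch, H, W, C)
  weights = jnp.ones((2, 4, 7, 7)) / (7 * 7)  # (batch, k, H', W'), uniform weights
  patches = extract_weighted_patches(x, weights, kernel=(48, 48),
                                     stride=(32, 32), padding=(8, 8))
  return patches.shape                        # (2, 4, 48, 48, 3)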
def weighted_anchor_aggregator(x, weights):
"""Given a tensor of weights per anchor computes the weighted average."""
counter = 0
all_sub_aggregates = []
for anchor_info in ANCHORS_SETTINGS:
stride = anchor_info["stride"]
size = anchor_info["size"]
for scale in anchor_info["scale"]:
for aspect_ratio in anchor_info["aspect_ratio"]:
kernel_size = (
int(size * scale / float(aspect_ratio) ** 0.5),
int(size * scale * float(aspect_ratio) ** 0.5))
padding = (
math.ceil((kernel_size[0] - stride) / 2.),
math.ceil((kernel_size[1] - stride) / 2.))
aggregate = extract_weighted_patches(
x, weights[counter], kernel_size, (stride, stride), padding)
aggregate = jnp.reshape(aggregate,
[-1, kernel_size[0], kerne
|
TingPing/plugins
|
XChat/define.py
|
Python
|
mit
| 1,320
| 0.039394
|
import urllib
import ast
import xchat
__module_name__ =
|
"Define"
__module_author__ = "TingPing"
__module_version__ = "2"
__module_description__ = "Show word definitions"
# based on google dictionary script by Sridarshan Shetty - http://srid
|
arshan.co.cc
def define(word, word_eol, userdata):
if len(word) >= 2:
_word = xchat.strip(word[1])
_number = 1
if len(word) >= 3:
_number = int(xchat.strip(word[2]))
else:
xchat.prnt('Define Usage: /define word [number]')
xchat.prnt(' number being alternate definition')
return xchat.EAT_ALL
url="http://www.google.com/dictionary/json?callback=s&q=" + _word + "&sl=en&tl=en&restrict=pr,de&client=te"
obj=urllib.urlopen(url);
content=obj.read()
obj.close()
content=content[2:-10]
dic=ast.literal_eval(content)
if dic.has_key("webDefinitions"):
webdef=dic["webDefinitions"]
webdef=webdef[0]
webdef=webdef["entries"]
index=1
for i in webdef:
if index == _number:
if i["type"]=="meaning":
ans=i["terms"]
op=ans[0]['text']
split=op.split(';')
xchat.prnt(_word + ': ' + split[0].strip())
index+=1
return xchat.EAT_ALL
else:
xchat.prnt('Description unavailable for ' + _word)
return xchat.EAT_ALL
xchat.hook_command("define", define)
xchat.prnt(__module_name__ + ' version ' + __module_version__ + ' loaded.')
|
luzheqi1987/nova-annotation
|
nova/network/security_group/neutron_driver.py
|
Python
|
apache-2.0
| 23,777
| 0
|
# Copyright 2013 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from neutronclient.common import exceptions as n_exc
from neutronclient.neutron import v2_0 as neutronv20
from oslo.config import cfg
from oslo.utils import excutils
import six
from webob import exc
from nova.compute import api as compute_api
from nova import exception
from nova.i18n import _, _LE
from nova.network import neutronv2
from nova.network.security_group import security_group_base
from nova import objects
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
from nova import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# NOTE: Neutron client has a max URL length of 8192, so we have
# to limit the number of IDs we include in any single search. Really
# doesn't seem to be any point in making this a config value.
MAX_SEARCH_IDS = 150
class SecurityGroupAPI(security_group_base.SecurityGroupBase):
id_is_uuid = True
def create_security_group(self, context, name, description):
neutron = neutronv2.get_client(context)
body = self._make_neutron_security_group_dict(name, description)
try:
security_group = neutron.create_security_group(
body).get('security_group')
except n_exc.NeutronClientException as e:
exc_info = sys.exc_info()
LOG.exception(_("Neutron Error creating security group %s"),
name)
if e.status_code == 401:
# TODO(arosen) Cannot raise generic response from neutron here
# as this error code could be related to bad input or over
# quota
raise exc.HTTPBadRequest()
elif e.status_code == 409:
self.raise_over_quota(six.text_type(e))
raise exc_info[0], exc_info[1], exc_info[2]
return self._convert_to_nova_security_group_format(security_group)
def update_security_group(self, context, security_group,
name, description):
neutron = neutronv2.get_client(context)
body = self._make_neutron_security_group_dict(name, description)
try:
security_group = neutron.update_security_group(
security_group['id'], body).get('security_group')
except n_exc.NeutronClientException as e:
exc_info = sys.exc_info()
|
LOG.exception(_("Neutron Error updating security group %s"),
name)
if e.status_code == 401:
# TODO(arosen) Cannot raise generic response from neutron here
|
# as this error code could be related to bad input or over
# quota
raise exc.HTTPBadRequest()
raise exc_info[0], exc_info[1], exc_info[2]
return self._convert_to_nova_security_group_format(security_group)
def _convert_to_nova_security_group_format(self, security_group):
nova_group = {}
nova_group['id'] = security_group['id']
nova_group['description'] = security_group['description']
nova_group['name'] = security_group['name']
nova_group['project_id'] = security_group['tenant_id']
nova_group['rules'] = []
for rule in security_group.get('security_group_rules', []):
if rule['direction'] == 'ingress':
nova_group['rules'].append(
self._convert_to_nova_security_group_rule_format(rule))
return nova_group
def _convert_to_nova_security_group_rule_format(self, rule):
nova_rule = {}
nova_rule['id'] = rule['id']
nova_rule['parent_group_id'] = rule['security_group_id']
nova_rule['protocol'] = rule['protocol']
if (nova_rule['protocol'] and rule.get('port_range_min') is None and
rule.get('port_range_max') is None):
if rule['protocol'].upper() in ['TCP', 'UDP']:
nova_rule['from_port'] = 1
nova_rule['to_port'] = 65535
else:
nova_rule['from_port'] = -1
nova_rule['to_port'] = -1
else:
nova_rule['from_port'] = rule.get('port_range_min')
nova_rule['to_port'] = rule.get('port_range_max')
nova_rule['group_id'] = rule['remote_group_id']
nova_rule['cidr'] = self.parse_cidr(rule.get('remote_ip_prefix'))
return nova_rule
def get(self, context, name=None, id=None, map_exception=False):
neutron = neutronv2.get_client(context)
try:
if not id and name:
# NOTE(flwang): The project id should be honoured so as to get
# the correct security group id when user(with admin role but
# non-admin project) try to query by name, so as to avoid
# getting more than duplicated records with the same name.
id = neutronv20.find_resourceid_by_name_or_id(
neutron, 'security_group', name, context.project_id)
group = neutron.show_security_group(id).get('security_group')
except n_exc.NeutronClientNoUniqueMatch as e:
raise exception.NoUniqueMatch(six.text_type(e))
except n_exc.NeutronClientException as e:
exc_info = sys.exc_info()
if e.status_code == 404:
LOG.debug("Neutron security group %s not found", name)
self.raise_not_found(six.text_type(e))
else:
LOG.error(_LE("Neutron Error: %s"), e)
raise exc_info[0], exc_info[1], exc_info[2]
return self._convert_to_nova_security_group_format(group)
def list(self, context, names=None, ids=None, project=None,
search_opts=None):
"""Returns list of security group rules owned by tenant."""
neutron = neutronv2.get_client(context)
search_opts = {}
if names:
search_opts['name'] = names
if ids:
search_opts['id'] = ids
if project:
search_opts['tenant_id'] = project
try:
security_groups = neutron.list_security_groups(**search_opts).get(
'security_groups')
except n_exc.NeutronClientException:
with excutils.save_and_reraise_exception():
LOG.exception(_("Neutron Error getting security groups"))
converted_rules = []
for security_group in security_groups:
converted_rules.append(
self._convert_to_nova_security_group_format(security_group))
return converted_rules
def validate_id(self, id):
if not uuidutils.is_uuid_like(id):
msg = _("Security group id should be uuid")
self.raise_invalid_property(msg)
return id
def destroy(self, context, security_group):
"""This function deletes a security group."""
neutron = neutronv2.get_client(context)
try:
neutron.delete_security_group(security_group['id'])
except n_exc.NeutronClientException as e:
exc_info = sys.exc_info()
if e.status_code == 404:
self.raise_not_found(six.text_type(e))
elif e.status_code == 409:
self.raise_invalid_property(six.text_type(e))
else:
LOG.error(_LE("Neutron Error: %s"), e)
raise exc_info[0], exc_info[1], exc_info[2]
def add_rules(self, context, id, name, vals):
"""Add security group rule(s) to security group.
Note: the Nova security group API doesn't
|
emgreen33/easy_bake
|
oven.py
|
Python
|
mit
| 583
| 0.013722
|
import RPi.GPIO as gpio
from datetime import datetime
import time
import controller
gpio.setmode(gpio.BOARD)
# switch_pins = [10, 40, 38]
switch = 10
gpio.setup(switch, gpio.OUT, initial=False)
# gpio.setup(switch_pins, gpio.OUT, initial=False)
def switch_on():
|
gpio.output(switch, True)
print "Oven switched ON at " + str(datetime.now())
def switch_off():
gpio.output(switch, False)
print "Oven switched OFF at " + s
|
tr(datetime.now())
def status():
return gpio.input(switch)
switch_on()
print status()
time.sleep(3)
switch_off()
print status()
# gpio.cleanup()
|
robertodr/kleisli
|
setup.py
|
Python
|
lgpl-2.1
| 2,121
| 0.002357
|
#!/usr/bin/env python
# This file is autogenerated by Autocmake
# Copyright (c) 2015 by Radovan Bast and Jonas Juselius
# See https://github.com/scisoft/autocmake/
import os
import sys
sys.path.append('cmake/lib')
from config import configure
import docopt
options = """
Usage:
./setup.py [options] [<builddir>]
./setup.py (-h | --help)
Options:
--cxx=<CXX> C++ compiler [default: g++].
--extra-cxx-flags=<EXTRA_CXXFLAGS> Extra C++ compiler flags [default: ''].
--coverage Enable code coverage [default: False].
--type=<TYPE> Set the CMake build type (debug, release, or relwithdeb) [default: release].
--generator=<STRING> Set the CMake build system generator [default: Unix Makefiles].
--show Show CMake command and exit.
--cmake-options=<OPTIONS> Define options to CMake [default: None].
<builddir> Build directory.
-h --help Show this screen.
"""
def gen_cmake_command(options, arguments):
"""
Generate CMake
|
command based on options and arguments.
"""
command = []
command.append('CXX=%s' % arguments['--cxx'])
command.append('cmake')
command.append('-DEXTRA_CXXFLAGS="%s"' % arguments['--extra-cxx-flags'])
command.append('-DENABLE_CODE_COVERAGE=%s' % arguments['--coverage'])
command.append('-DCMAKE_BUILD_
|
TYPE=%s' % arguments['--type'])
command.append('-G "%s"' % arguments['--generator'])
if(arguments['--cmake-options']):
command.append('%s' % arguments['--cmake-options'])
return ' '.join(command)
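# Hedged illustration (not part of the original script): the command string produced for
# the documented defaults. The dict below only mimics the arguments docopt would parse.
def _example_cmake_command():
    defaults = {'--cxx': 'g++', '--extra-cxx-flags': '', '--coverage': False,
                '--type': 'release', '--generator': 'Unix Makefiles',
                '--cmake-options': None}
    return gen_cmake_command(options, defaults)
    # -> 'CXX=g++ cmake -DEXTRA_CXXFLAGS="" -DENABLE_CODE_COVERAGE=False'
    #    ' -DCMAKE_BUILD_TYPE=release -G "Unix Makefiles"'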
try:
arguments = docopt.docopt(options, argv=None)
except docopt.DocoptExit:
sys.stderr.write('ERROR: bad input to %s\n' % sys.argv[0])
sys.stderr.write(options)
sys.exit(-1)
root_directory = os.path.dirname(os.path.realpath(__file__))
build_path = arguments['<builddir>']
cmake_command = '%s %s' % (gen_cmake_command(options, arguments), root_directory)
configure(root_directory, build_path, cmake_command, arguments['--show'])
|
esetomo/mio
|
script/pymio/mmd/pmd/pmd_model.py
|
Python
|
gpl-3.0
| 28,845
| 0.009256
|
import copy
import math
from pymio.material.material_list import MaterialList
from pymio.material.phong_material import PhongMaterial
from pymio.material.textured_phong_material import TexturedPhongMaterial
from pymio.material.by_polygon_material_mapping import ByPolygonMaterialMapping
from pymio.geometry.indexed_mesh import IndexedMesh
from pymio.texture.indexed_texture_mapping import IndexedTextureMapping
from pymio.texture.lazy_il_texture import LazyILTexture
from pymio.rigging.armature import Armature
from pymio.rigging.joint import Joint
from pymio.rigging.pose import Pose
from pymio.rigging.joint_change import JointChange
from pymio.ik.ik_armature import IKArmature
from pymio.ik.ik_joint_parameters import IKJointParameters
from pymio.ik.ik_end_effector import IKEndEffector
from pymio.ik.ik_joint import IKJoint
from pymio.ik.ik_pose import IKPose
from pymio.skinning.indexed_joint_weight_mapping import IndexedJointWeightMapping
from pymio.scenegraph.rigged_primitive import RiggedPrimitive
from pymio import vector3
from pymio.vector3 import Vector3
from pymio import vector3
from pymio.point3 import Point3
from pymio.rgba import Rgba
from pymio.uv import Uv
from pymio.quaternion import Quaternion
from pymio import quaternion
from pymio.matrix4x4 import Matrix4x4
from pymio import matrix4x4
from bone import Bone
from expression import Expression
def coordinate_system(v1):
v1 = vector3.normalize(v1)
if abs(v1.x) > abs(v1.y):
inv_len = 1.0 / math.sqrt(v1.x**2 + v1.z**2)
v2 = Vector3(-v1.z * inv_len, 0.0, v1.x * inv_len)
else:
inv_len = 1.0 / math.sqrt(v1.y**2 + v1.z**2)
v2 = Vector3(0.0, v1.z * inv_len, -v1.y * inv_len)
v3 = vector3.cross(v1,v2)
return (v1, v2, v3)
class PMDModel(object):
"""
Represent the data stored in a .pmd file.
"""
def __init__(self):
"""
Create an empty instance of C{PMDModel}.
"""
self.vertices = []
self.bones = []
self.ik_chains = []
self.expressions = []
self.base_expression = Expression("base")
self.triangle_count = 0
self.materials = []
self.triangle_vertex_indices = []
self.bones_by_name = {}
def append_vertex(self, vertex):
"""
Add a vertex data item.
@param vertex: a vertex data item
@type vertex: L{pymio.mmd.pmd.vertex.Vertex}
"""
self.vertices.append(vertex)
def append_bone(self, bone):
"""
Add a bone data item.
@param bone: a bone data item
@type bone: L{pymio.mmd.pmd.bone.Bone}
"""
self.bones.append(bone)
bone.index = len(self.bones)-1
self.bones_by_name[bone.name] = bone
def get_bone_by_name(self, name):
"""
Return a bone with the given name.
@param name: a bone's name
@type name: string
"""
return self.bones_by_name[name]
def append_expression(self, expression):
self.expressions.append(expression)
def append_material(self, material):
self.materials.append(material)
def append_triangle_vertex_index(self, index):
self.triangle_vertex_indices.append(index)
def append_ik_chain(self, ik_chain):
self.ik_chains.append(ik_chain)
def get_ik_chain_by_ik_bone_index(self, ik_bone_index):
for ik_chain in self.ik_chains:
if ik_chain.ik_bone_index == ik_bone_index:
return ik_chain
raise KeyError("cannot find IK chain represented with a bone with the given index")
def get_ik_chain_by_ik_bone_name(self, ik_bone_name):
for ik_chain in self.ik_chains:
if self.bones[ik_chain.ik_bone_index].name == ik_bone_name:
return ik_chain
raise KeyError("cannot find IK chain represented with a bone with the given index")
def get_material_list(self):
material_list = MaterialList()
for material in self.materials:
if len(material.texture_filename) > 0:
texture = LazyILTexture(material.texture_filename)
pymio_material = TexturedPhongMaterial(material.ambient,
material.diffuse,
Rgba(0,0,0,0),
material.specular,
material.shininess,
texture)
else:
pymio_material = PhongMaterial(material.ambient,
material.diffuse,
Rgba(0,0,0,0),
material.specular,
material.shininess)
material_list.append_material(pymio_material)
return material_list
def get_rest_mesh(self):
mesh = IndexedMesh()
for vertex in self.vertices:
mesh.append_vertex(Point3(vertex.position.x, vertex.position.y, -vertex.position.z))
mesh.append_normal(Vector3(vertex.normal.x, vertex.normal.y, -vertex.normal.z))
#mesh.append_vertex(Point3(vertex.position.x, vertex.position.y, vertex.position.z))
#mesh.append_normal(Vector3(vertex.normal.x, vertex.normal.y, vertex.normal.z))
for i in xrange(self.triangle_count):
v0 = self.triangle_vertex_indices[3*i]
v1 = self.triangle_vertex_indices[3*i+1]
v2 = self.triangle_vertex_indices[3*i+2]
mesh.append_new_polygon()
mesh.append_vertex_normal_index_to_last_polygon(v2, v2)
mesh.append_vertex_normal_index_to_last_polygon(v1, v1)
mesh.append_vertex_normal_index_to_last_polygon(v0, v0)
return mesh
def get_texture_mappin
|
g(self):
mapping = IndexedTextureMapping()
for vertex in self.vertices:
mapping.append_tex_coord(Uv(vertex.tex_coord.u, 1.0 - vertex.tex_coord.v))
for i in xrange(self.triangle_
|
count):
v0 = self.triangle_vertex_indices[3*i]
v1 = self.triangle_vertex_indices[3*i+1]
v2 = self.triangle_vertex_indices[3*i+2]
mapping.append_new_polgon()
mapping.append_tex_coord_index_to_last_polygon(v2)
mapping.append_tex_coord_index_to_last_polygon(v1)
mapping.append_tex_coord_index_to_last_polygon(v0)
return mapping
def get_material_mapping(self):
mapping = ByPolygonMaterialMapping()
for material_index, material in enumerate(self.materials):
for i in xrange(material.triangle_count):
mapping.append_mapping(material_index)
return mapping
def get_rest_armature(self):
armature = Armature()
necessary = [False for i in xrange(len(self.bones))]
for ik_chain in self.ik_chains:
for index in ik_chain.affected_bone_indices:
necessary[index] = True
necessary[ik_chain.end_effector_index] = True
for vertex in self.vertices:
necessary[vertex.bone0_number] = True
necessary[vertex.bone1_number] = True
for bone_index, bone in enumerate(self.bones):
if (necessary[bone_index] or
(bone.bone_type in [Bone.BONE_ROTATE,
Bone.BONE_ROTATE_TRANSLATE,
Bone.BONE_IK_ROTATION_INFLUENCED,
Bone.BONE_IK_TARGET,
Bone.BONE_ROTATION_INFLUENCED])):
|
y-zeng/grpc
|
tools/run_tests/run_tests.py
|
Python
|
bsd-3-clause
| 49,895
| 0.009941
|
#!/usr/bin/env python2.7
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Run tests in parallel."""
from __future__ import print_function
import argparse
import ast
import collections
import glob
import itertools
import json
import multiprocessing
import os
import os.path
import pipes
import platform
import random
import re
import socket
import subprocess
import sys
import tempfile
import traceback
import time
from six.moves import urllib
import uuid
import jobset
import report_utils
import watch_dirs
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(_ROOT)
_FORCE_ENVIRON_FOR_WRAPPERS = {
'GRPC_VERBOSITY': 'DEBUG',
}
_POLLING_STRATEGIES = {
'linux': ['epoll', 'poll', 'legacy']
|
}
def platform_string():
return jobset.platform_string()
_DEFAULT_TIMEOUT_SECONDS = 5 * 60
# SimpleConfig: just compile with CONFIG=config, and run the binary to test
class Config(object):
def __init__(self, config, environ=None, timeout_multiplier=1, tool_prefix=[]):
if environ is None:
environ = {}
self.build_config = config
self.environ = environ
self.environ['CONFIG'] = config
self.tool_prefix = tool_prefix
self.timeout_multiplier = timeout_mu
|
ltiplier
def job_spec(self, cmdline, timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
shortname=None, environ={}, cpu_cost=1.0, flaky=False):
"""Construct a jobset.JobSpec for a test under this config
Args:
cmdline: a list of strings specifying the command line the test
would like to run
"""
actual_environ = self.environ.copy()
for k, v in environ.items():
actual_environ[k] = v
return jobset.JobSpec(cmdline=self.tool_prefix + cmdline,
shortname=shortname,
environ=actual_environ,
cpu_cost=cpu_cost,
timeout_seconds=(self.timeout_multiplier * timeout_seconds if timeout_seconds else None),
flake_retries=5 if flaky or args.allow_flakes else 0,
timeout_retries=3 if args.allow_flakes else 0)
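# A hedged usage sketch (not part of the original file): building a JobSpec for one test
# binary under the 'opt' config. The binary path and shortname are made up, and this
# assumes the module-level `args` from argparse has already been parsed.
#
#   cfg = Config('opt', timeout_multiplier=2)
#   spec = cfg.job_spec(['bins/opt/end2end_test'],
#                       shortname='end2end_test',
#                       environ={'GRPC_POLL_STRATEGY': 'poll'})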
def get_c_tests(travis, test_lang) :
out = []
platforms_str = 'ci_platforms' if travis else 'platforms'
with open('tools/run_tests/tests.json') as f:
js = json.load(f)
return [tgt
for tgt in js
if tgt['language'] == test_lang and
platform_string() in tgt[platforms_str] and
not (travis and tgt['flaky'])]
def _check_compiler(compiler, supported_compilers):
if compiler not in supported_compilers:
raise Exception('Compiler %s not supported (on this platform).' % compiler)
def _check_arch(arch, supported_archs):
if arch not in supported_archs:
raise Exception('Architecture %s not supported.' % arch)
def _is_use_docker_child():
"""Returns True if running running as a --use_docker child."""
return True if os.getenv('RUN_TESTS_COMMAND') else False
_PythonConfigVars = collections.namedtuple(
'_ConfigVars', ['shell', 'builder', 'builder_prefix_arguments',
'venv_relative_python', 'toolchain', 'runner'])
def _python_config_generator(name, major, minor, bits, config_vars):
return PythonConfig(
name,
config_vars.shell + config_vars.builder + config_vars.builder_prefix_arguments + [
_python_pattern_function(major=major, minor=minor, bits=bits)] + [
name] + config_vars.venv_relative_python + config_vars.toolchain,
config_vars.shell + config_vars.runner + [
os.path.join(name, config_vars.venv_relative_python[0])])
def _pypy_config_generator(name, major, config_vars):
return PythonConfig(
name,
config_vars.shell + config_vars.builder + config_vars.builder_prefix_arguments + [
_pypy_pattern_function(major=major)] + [
name] + config_vars.venv_relative_python + config_vars.toolchain,
config_vars.shell + config_vars.runner + [
os.path.join(name, config_vars.venv_relative_python[0])])
def _python_pattern_function(major, minor, bits):
# Bit-ness is handled by the test machine's environment
if os.name == "nt":
if bits == "64":
return '/c/Python{major}{minor}/python.exe'.format(
major=major, minor=minor, bits=bits)
else:
return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
major=major, minor=minor, bits=bits)
else:
return 'python{major}.{minor}'.format(major=major, minor=minor)
def _pypy_pattern_function(major):
if major == '2':
return 'pypy'
elif major == '3':
return 'pypy3'
else:
raise ValueError("Unknown PyPy major version")
class CLanguage(object):
def __init__(self, make_target, test_lang):
self.make_target = make_target
self.platform = platform_string()
self.test_lang = test_lang
def configure(self, config, args):
self.config = config
self.args = args
if self.platform == 'windows':
self._make_options = [_windows_toolset_option(self.args.compiler),
_windows_arch_option(self.args.arch)]
else:
self._docker_distro, self._make_options = self._compiler_options(self.args.use_docker,
self.args.compiler)
def test_specs(self):
out = []
binaries = get_c_tests(self.args.travis, self.test_lang)
for target in binaries:
polling_strategies = (_POLLING_STRATEGIES.get(self.platform, ['all'])
if target.get('uses_polling', True)
else ['all'])
for polling_strategy in polling_strategies:
env={'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
_ROOT + '/src/core/lib/tsi/test_creds/ca.pem',
'GRPC_POLL_STRATEGY': polling_strategy,
'GRPC_VERBOSITY': 'DEBUG'}
shortname_ext = '' if polling_strategy=='all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
if self.config.build_config in target['exclude_configs']:
continue
if self.platform == 'windows':
binary = 'vsprojects/%s%s/%s.exe' % (
'x64/' if self.args.arch == 'x64' else '',
_MSBUILD_CONFIG[self.config.build_config],
target['name'])
else:
binary = 'bins/%s/%s' % (self.config.build_config, target['name'])
if os.path.isfile(binary):
if 'gtest' in target and target['gtest']:
# here we parse the output of --gtest_list_tests to build up a
# complete list of the tests contained in a binary
# for each test, we then add a job to run, filtering for just that
# test
|
the-gigi/conman
|
conman/conman_etcd.py
|
Python
|
mit
| 3,240
| 0
|
"""A configuration management class built on top of etcd
See: http://python-etcd.readthedocs.org/
It provides a read-only access and just exposes a nested dict
"""
import functools
import time
import etcd3
from conman.conman_base import ConManBase
def thrice(delay=0.5):
"""This decorator tries failed operations 3 times before it gives up
The delay determines how long to wait between tries (in seconds)
"""
def decorated(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
for i in range(3):
try:
return f(*args, **kwargs)
except Exception:
if i == 2:
raise
time.sleep(delay)
return wrapped
return decorated
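# A minimal usage sketch (not part of the original module): the wrapped call is attempted
# up to 3 times, sleeping `delay` seconds between attempts, and the last exception is
# re-raised if every attempt fails. `_get_prefix_with_retries` and its arguments are
# hypothetical names used only for illustration.
@thrice(delay=0.5)
def _get_prefix_with_retries(client, key):
    return client.get_prefix(key, sort_order='ascend')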
class ConManEtcd(ConManBase):
def __init__(self,
host='127.0.0.1',
port=2379,
ca_cert=None,
cert_key=None,
cert_cert=None,
timeout=None,
user=None,
|
password=None,
grpc_options=None,
on_change=lambda e: None):
ConManBase.__init__(self)
self.on_change = on_change
self.client = etcd3.client(
host=host,
port=port,
ca_cert=ca_cert,
cert_key=cert_key,
cert_cert=cert_cert,
timeout=timeout,
|
user=user,
password=password,
grpc_options=grpc_options,
)
def _add_key_recursively(self, etcd_result):
ok = False
target = self._conf
for x in etcd_result:
ok = True
value = x[0].decode()
key = x[1].key.decode()
components = key.split('/')
t = target
for c in components[:-1]:
if c not in t:
t[c] = {}
t = t[c]
t[components[-1]] = value
if not ok:
raise Exception('Empty result')
def watch(self, key):
watch_id = self.client.add_watch_callback(key, self.on_change)
return watch_id
def watch_prefix(self, key):
return self.client.watch_prefix(key)
def cancel(self, watch_id):
self.client.cancel_watch(watch_id)
def add_key(self, key, watch=False):
"""Add a key to managed etcd keys and store its data
:param str key: the etcd path
:param bool watch: determine if need to watch the key
When a key is added all its data is stored as a dict
"""
etcd_result = self.client.get_prefix(key, sort_order='ascend')
self._add_key_recursively(etcd_result)
if watch:
self.watch(key)
def refresh(self, key=None):
"""Refresh an existing key or all keys
:param key: the key to refresh (if None refresh all keys)
If the key parameter doesn't exist an exception will be raised.
        There is no need to watch the configuration keys again.
"""
keys = [key] if key else self._conf.keys()
for k in keys:
if k in self._conf:
del self._conf[k]
self.add_key(k, watch=False)
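# Illustrative sketch (not part of the original module): one possible way to use
# ConManEtcd against a local etcd at 127.0.0.1:2379. The '/config/demo' prefix is
# hypothetical and must already hold some keys for add_key() to succeed.
if __name__ == '__main__':
    conman = ConManEtcd(host='127.0.0.1', port=2379)
    conman.add_key('/config/demo', watch=False)  # load the subtree into the nested dict
    print(conman._conf)                          # ConManBase is assumed to expose this dict
    conman.refresh('/config/demo')               # re-read the same subtree on demand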
|
dayatz/taiga-back
|
taiga/projects/userstories/api.py
|
Python
|
agpl-3.0
| 18,791
| 0.002289
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2017 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2017 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2017 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.apps import apps
from django.db import transaction
from django.utils.translation import ugettext as _
from django.http import HttpResponse
from taiga.base import filters as base_filters
from taiga.base import exceptions as exc
from taiga.base import response
from taiga.base import status
from taiga.base.decorators import list_route
from taiga.base.api.mixins import BlockedByProjectMixin
from taiga.base.api import ModelCrudViewSet
from taiga.base.api import ModelListViewSet
from taiga.base.api.utils import get_object_or_404
from taiga.base.utils import json
from taiga.projects.history.mixins import HistoryResourceMixin
from taiga.projects.history.services import take_snapshot
from taiga.projects.milestones.models import Milestone
from taiga.projects.mixins.by_ref import ByRefMixin
from taiga.projects.models import Project, UserStoryStatus
from taiga.projects.notifications.mixins import WatchedResourceMixin
from taiga.projects.notifications.mixins import WatchersViewSetMixin
from taiga.projects.occ import OCCResourceMixin
from taiga.projects.tagging.api import TaggedResourceMixin
from taiga.projects.votes.mixins.viewsets import VotedResourceMixin
from taiga.projects.votes.mixins.viewsets import VotersViewSetMixin
from taiga.projects.userstories.utils import attach_extra_info
from . import filters
from . import models
from . import permissions
from . import serializers
from . import services
from . import validators
class UserStoryViewSet(OCCResourceMixin, VotedResourceMixin, HistoryResourceMixin, WatchedResourceMixin,
ByRefMixin, TaggedResourceMixin, BlockedByProjectMixin, ModelCrudViewSet):
validator_class = validators.UserStoryValidator
queryset = models.UserStory.objects.all()
permission_classes = (permissions.UserStoryPermission,)
filter_backends = (base_filters.CanViewUsFilterBackend,
filters.EpicFilter,
base_filters.OwnersFilter,
base_filters.AssignedToFilter,
base_filters.StatusesFilter,
base_filters.TagsFilter,
base_filters.WatchersFilter,
base_filters.QFilter,
base_filters.CreatedDateFilter,
base_filters.ModifiedDateFilter,
base_filters.FinishDateFilter,
base_filters.MilestoneEstimatedStartFilter,
base_filters.MilestoneEstimatedFinishFilter,
base_filters.OrderByFilterMixin)
filter_fields = ["project",
"project__slug",
"milestone",
"milestone__isnull",
"is_closed",
"status__is_archived",
"status__is_closed"]
order_by_fields = ["backlog_order",
"sprint_order",
"kanban_order",
"epic_order",
"total_voters"]
def get_serializer_class(self, *args, **kwargs):
if self.action in ["retrieve", "by_ref"]:
return serializers.UserStoryNeighborsSerializer
if self.action == "list":
return serializers.UserStoryListSerializer
return serializers.UserStorySerializer
def get_queryset(self):
qs = super().get_queryset()
qs = qs.select_related("milestone",
"project",
"status",
"owner",
"assigned_to",
"generated_from_issue")
include_attachments = "include_attachments" in self.request.QUERY_PARAMS
include_tasks = "include_tasks" in self.request.QUERY_PARAMS
epic_id = self.request.QUERY_PARAMS.get("epic", None)
        # We can be filtering by more than one epic, so epic_id can consist
        # of different ids separated by commas. In that situation we will use
        # only the first one.
if epic_id is not None:
epic_id = epic_id.split(",")[0]
qs = attach_extra_info(qs, user=self.request.user,
include_attachments=include_attachments,
include_tasks=include_tasks,
epic_id=epic_id)
return qs
def pre_conditions_on_save(self, obj):
super().pre_conditions_on_save(obj)
if obj.milestone and obj.milestone.project != obj.project:
raise exc.PermissionDenied(_("You don't have permissions to set this sprint "
"to this user story."))
if obj.status and obj.status.project != obj.project:
raise exc.PermissionDenied(_("You don't have permissions to set this status "
"to this user story."))
"""
Updating some attributes of the userstory can affect the ordering in the backlog, kanban or taskboard
These three methods generate a key for the user story and can be used to be compared before and after
saving
If there is any difference it means an extra ordering update must be done
"""
def _backlog_order_key(self, obj):
return "{}-{}".format(obj.project_id, obj.backlog_order)
def _kanban_order_key(self, obj):
return "{}-{}-{}".format(obj.project_id, obj.status_id, obj.kanban_order)
def _sprint_order_key(self, obj):
return "{}-{}-{}".format(obj.project_id, obj.milestone_id, obj.sprint_order)
def pre_save(self, obj):
        # This is a very ugly hack, but with restframework it is
        # the only way to do it.
        #
        # NOTE: code moved as-is from the serializer
        # to the api because it is not serializer logic.
related_data = getattr(obj, "_related_data", {})
self._role_points = related_data.pop("role_points", None)
if not obj.id:
obj.owner = self.request.user
else:
self._old_backlog_order_key = self._backlog_order_key(self.get_object())
self._old_kanban_order_key = self._kanban_order_key(self.get_object())
self._old_sprint_order_key = self._sprint_order_key(self.get_object())
super().pre_save(obj)
def _reorder_if_needed(self, obj, old_order_key, order_key, order_attr,
project, status=None, milestone=None):
# Executes the extra ordering if there is a difference in the ordering keys
if old_order_key != order_key:
extra_orders = json.loads(self.request.META.get("HTTP_SET_ORDERS", "{}"))
data = [{"us_id": obj.id, "order": getattr(obj, order_attr)}]
for id, order in extra_orders.items():
data.append({"us_id": int(id), "order": order})
return services.update_userstories_order_in_bulk(data,
order_attr,
project,
status=status,
milestone=milestone)
return {}
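    # Illustrative note (not part of the original file): the "Set-Orders" request
    # header (read above as HTTP_SET_ORDERS) is expected to carry a JSON object that
    # maps other user story ids to their new order values, e.g. {"27": 3, "42": 4};
    # together with the story being saved it becomes the bulk payload handed to
    # services.update_userstories_order_in_bulk().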
|
id2669099/turbo-waffle
|
task_07_02.py
|
Python
|
mit
| 249
| 0
|
import random
from string import digits, ascii_letters, punctuation
def password_generator(length):
while True:
values = list(digits + ascii_letters + punctuation)
        yield ''.join([random.choice(values) for i in range(length)])
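# Illustrative sketch (not part of the original file): the generator above is
# infinite, so a caller draws as many passwords as needed with next().
if __name__ == '__main__':
    gen = password_generator(12)
    for _ in range(3):
        print(next(gen))  # prints three independent 12-character passwords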
|
cdapio/cdap-ambari-service
|
src/main/resources/common-services/CDAP/6.0.0/package/scripts/master.py
|
Python
|
apache-2.0
| 4,572
| 0.00175
|
# coding=utf8
# Copyright © 2015-2017 Cask Data, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
import ambari_helpers as helpers
from resource_management import *
class Master(Script):
def install(self, env):
print('Install the CDAP Master')
import params
# Add repository file
helpers.add_repo(
params.files_dir + params.repo_file,
params.os_repo_dir
)
# Install any global packages
self.install_packages(env)
# Workaround for CDAP-3961
helpers.package('cdap-hbase-compat-1.1')
# Install package
helpers.package('cdap-master')
self.configure(env)
def start(self, env, upgrade_type=None):
print('Start the CDAP Master')
import params
import status_params
env.set_params(params)
self.configure(env)
helpers.create_hdfs_dir(params.hdfs_namespace, params.cdap_hdfs_user, 775)
# Create user's HDFS home
helpers.create_hdfs_dir('/user/' + params.cdap_user, params.cdap_user, 775)
if params.cdap_hdfs_user != params.cdap_user:
helpers.create_hdfs_dir('/user/' + params.cdap_hdfs_user, params.cdap_hdfs_user, 775)
# Hack to work around CDAP-1967
self.remove_jackson(env)
daemon_cmd = format('/opt/cdap/master/bin/cdap master start')
no_op_test = format('ls {status_params.cdap_master_pid_file} >/dev/null 2>&1 && ps -p $(<{status_params.cdap_master_pid_file}) >/dev/null 2>&1')
Execute(
daemon_cmd,
user=params.cdap_user,
not_if=no_op_test
)
def stop(self, env, upgrade_type=None):
print('Stop the CDAP Master')
import status_params
daemon_cmd = format('service cdap-master stop')
no_op_test = format('ls {status_params.cdap_master_pid_file} >/dev/null 2>&1 && ps -p $(<{status_params.cdap_master_pid_file}) >/dev/null 2>&1')
Execute(
daemon_cmd,
only_if=no_op_test
)
def status(self, env):
import status_params
check_process_status(status_params.cdap_master_pid_file)
    def configure(self, env):
print('Configure the CDAP Master')
import params
env.set_params(params)
helpers.cdap_config('master')
def upgrade(self, env):
self.run_class(
env,
classname='io.cdap.cdap.data.tools.UpgradeTool',
label='CDAP Upgrade Tool',
            arguments='upgrade force'
)
def upgrade_hbase(self, env):
self.run_class(
env,
classname='io.cdap.cdap.data.tools.UpgradeTool',
label='CDAP HBase Coprocessor Upgrade Tool',
arguments='upgrade_hbase force'
)
def postupgrade(self, env):
self.run_class(
env,
classname='io.cdap.cdap.data.tools.flow.FlowQueuePendingCorrector',
label='CDAP Post-Upgrade Tool'
)
def queue_debugger(self, env):
self.run_class(
env,
classname='io.cdap.cdap.data.tools.SimpleHBaseQueueDebugger',
label='CDAP Queue Debugger Tool'
)
def jobqueue_debugger(self, env):
self.run_class(
env,
classname='io.cdap.cdap.data.tools.JobQueueDebugger',
label='CDAP Job Queue Debugger Tool'
)
def run_class(self, env, classname, label=None, arguments=''):
if label is None:
label = classname
print('Running: ' + label)
import params
cmd = format("/opt/cdap/master/bin/cdap run %s %s" % (classname, arguments))
Execute(
cmd,
user=params.cdap_user
)
def remove_jackson(self, env):
jackson_check = format('ls -1 /opt/cdap/master/lib/org.codehaus.jackson* 2>/dev/null')
Execute(
'rm -f /opt/cdap/master/lib/org.codehaus.jackson.jackson-*',
not_if=jackson_check
)
if __name__ == "__main__":
Master().execute()
|
JudaismBot/JudaismBot
|
scrapers/mechonMamre.py
|
Python
|
mit
| 2,147
| 0.014905
|
from bs4 import BeautifulSoup as Soup
import json
import re
import requests
from common import *
from nltk.corpus import words
entries = []
WEBSITE = 'http://www.mechon-mamre.org/jewfaq/glossary.htm'
SITE_TITLE = "Mechon Mamre"
source_object = {"site":WEBSITE, "title":SITE_TITLE}
def main():
parseMechonMamre()
def parseMechonMamre():
response = requests.get(WEBSITE)
page = Soup(response.content, "lxml")
stack = [page]
while(len(stack)>0):
node = stack.pop()
for child in node.contents:
if child.name == 'dl':
parseList(child)
elif child.name:
stack.append(child)
print("Done")
def parseList(node):
entry = {"language":"Hebrew", "english":[]}
entryDone = False
foundTerm = False
for line in [line for line in node.contents if line.name or len(line.strip())>0]:
if line.name == "dt":
parseTerm(entry, line.text)
else:
breaklineCount = 0
if entryDone:
if len(entry["english"])>0 and not entry["english"][0].endswith("CSULB") and not entry["english"][0].startswith("email"):
addEntry(entry)
entry = {"language":"Yiddish", "english":[]}
entryDone = False
foundTerm = False
if not foundTerm:
split = line.split(":", 1)
term = split[0]
foundTerm = True
for t in term.split("/"):
entry["english"].append(t.strip().lower())
if len(split) > 1:
entry["definition"] = {"text":split[1].strip(),"source":source_object}
else:
pass
else:
            if "definition" in entry:
entry["definition"]["text"] += " "+line.strip()
else:
entry["definition"] = {"text":line.strip(),"source":source_object}
def parseTerm(entry, term):
if(term.startswith("Kohein")):
pass
else:
matches = re.findall("([a-zA-Z-'\d][a-zA-Z- '\d]+)(?: \(([^;\)]*)(;[^;\)]*)*\))?(;|$)",term)
return matches[0][0]
if __name__ == '__main__':
main()
|
atiqueahmedziad/addons-server
|
src/olympia/amo/middleware.py
|
Python
|
bsd-3-clause
| 12,302
| 0
|
import contextlib
import re
import socket
import urllib
import uuid
from django.conf import settings
from django.contrib.auth.middleware import AuthenticationMiddleware
from django.contrib.auth.models import AnonymousUser
from django.contrib.sessions.middleware import SessionMiddleware
from django.db import transaction
from django.urls import is_valid_path
from django.http import (
HttpResponsePermanentRedirect, HttpResponseRedirect,
JsonResponse)
from django.middleware import common
from django.utils.cache import patch_cache_control, patch_vary_headers
from django.utils.deprecation import MiddlewareMixin
from django.utils.encoding import force_bytes, iri_to_uri
from django.utils.translation import activate, ugettext_lazy as _
from rest_framework import permissions
import MySQLdb as mysql
from corsheaders.middleware import CorsMiddleware as _CorsMiddleware
from olympia import amo
from olympia.amo.utils import render
from . import urlresolvers
from .templatetags.jinja_helpers import urlparams
auth_path = re.compile('%saccounts/authenticate/?$' % settings.DRF_API_REGEX)
class LocaleAndAppURLMiddleware(MiddlewareMixin):
"""
1. search for locale first
2. see if there are acceptable apps
3. save those matched parameters in the request
4. strip them from the URL so we can do stuff
"""
def process_request(self, request):
# Find locale, app
prefixer = urlresolvers.Prefixer(request)
if settings.DEBUG:
redirect_type = HttpResponseRedirect
else:
redirect_type = HttpResponsePermanentRedirect
urlresolvers.set_url_prefix(prefixer)
full_path = prefixer.fix(prefixer.shortened_path)
if (prefixer.app == amo.MOBILE.short and
request.path.rstrip('/').endswith('/' + amo.MOBILE.short)):
return redirect_type(request.path.replace('/mobile', '/android'))
if ('lang' in request.GET and not re.match(
settings.SUPPORTED_NONAPPS_NONLOCALES_REGEX,
prefixer.shortened_path)):
# Blank out the locale so that we can set a new one. Remove lang
# from query params so we don't have an infinite loop.
prefixer.locale = ''
new_path = prefixer.fix(prefixer.shortened_path)
query = dict((force_bytes(k), request.GET[k]) for k in request.GET)
query.pop('lang')
return redirect_type(urlparams(new_path, **query))
if full_path != request.path:
query_string = request.META.get('QUERY_STRING', '')
full_path = urllib.quote(full_path.encode('utf-8'))
if query_string:
query_string = query_string.decode('utf-8', 'ignore')
full_path = u'%s?%s' % (full_path, query_string)
response = redirect_type(full_path)
# Cache the redirect for a year.
if not settings.DEBUG:
patch_cache_control(response, max_age=60 * 60 * 24 * 365)
# Vary on Accept-Language or User-Agent if we changed the locale or
# app.
old_app = prefixer.app
old_locale = prefixer.locale
new_locale, new_app, _ = prefixer.split_path(full_path)
if old_locale != new_locale:
patch_vary_headers(response, ['Accept-Language'])
if old_app != new_app:
patch_vary_headers(response, ['User-Agent'])
return response
request.path_info = '/' + prefixer.shortened_path
request.LANG = prefixer.locale or prefixer.get_language()
activate(request.LANG)
request.APP = amo.APPS.get(prefixer.app, amo.FIREFOX)
# Match legacy api requests too - IdentifyAPIRequestMiddleware is v3+
# TODO - remove this when legacy_api goes away
# https://github.com/mozilla/addons-server/issues/9274
request.is_legacy_api = request.path_info.startswith('/api/')
class AuthenticationMiddlewareWithoutAPI(AuthenticationMiddleware):
"""
Like AuthenticationMiddleware, but disabled for the API, which uses its
own authentication mechanism.
"""
def process_request(self, request):
legacy_or_drf_api = request.is_api or request.is_legacy_api
if legacy_or_drf_api and not auth_path.match(request.path):
request.user = AnonymousUser()
else:
return super(
AuthenticationMiddlewareWithoutAPI,
self).process_request(request)
class NoVarySessionMiddleware(SessionMiddleware):
"""
SessionMiddleware sets Vary: Cookie anytime request.session is accessed.
request.session is accessed indirectly anytime request.user is touched.
We always touch request.user to see if the user is authenticated, so every
request would be sending vary, so we'd get no caching.
We skip the cache in Zeus if someone has an AMOv3+ cookie, so varying on
Cookie at this level only hurts us.
"""
def process_response(self, request, response):
if settings.READ_ONLY:
return response
# Let SessionMiddleware do its processing but prevent it from changing
# the Vary header.
vary = None
if hasattr(response, 'get'):
vary = response.get('Vary', None)
new_response = (
super(NoVarySessionMiddleware, self)
.process_response(request, response))
if vary:
new_response['Vary'] = vary
else:
del new_response['Vary']
return new_response
class RemoveSlashMiddleware(MiddlewareMixin):
"""
Middleware that tries to remove a trailing slash if there was a 404.
If the response is a 404 because url resolution failed, we'll look for a
better url without a trailing slash.
"""
def process_response(self, request, response):
if (response.status_code == 404 and
request.path_info.endswith('/') and
not is_valid_path(request.path_info) and
is_valid_path(request.path_info[:-1])):
# Use request.path because we munged app/locale in path_info.
newurl = request.path[:-1]
if request.GET:
with safe_query_string(request):
newurl += '?' + request.META.get('QUERY_STRING', '')
return HttpResponsePermanentRedirect(newurl)
else:
return response
@contextlib.contextmanager
def safe_query_string(request):
"""
Turn the QUERY_STRING into a unicode- and ascii-safe string.
    We need unicode so it can be combined with a reversed URL, but it has to be
ascii to go in a Location header. iri_to_uri seems like a good compromise.
"""
qs = request.META.get('QUERY_STRING', '')
try:
request.META['QUERY_STRING'] = iri_to_uri(qs)
yield
finally:
request.META['QUERY_STRING'] = qs
class CommonMiddleware(common.CommonMiddleware):
def process_request(self, request):
with safe_query_string(request):
            return super(CommonMiddleware, self).process_request(request)
class NonAtomicRequestsForSafeHttpMethodsMiddleware(MiddlewareMixin):
"""
Middleware to make the view non-atomic if the HTTP method used is safe,
in order to avoid opening and closing a useless transaction.
"""
def process_view(self, request, view_func, view_args, view_kwargs):
# This uses undocumented django APIS:
# - transaction.get_connection() followed by in_atomic_block property,
# which we need to make sure we're not messing with a transaction
# that has already started (which happens in tests using the regular
# TestCase class)
# - _non_atomic_requests(), which set the property to prevent the
# transaction on the view itself. We can't use non_atomic_requests
# (without the '_') as it returns a *new* view, and we can't do that
# in a middleware, we need to modify it in place and return None so
# that the rest of the middlewares are run.
is_method_safe = request.method in ('HEAD
|
kbussell/django-auditlog
|
src/auditlog/filters.py
|
Python
|
mit
| 1,023
| 0.000978
|
from django.contrib.admin import SimpleListFilter
from django.contrib.contenttypes.models import ContentType
from django.db.models import Value
from django.db.models.functions import Concat
from auditlog.registry import auditlog
class ResourceTypeFilter(SimpleListFilter):
title = 'Resource Type'
parameter_name = 'resource_type'
def lookups(self, request, model_admin):
tracked_model_names = [
'{}.{}'.format(m._meta.app_label, m._meta.model_name)
for m in auditlog.list()
]
model_name_concat = Concat('app_label', Value('.'), 'model')
content_types = ContentType.objects.annotate(
model_name=model_name_concat,
).filter(
model_name__in=tracked_model_names,
)
return content_types.order_by('model_name').values_list('id', 'model_name')
def queryset(self, request, queryset):
if self.value() is None:
return queryset
return queryset.filter(content_type_id=self.value())
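# Illustrative sketch (not part of the original file): ResourceTypeFilter is meant to
# be listed in a Django admin's list_filter. The admin class below is a hypothetical
# example, not auditlog's actual admin registration.
from django.contrib import admin
from auditlog.models import LogEntry

class ExampleLogEntryAdmin(admin.ModelAdmin):
    # adds a "Resource Type" sidebar filter that only lists audited models
    list_filter = (ResourceTypeFilter,)

# admin.site.register(LogEntry, ExampleLogEntryAdmin)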
|
scheib/chromium
|
third_party/blink/web_tests/external/wpt/fetch/api/resources/redirect.h2.py
|
Python
|
bsd-3-clause
| 505
| 0.005941
|
from wptserve.utils import isomorphic_decode, isomorphic_encode
def handle_headers(frame, request, response):
status = 302
if b'redirect_status' in request.GET:
status = int(request.GET[b'redirect_status'])
response.status = status
    if b'location' in request.GET:
url = isomorphic_decode(request.GET[b'location'])
response.headers[b'Location'] = isomorphic_encode(url)
response.headers.update([('Content-Type', 'text/plain')])
response.write_status_headers()
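# Illustrative note (not part of the original file): the handler is driven entirely by
# query parameters, e.g. a fetch test could request
#   redirect.h2.py?redirect_status=307&location=/fetch/api/resources/top.txt
# (hypothetical path) and receive a 307 response whose Location header is that path.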
|
jamesbdunlop/defaultShotgunLibrary
|
server/tk_sgsrv.py
|
Python
|
apache-2.0
| 420
| 0.002381
|
import config_constants as configCONST
from shotgun_api3 import Shotgun
base_url = configCONST.SHOTGUN_URL
script_name = configCONST.SHOTGUN_TOOLKIT_NAME
api_key = configCONST.SHOTGUN_TOOLKIT_API_KEY
SHOTGUN_SERVER = Shotgun(base_url=base_url,
script_name=script_name,
api_key=api_key,
ensure_ascii=True,
connect=True)
| |
digitalocean/netbox
|
netbox/ipam/migrations/0030_3569_vlan_fields.py
|
Python
|
apache-2.0
| 760
| 0
|
from django.db import migrations, models
VLAN_STATUS_CHOICES = (
(1, 'active'),
(2, 'reserved'),
(3, 'deprecated'),
)
def vlan_status_to_slug(apps, schema_editor):
VLAN = apps.get_model('ipam', 'VLAN')
    for id, slug in VLAN_STATUS_CHOICES:
VLAN.objects.filter(status=str(id)).update(status=slug)
class Migration(migrations.Migration):
atomic = False
dependencies = [
('ipam', '0029_3569_ipaddress_fields'),
]
operations = [
# VLAN.status
migrations.AlterField(
model_name='vlan',
name='status',
field=models.CharField(default='active', max_length=50),
),
migrations.RunPython(
code=vlan_status_to_slug
),
]
|
tsaylor/remember-forever
|
remember_forever/urls.py
|
Python
|
mit
| 1,038
| 0.003854
|
from django.conf.urls import patterns, include, url
from django.views.generic.base import RedirectView
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', 'core.views.home', name='home'),
url(r'^morsel/create/$', 'core.views.create_morsel', name='morsel_create'),
    url(r'^morsel/(?P<pk>\d+)/$', 'core.views.morsel_detail', name='morsel_detail'),
url(r'^morsel/(?P<pk>\d+)/delete/$', 'core.views.morsel_delete', name='morsel_delete'),
url(r'^accounts/login/', 'django.contrib.auth.views.login', name='login'),
url(r'^accounts/logout/', 'django.contrib.auth.views.logout', {'next_page': '/'}, name="logout"),
url(r'^accounts/', include('registration.backends.default.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
)
|
chpatrick/hubsan
|
test.py
|
Python
|
gpl-3.0
| 530
| 0.013208
|
from hubsan import *
import logging
import math
logging.basicConfig(level = logging.INFO)
hubsan = Hubsan()
hubsan.init()
hubsan.bind()
hubsan.safety()
print "bind complete"
session_id = hubsan.session_id
channel = hubsan.channel
print "channel: %d" % channel
print "session_id: %s" % format_packet(session_id)
print "closing, press any key"
raw_input()
hubsan.close()
hubsan2 = Hubsan()
hubsan2.init()
hubsan2.bind(session_id = session_id, channel = channel)
print "resumed"
while True:
  hubsan2.control(0.05, 0, 0, 0)
|
abad623/verbalucce
|
verbalucce/nltk/misc/wordfinder.py
|
Python
|
apache-2.0
| 4,113
| 0.013372
|
# Natural Language Toolkit: Word Finder
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Steven Bird <sb@csse.unimelb.edu.au>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
# Simplified from PHP version by Robert Klein <brathna@gmail.com>
# http://fswordfinder.sourceforge.net/
from __future__ import print_function
import random
from string import strip
# reverse a word with probability 0.5
def revword(word):
if random.randint(1,2) == 1:
return word[::-1]
return word
# try to insert word at position x,y; direction encoded in xf,yf
def step(word, x, xf, y, yf, grid):
for i in range(len(word)):
if grid[xf(i)][yf(i)] != "" and grid[xf(i)][yf(i)] != word[i]:
return False
for i in range(len(word)):
grid[xf(i)][yf(i)] = word[i]
return True
# try to insert word at position x,y, in direction dir
def check(word, dir, x, y, grid, rows, cols):
if dir==1:
if x-len(word)<0 or y-len(word)<0:
return False
return step(word, x, lambda i:x-i, y, lambda i:y-i, grid)
elif dir==2:
if x-len(word)<0:
return False
return step(word, x, lambda i:x-i, y, lambda i:y, grid)
elif dir==3:
if x-len(word)<0 or y+(len(word)-1)>=cols:
return False
return step(word, x, lambda i:x-i, y, lambda i:y+i, grid)
elif dir==4:
if y-len(word)<0:
return False
return step(word, x, lambda i:x, y, lambda i:y-i, grid)
def wordfinder(words, rows=20, cols=20, attempts=50,
alph='ABCDEFGHIJKLMNOPQRSTUVWXYZ'):
"""
Attempt to arrange words into a letter-grid with the specified
number of rows and columns. Try each word in several positions
and directions, until it can be fitted into the grid, or the
maximum number of allowable attempts is exceeded. Returns a tuple
consisting of the grid and the words that were successfully
placed.
:param words: the list of words to be put into the grid
:type words: list
:param rows: the number of rows in the grid
:type rows: int
:param cols: the number of columns in the grid
:type cols: int
:param attempts: the number of times to attempt placing a word
:type attempts: int
:param alph: the alphabet, to be used for filling blank cells
:type alph: list
:rtype: tuple
"""
# place longer words first
words.sort(cmp=lambda x,y:cmp(len(x),len(y)), reverse=True)
grid = [] # the letter grid
used = [] # the words we used
# initialize the grid
for i in range(rows):
grid.append([""] * cols)
# try to place each word
for word in words:
word = strip(word).upper() # normalize
save = word # keep a record of the word
word = revword(word)
for attempt in range(attempts):
r = random.randint(0, len(word))
dir = random.choice([1,2,3,4])
x = random.randint(0,rows)
y = random.randint(0,cols)
if dir==1: x+=r; y+=r
elif dir==2: x+=r
elif dir==3: x+=r; y-=r
elif dir==4: y+=r
if 0<=x<rows and 0<=y<cols:
if check(word, dir, x, y, grid, rows, cols):
# used.append((save, dir, x, y, word))
used.append(save)
break
# Fill up the remaining spaces
for i in range(rows):
for j in range(cols):
            if grid[i][j] == '':
grid[i][j] = random.choice(alph)
return grid, used
def word_finder():
from nltk.corpus import words
wordlist = words.words()
random.shuffle(wordlist)
wordlist = wordlist[:200]
    wordlist = [w for w in wordlist if 3 <= len(w) <= 12]
grid, used = wordfinder(wordlist)
print("Word Finder\n")
for i in range(len(grid)):
for j in range(len(grid[i])):
print(grid[i][j], end=' ')
print()
print()
for i in range(len(used)):
print("%d:" % (i+1), used[i])
if __name__ == '__main__':
word_finder()
|
klahnakoski/ActiveData
|
vendor/mo_threads/queues.py
|
Python
|
mpl-2.0
| 18,763
| 0.001705
|
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski (kyle@lahnakoski.com)
#
# THIS THREADING MODULE IS PERMEATED BY THE please_stop SIGNAL.
# THIS SIGNAL IS IMPORTANT FOR PROPER SIGNALLING WHICH ALLOWS
# FOR FAST AND PREDICTABLE SHUTDOWN AND CLEANUP OF THREADS
from __future__ import absolute_import, division, unicode_literals
import types
from collections import deque
from copy import copy
from datetime import datetime
from time import time
from mo_dots import Null, coalesce
from mo_future import long
from mo_logs import Except, Log
from mo_threads.lock import Lock
from mo_threads.signals import Signal
from mo_threads.threads import THREAD_STOP, THREAD_TIMEOUT, Thread
from mo_threads.till import Till
DEBUG = False
# MAX_DATETIME = datetime(2286, 11, 20, 17, 46, 39)
DEFAULT_WAIT_TIME = 10 * 60 # SECONDS
datetime.strptime('2012-01-01', '%Y-%m-%d') # http://bugs.python.org/issue7980
class Queue(object):
"""
SIMPLE MULTI-THREADED QUEUE
(multiprocessing.Queue REQUIRES SERIALIZATION, WHICH
IS DIFFICULT TO USE JUST BETWEEN THREADS)
"""
def __init__(self, name, max=None, silent=False, unique=False, allow_add_after_close=False):
"""
max - LIMIT THE NUMBER IN THE QUEUE, IF TOO MANY add() AND extend() WILL BLOCK
silent - COMPLAIN IF THE READERS ARE TOO SLOW
unique - SET True IF YOU WANT ONLY ONE INSTANCE IN THE QUEUE AT A TIME
"""
self.name = name
self.max = coalesce(max, 2 ** 10)
self.silent = silent
self.allow_add_after_close=allow_add_after_close
self.unique = unique
self.closed = Signal("stop adding signal for " + name) # INDICATE THE PRODUCER IS DONE GENERATING ITEMS TO QUEUE
self.lock = Lock("lock for queue " + name)
self.queue = deque()
def __iter__(self):
try:
while True:
value = self.pop()
if value is THREAD_STOP:
break
if value is not None:
yield value
except Exception as e:
Log.warning("Tell me about what happened here", e)
def add(self, value, timeout=None, force=False):
"""
        :param value: ADDED TO THE QUEUE
:param timeout: HOW LONG TO WAIT FOR QUEUE TO NOT BE FULL
:param force: ADD TO QUEUE, EVEN IF FULL (USE ONLY WHEN CONSUMER IS RETURNING WORK TO THE QUEUE)
:return: self
"""
with self.lock:
if value is THREAD_STOP:
# INSIDE THE lock SO THAT EXITING WILL RELEASE wait()
self.queue.append(value)
self.closed.go()
return
if not force:
self._wait_for_queue_space(timeout=timeout)
if self.closed and not self.allow_add_after_close:
Log.error("Do not add to closed queue")
if self.unique:
if value not in self.queue:
self.queue.append(value)
else:
self.queue.append(value)
return self
def push(self, value):
"""
SNEAK value TO FRONT OF THE QUEUE
"""
if self.closed and not self.allow_add_after_close:
Log.error("Do not push to closed queue")
with self.lock:
self._wait_for_queue_space()
if not self.closed:
self.queue.appendleft(value)
return self
def push_all(self, values):
"""
SNEAK values TO FRONT OF THE QUEUE
"""
if self.closed and not self.allow_add_after_close:
Log.error("Do not push to closed queue")
with self.lock:
self._wait_for_queue_space()
if not self.closed:
self.queue.extendleft(values)
return self
def pop_message(self, till=None):
"""
RETURN TUPLE (message, payload) CALLER IS RESPONSIBLE FOR CALLING message.delete() WHEN DONE
DUMMY IMPLEMENTATION FOR DEBUGGING
"""
if till is not None and not isinstance(till, Signal):
Log.error("Expecting a signal")
return Null, self.pop(till=till)
def extend(self, values):
if self.closed and not self.allow_add_after_close:
Log.error("Do not push to closed queue")
with self.lock:
# ONCE THE queue IS BELOW LIMIT, ALLOW ADDING MORE
self._wait_for_queue_space()
if not self.closed:
if self.unique:
for v in values:
if v is THREAD_STOP:
self.closed.go()
continue
if v not in self.queue:
self.queue.append(v)
else:
for v in values:
if v is THREAD_STOP:
self.closed.go()
continue
self.queue.append(v)
return self
def _wait_for_queue_space(self, timeout=None):
"""
EXPECT THE self.lock TO BE HAD, WAITS FOR self.queue TO HAVE A LITTLE SPACE
:param timeout: IN SECONDS
"""
wait_time = 5
(DEBUG and len(self.queue) > 1 * 1000 * 1000) and Log.warning("Queue {{name}} has over a million items")
start = time()
stop_waiting = Till(till=start+coalesce(timeout, DEFAULT_WAIT_TIME))
while not self.closed and len(self.queue) >= self.max:
if stop_waiting:
Log.error(THREAD_TIMEOUT)
if self.silent:
self.lock.wait(stop_waiting)
else:
self.lock.wait(Till(seconds=wait_time))
if not stop_waiting and len(self.queue) >= self.max:
now = time()
Log.alert(
"Queue with name {{name|quote}} is full with ({{num}} items), thread(s) have been waiting {{wait_time}} sec",
name=self.name,
num=len(self.queue),
wait_time=now-start
)
def __len__(self):
with self.lock:
return len(self.queue)
def __nonzero__(self):
with self.lock:
return any(r != THREAD_STOP for r in self.queue)
def pop(self, till=None):
"""
WAIT FOR NEXT ITEM ON THE QUEUE
RETURN THREAD_STOP IF QUEUE IS CLOSED
RETURN None IF till IS REACHED AND QUEUE IS STILL EMPTY
:param till: A `Signal` to stop waiting and return None
:return: A value, or a THREAD_STOP or None
"""
if till is not None and not isinstance(till, Signal):
Log.error("expecting a signal")
with self.lock:
while True:
if self.queue:
return self.queue.popleft()
if self.closed:
break
if not self.lock.wait(till=self.closed | till):
if self.closed:
break
return None
(DEBUG or not self.silent) and Log.note(self.name + " queue closed")
return THREAD_STOP
def pop_all(self):
"""
NON-BLOCKING POP ALL IN QUEUE, IF ANY
"""
with self.lock:
output = list(self.queue)
            self.queue.clear()
return output
def pop_one(self):
"""
NON-BLOCKING POP IN QUEUE, IF ANY
"""
with self.lock:
if self.closed:
return THREAD_STOP
elif not self.queue:
return None
else:
v =self.queue.popleft()
if v is THREAD_STOP: # SENDING A STOP INTO THE QUEUE IS ALSO AN OPTION
self.closed.go()
return v
def close(self):
self.closed.go()
def commit(self):
pass
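# Illustrative sketch (not part of the original module): a minimal single-threaded use
# of Queue; adding THREAD_STOP marks the queue closed so iteration terminates cleanly.
def _example_queue_usage():
    q = Queue("example queue", max=10)
    q.extend(["a", "b", "c"])
    q.add(THREAD_STOP)             # close the queue once it is drained
    return [item for item in q]    # -> ["a", "b", "c"]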
|
hatchetation/freeipa
|
ipalib/plugins/baseldap.py
|
Python
|
gpl-3.0
| 76,643
| 0.002623
|
# Authors:
# Pavel Zuna <pzuna@redhat.com>
#
# Copyright (C) 2009 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Base classes for LDAP plugins.
"""
import re
import json
import time
from copy import deepcopy
import base64
from ipalib import api, crud, errors
from ipalib import Method, Object, Command
from ipalib import Flag, Int, Str
from ipalib.base import NameSpace
from ipalib.cli import to_cli, from_cli
from ipalib import output
from ipalib.text import _
from ipalib.util import json_serialize, validate_hostname
from ipapython.dn import DN, RDN
global_output_params = (
Flag('has_password',
label=_('Password'),
),
Str('member',
label=_('Failed members'),
),
Str('member_user?',
label=_('Member users'),
),
Str('member_group?',
label=_('Member groups'),
),
Str('memberof_group?',
label=_('Member of groups'),
),
Str('member_host?',
label=_('Member hosts'),
),
Str('member_hostgroup?',
label=_('Member host-groups'),
),
Str('memberof_hostgroup?',
label=_('Member of host-groups'),
),
Str('memberof_permission?',
label=_('Permissions'),
),
Str('memberof_privilege?',
label='Privileges',
),
Str('memberof_role?',
label=_('Roles'),
),
Str('memberof_sudocmdgroup?',
label=_('Sudo Command Groups'),
),
Str('member_privilege?',
label='Granted to Privilege',
),
Str('member_role?',
label=_('Granting privilege to roles'),
),
Str('member_netgroup?',
label=_('Member netgroups'),
),
Str('memberof_netgroup?',
label=_('Member of netgroups'),
),
Str('member_service?',
label=_('Member services'),
),
Str('member_servicegroup?',
label=_('Member service groups'),
),
Str('memberof_servicegroup?',
label='Member of service groups',
),
Str('member_hbacsvc?',
label=_('Member HBAC service'),
),
Str('member_hbacsvcgroup?',
label=_('Member HBAC service groups'),
),
Str('memberof_hbacsvcgroup?',
label='Member of HBAC service groups',
),
Str('member_sudocmd?',
label='Member Sudo commands',
),
Str('memberof_sudorule?',
label='Member of Sudo rule',
),
Str('memberof_hbacrule?',
label='Member of HBAC rule',
),
Str('memberindirect_user?',
label=_('Indirect Member users'),
),
Str('memberindirect_group?',
label=_('Indirect Member groups'),
),
Str('memberindirect_host?',
label=_('Indirect Member hosts'),
),
Str('memberindirect_hostgroup?',
label=_('Indirect Member host-groups'),
),
Str('memberindirect_role?',
label=_('Indirect Member of roles'),
),
Str('memberindirect_permission?',
label=_('Indirect Member permissions'),
),
Str('memberindirect_hbacsvc?',
label=_('Indirect Member HBAC service'),
),
Str('memberindirect_hbacsvcgrp?',
label=_('Indirect Member HBAC service group'),
),
Str('memberindirect_netgroup?',
label=_('Indirect Member netgroups'),
),
Str('memberofindirect_group?',
label='Indirect Member of group',
),
Str('memberofindirect_netgroup?',
label='Indirect Member of netgroup',
),
Str('memberofindirect_hostgroup?',
label='Indirect Member of host-group',
),
Str('memberofindirect_role?',
label='Indirect Member of role',
),
Str('memberofindirect_sudorule?',
label='Indirect Member of Sudo rule',
),
Str('memberofindirect_hbacrule?',
label='Indirect Member of HBAC rule',
),
Str('sourcehost',
label=_('Failed source hosts/hostgroups'),
),
Str('memberhost',
label=_('Failed hosts/hostgroups'),
),
Str('memberuser',
label=_('Failed users/groups'),
),
Str('memberservice',
label=_('Failed service/service groups'),
),
Str('failed',
label=_('Failed to remove'),
flags=['suppress_empty'],
),
Str('ipasudorunas',
label=_('Failed RunAs'),
),
Str('ipasudorunasgroup',
label=_('Failed RunAsGroup'),
),
)
def validate_add_attribute(ugettext, attr):
validate_attribute(ugettext, 'addattr', attr)
def validate_set_attribute(ugettext, attr):
validate_attribute(ugettext, 'setattr', attr)
def validate_del_attribute(ugettext, attr):
validate_attribute(ugettext, 'delattr', attr)
def validate_attribute(ugettext, name, attr):
m = re.match("\s*(.*?)\s*=\s*(.*?)\s*$", attr)
if not m or len(m.groups()) != 2:
raise errors.ValidationError(
name=name, error=_('Invalid format. Should be name=value'))
def get_effective_rights(ldap, dn, attrs=None):
assert isinstance(dn, DN)
if attrs is None:
attrs = ['*', 'nsaccountlock', 'cospriority']
rights = ldap.get_effective_rights(dn, attrs)
rdict = {}
if 'attributelevelrights' in rights[1]:
rights = rights[1]['attributelevelrights']
rights = rights[0].split(', ')
for r in rights:
(k,v) = r.split(':')
rdict[k.strip().lower()] = v
return rdict
def entry_from_entry(entry, newentry):
"""
    Python passes references to objects, so if you pass a dict into a function
    you are free to change members of that dict, but you cannot create a new
    dict in the function and expect it to replace what was passed in.
In some post-op plugins that is exactly what we want to do, so here is a
clumsy way around the problem.
"""
# Wipe out the current data
for e in entry.keys():
del entry[e]
    # Re-populate it with the new entry
for e in newentry:
entry[e] = newentry[e]
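# Illustrative example (not part of the original module) of the in-place behaviour
# described above:
#   entry = {'cn': ['old']}
#   entry_from_entry(entry, {'cn': ['new'], 'description': ['x']})
#   # entry is now {'cn': ['new'], 'description': ['x']} -- same dict object, new contents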
def wait_for_value(ldap, dn, attr, value):
"""
389-ds postoperation plugins are executed after the data has been
returned to a client. This means that plugins that add data in a
postop are not included in data returned to the user.
The downside of waiting is that this increases the time of the
command.
The updated entry is returned.
"""
# Loop a few times to give the postop-plugin a chance to complete
# Don't sleep for more than 6 seconds.
x = 0
while x < 20:
# sleep first because the first search, even on a quiet system,
# almost always fails.
time.sleep(.3)
x = x + 1
# FIXME: put a try/except around here? I think it is probably better
# to just let the exception filter up to the caller.
(dn, entry_attrs) = ldap.get_entry( dn, ['*'])
if attr in entry_attrs:
if isinstance(entry_attrs[attr], (list, tuple)):
values = map(lambda y:y.lower(), entry_attrs[attr])
if value.lower() in values:
break
else:
if value.lower() == entry_attrs[attr].lower():
break
return entry_attrs
def validate_externalhost(ugettext, hostname):
try:
validate_hostname(hostname, check_fqdn=False, allow_underscore=True)
except ValueError, e:
return unicode(e)
external_host_param = Str('externalhost*', validate_externalhost,
label=_('External host'),
flags=['no_option'],
)
def add_external_pre_callback(membertype, ldap, dn, keys, options):
"""
Pre callback to validate external me
|
ihh/dart
|
python/XGram/Tests/GeneratorCodonsTest.py
|
Python
|
gpl-2.0
| 6,123
| 0.019108
|
import unittest
import string
from pprint import pprint
import XGram, XGram.Parser, XGram.Exceptions
from XGram.Generator.Prebuilt import Codons
from XGram.Model import Annotation
import Bio.Data.CodonTable
class GeneratorCodonsTest(unittest.TestCase):
"""
A test class for testing a grammar
"""
def setUp(self):
"""
set up data used in the tests.
setUp is called before each test function execution.
"""
self.mInputFile= XGram.PATH_DATA+"/dpse_dmel.stk"
self.mXgram = XGram.XGram()
self.mXgram.setDebug()
def tearDown(self):
"""
tear down any data used in tests
tearDown is called after each test function execution.
"""
pass
def testModelF3X4Two(self):
"""test f3x4-two model.
"""
self.buildAndCheckModel( "f3x4-two" )
def testModelF3X4Four(self):
"""test f3x4-four model.
"""
self.buildAndCheckModel( "f3x4-four" )
    def testModelF3X4FourProducts(self):
        """test f3x4-fourproducts model.
        """
self.buildAndCheckModel( "f3x4-fourproducts" )
def testModelCodonsTwo(self):
"""test codons-two model
"""
self.buildAndCheckModel( "codons-two" )
codons = Bio.Data.CodonTable.standard_dna_table.forward_table
codon_frequencies = {}
n = 1
f = 61 * 62 / 2
for codon in Bio.Data.CodonTable.standard_dna_table.forward_table:
codon_frequencies[codon] = float(n)/f
n += 1
self.buildAndCheckModel( "codons-four", codon_frequencies = codon_frequencies )
def testModelCodonsFour(self):
"""test codons-four model
"""
self.buildAndCheckModel( "codons-four" )
codons = Bio.Data.CodonTable.standard_dna_table.forward_table
codon_frequencies = {}
n = 1
f = 61 * 62 / 2
for codon in Bio.Data.CodonTable.standard_dna_table.forward_table:
codon_frequencies[codon] = float(n)/f
n += 1
self.buildAndCheckModel( "codons-four", codon_frequencies = codon_frequencies )
def buildAndCheckModel(self, codon_model, **kwargs):
"""build various models checking parameter settings."""
model = Codons.buildCodonML(codon_model = codon_model,
**kwargs )
self.checkModel( model )
model = Codons.buildCodonML(codon_model = codon_model,
fix_kappa = True,
**kwargs )
self.checkModel( model )
model = Codons.buildCodonML(codon_model = codon_model,
fix_omega = True,
**kwargs )
self.checkModel( model )
model = Codons.buildCodonML(codon_model = codon_model,
fix_omega = True,
fix_kappa = True,
**kwargs )
self.checkModel( model )
model = Codons.buildCodonML( codon_model,
num_blocks=2,
grammar_type="linear-blocks",
shared_frequencies = False,
shared_rates = False,
**kwargs )
self.checkModel(model)
num_blocks = 2
model = Codons.buildCodonML( codon_model,
num_blocks=num_blocks,
grammar_type="linear-blocks",
shared_frequencies = True,
shared_rates = False,
**kwargs)
self.checkModel(model)
num_blocks = 2
model = Codons.buildCodonML( codon_model,
num_blocks=num_blocks,
grammar_type="linear-blocks",
shared_frequencies = False,
shared_rates = True,
**kwargs)
self.checkModel(model)
num_blocks = 2
model = Codons.buildCodonML( codon_model,
num_blocks=num_blocks,
grammar_type="linear-blocks",
shared_frequencies = True,
shared_rates = True,
**kwargs)
self.checkModel(model)
## test model with annotations
## build annotation
labels = string.letters.upper()
annotate_terminals = {}
for x in range(num_blocks):
annotations = []
key = []
for c in range( 0,3 ):
t = "B%i_COD%i" % (x, c)
key.append(t)
annotations.append( Annotation( row = "STATE",
column = t,
label = labels[x % len(labels)] ))
annotate_terminals[ tuple(key) ] = annotations
model = Codons.buildCodonML( codon_model,
num_blocks=2,
grammar_type="linear-blocks",
shared_frequencies = True,
annotate_terminals = annotate_terminals,
**kwargs )
# print model.getGrammar()
self.checkModel(model)
def checkModel(self, model ):
"""check a model."""
model.getGrammar()
frequencies = model.evaluateTerminalFrequencies()
matrix = model.evaluateRateMatrix()
if __name__ == '__main__':
unittest.main()
|
mjn19172/Savu
|
savu/test/test_utils.py
|
Python
|
apache-2.0
| 4,791
| 0
|
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: test_utils
:platform: Unix
:synopsis: utilities for the test framework
.. moduleauthor:: Mark Basham <scientificsoftware@diamond.ac.uk>
"""
import inspect
import tempfile
import os
import savu.plugins.utils as pu
from savu.data.structures import Data, PassThrough
from savu.data.structures import RawTimeseriesData, ProjectionData, VolumeData
def get_test_data_path(name):
"""Gets the full path to the test data
:param name: The name of the test file.
:type name: str
:returns: The full path to the example data.
"""
path = inspect.stack()[0][1]
return '/'.join(os.path.split(path)[0].split(os.sep)[:-2] +
['test_data', name])
def get_nx_tomo_test_data():
"""Gets the nx_tomo test data and returns it in the RawData Structure
:returns: a RawTimeseriesData Object containing the example data.
"""
path = get_test_data_path('24737.nxs')
raw_timeseries_data = RawTimeseriesData()
raw_timeseries_data.populate_from_nx_tomo(path)
return raw_timeseries_data
def get_projection_test_data():
"""Gets the test data and returns it in the ProjectionData Structure
:returns: a ProjectionData Object containing the example data.
"""
path = get_test_data_path('projections.h5')
projection_data = ProjectionData()
projection_data.populate_from_h5(path)
return projection_data
def get_appropriate_input_data(plugin):
data = []
if plugin.required_data_type() == RawTimeseriesData:
data.append(get_nx_tomo_test_data())
elif plugin.required_data_type() == ProjectionData:
data.append(get_projection_test_data())
elif plugin.required_data_type() == Data:
data.append(get_nx_tomo_test_data())
data.append(get_projection_test_data())
return data
def get_appropriate_output_data(plugin, data, mpi=False, file_name=None):
output = []
if plugin.output_data_type() == PassThrough:
output.append(data[0])
temp_file = file_name
if temp_file is None:
temp_file = tempfile.NamedTemporaryFile(suffix='.h5', delete=False)
temp_file = temp_file.name
if plugin.output_data_type() == RawTimeseriesData:
output.append(pu.get_raw_data(data[0], temp_file,
plugin.name, mpi,
plugin.get_output_shape(data[0])))
elif plugin.output_data_type() == ProjectionData:
output.append(pu.get_projection_data(data[0], temp_file,
plugin.name, mpi,
plugin.get_output_shape(data[0])))
elif plugin.output_data_type() == VolumeData:
output.append(pu.get_volume_data(data[0], temp_file,
plugin.name, mpi,
plugin.get_output_shape(data[0])))
elif plugin.output_data_type() == Data:
if type(data) is not list:
data = [data]
for datum in data:
if file_name is None:
temp_file = tempfile.NamedTemporaryFile(suffix='.h5',
delete=False)
temp_file = temp_file.name
if isinstance(datum, RawTimeseriesData):
output.append(pu.get_raw_data(datum, temp_file,
plugin.name, mpi,
plugin.get_output_shape(datum)))
elif isinstance(datum, ProjectionData):
output.append(pu.get_projection_data(datum, temp_file,
plugin.name, mpi,
plugin.get_output_shape(
datum)))
elif isinstance(datum, VolumeData):
output.append(pu.get_volume_data(datum, temp_file,
plugin.name, mpi,
plugin.get_output_shape(
datum)))
return output
|
CHEN-JIANGHANG/qcspgen
|
qcspgen/aggregator.py
|
Python
|
mit
| 2,819
| 0.001064
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from qcspgen_exception import QCSPGenException
import functools
def _verify_type(func):
"""
decorator to verify item type supplied for the wrapped function
:param func: decorated function
:return: wrapper
"""
@functools.wraps(func)
def wrapper(self, *item):
if not isinstance(item[-1], self.item_type):
raise QCSPGenException("- typeError: item is not type {}".format(self.item_type))
return func(self, *item)
return wrapper
class Aggregator(object):
"""
    Python-list-like base class that stores items of the same type, for example:
::
a = Aggregator(str)
a.append("hello")
a.append("world")
        print a.aggregation, a.item_type
:param item_kind: the item type for aggregator's elements
"""
def __init__(self, item_kind):
super(Aggregator, self).__init__()
self._aggregation = []
self._type = item_kind
@_verify_type
def append(self, item):
"""
to add an item for an aggregator
:param item: the item to be appended
:return: None
"""
self._aggregation.append(item)
@_verify_type
def remove(self, item):
"""
to remove an item from an aggregator
:param item: the item to be removed
:return: None
"""
try:
self._aggregation.remove(item)
except ValueError:
raise QCSPGenException("- item is not in {}, cannot remove".format(self.__class__.__name__))
def empty(self):
"""
to empty an aggregator
:return: None
"""
self._aggregation = []
@property
def aggregation(self):
"""
getter for _aggregation
:return: the list of added items
"""
return self._aggregation
@property
def item_type(self):
"""
getter for _type
:return: acceptable type for items of an aggregator
"""
return self._type
@property
def size(self):
"""
getter for item size
:return: the size of items
"""
return len(self._aggregation)
def __getitem__(self, item):
return self.aggregation[item]
@_verify_type
def __setitem__(self, key, value):
self.aggregation[key] = value
def __str__(self):
return str([str(item) for item in self._aggregation])
__repr__ = __str__
if __name__ == "__main__":
# try:
# a = Aggregator(str)
# a.append("hello")
# a.append("123")
# a[2] = 123
# print a[0]
# print a.aggregation, a.item_type
# except QCSPGenException, e:
# e.display()
# except Exception, e:
# print e.message
pass
|
Auquan/auquan-toolbox-python
|
auquanToolbox/resultviewer.py
|
Python
|
mit
| 13,093
| 0.003208
|
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use("TkAgg")  # important to call this right after importing matplotlib, before pyplot is imported
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib import style
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.ticker as mtick
try:
import Tkinter as tk
import ttk
import tkFont
import tkMessageBox
except:
import tkinter as tk
from tkinter import ttk
from tkinter import font as tkFont
from tkinter import messagebox as tkMessageBox
from auquanToolbox.metrics import metrics, baseline
def loadgui(back_data, exchange, base_index, budget, logger):
######################
# Setup data
######################
position = back_data['POSITION']
close = back_data['CLOSE']
# position as % of total portfolio
long_position = (position * close).div(back_data['VALUE'], axis=0)
short_position = long_position.copy()
long_position[long_position < 0] = 0
short_position[short_position > 0] = 0
daily_pnl = back_data['DAILY_PNL'] / budget
total_pnl = back_data['TOTAL_PNL'] / budget
if base_index:
baseline_data = baseline(exchange, base_index, total_pnl.index, logger)
stats = metrics(daily_pnl, total_pnl, baseline_data, base_index)
else:
baseline_data = {}
stats = metrics(daily_pnl, total_pnl, {}, base_index)
daily_return = daily_pnl.sum(axis=1)
total_return = total_pnl.sum(axis=1)
long_exposure = long_position.sum(axis=1)
short_exposure = short_position.sum(axis=1)
zero_line = np.zeros(daily_pnl.index.size)
# print to logger
for x in stats.keys():
logger.info('%s : %0.2f' % (x, stats[x]))
def isDate(val):
# Function to validate if a given entry is valid date
try:
d = pd.to_datetime(val)
if d > daily_pnl.index[0] and d < daily_pnl.index[-1]:
return True
else:
return False
except ValueError:
raise ValueError("Not a Valid Date")
return False
def newselection(event):
# Function to autoupdate chart on new selection from dropdown
i = dropdown.current()
market = ['TOTAL PORTFOLIO'] + daily_pnl.columns.values.tolist()
plot(daily_pnl, total_pnl, long_position, short_position, baseline_data, base_index, market[i], box_value2.get(), box_value3.get())
def plot(daily_pnl, total_pnl, long_position, short_position,
baseline_data, base_index, market='TOTAL PORTFOLIO',
start=daily_pnl.index.format()[0],
end=daily_pnl.index.format()[-1]):
# New plot when custom fields are changed
plt.clf()
# plt.style.use("seaborn-whitegrid")
daily_pnl = daily_pnl.loc[start:end]
total_pnl = total_pnl.loc[start:end]
long_position = long_position.loc[start:end]
short_position = short_position.loc[start:end]
if market == 'TOTAL PORTFOLIO':
daily_return = daily_pnl.sum(axis=1)
total_return = total_pnl.sum(axis=1)
long_exposure = long_position.sum(axis=1)
short_exposure = short_position.sum(axis=1)
else:
daily_return = daily_pnl[market]
total_return = total_pnl[market]
long_exposure = long_position[market]
short_exposure = short_position[market]
zero_line = np.zeros(daily_pnl.index.size)
# f, plot_arr = plt.subplots(3, sharex=True)
total_plot = plt.subplot2grid((10, 8), (0, 0), colspan=12, rowspan=4)
daily_plot = plt.subplot2grid((10, 8), (5, 0), colspan=12, rowspan=2, sharex=total_plot)
position_plot = plt.subplot2grid((10, 8), (8, 0), colspan=12, rowspan=2, sharex=total_plot)
ind = np.arange(len(daily_pnl.index))
total_plot.set_title('Total PnL')
total_plot.plot(ind, zero_line, 'k')
total_plot.plot(ind, total_return.values, 'b', linewidth=0.5, label='strategy')
total_plot.legend(loc='upper left')
total_plot.autoscale(tight=True)
plt.setp(total_plot.get_xticklabels(), visible=False)
total_plot.yaxis.set_major_formatter(mtick.FuncFormatter(format_perc))
total_plot.set_ylabel('Cumulative Performance')
total_plot.legend(bbox_to_anchor=(0.03, 0.97), loc='lower left', borderaxespad=0.)
if base_index:
total_plot.plot(ind, baseline_data['TOTAL_PNL'], 'g', linewidth=0.5, label=base_index)
daily_plot.set_title('Daily PnL')
daily_plot.plot(ind, zero_line, 'k')
daily_plot.bar(ind, daily_return.values, 0.2, align='center', color='c', label='strategy')
daily_plot.legend(loc='upper left')
daily_plot.autoscale(tight=True)
plt.setp(daily_plot.get_xticklabels(), visible=False)
daily_plot.yaxis.set_major_formatter(mtick.FuncFormatter(format_perc))
daily_plot.set_ylabel('Daily Performance')
daily_plot.legend(bbox_to_anchor=(0.03, 0.97), loc='lower left', borderaxespad=0.)
position_plot.set_title('Daily Exposure')
position_plot.plot(ind, zero_line, 'k')
position_plot.bar(ind, short_exposure.values, 0.3, linewidth=0, align='center', color='r', label='short')
position_plot.bar(ind, long_exposure.values, 0.3, linewidth=0, align='center', color='b', label='long')
position_plot.legend(loc='upper left')
position_plot.autoscale(tight=True)
position_plot.xaxis.set_major_formatter(mtick.FuncFormatter(format_date))
position_plot.yaxis.set_major_formatter(mtick.FuncFormatter(format_perc))
        position_plot.set_ylabel('Long/Short %')
position_plot.legend(bbox_to_anchor=(0.03, 0.97), loc='lower left', borderaxespad=0.)
plt.gcf().canvas.draw()
def update_plot():
# Callback Function for plot button
try:
d1 = pd.to_datetime(box_value2.get())
d2 = pd.to_datetime(box_value3.get())
if d1 >= daily_pnl.index[0] and d2 <= daily_pnl.index[-1]:
plot(daily_pnl, total_pnl, long_position, short_position, baseline_data, base_index, box_value.get(), box_value2.get(), box_value3.get())
else:
tkMessageBox.showinfo("Date out of Range", "Please enter a date from %s to %s" % (daily_pnl.index[0].strftime('%Y-%m-%d'), daily_pnl.index[-1].strftime('%Y-%m-%d')))
except ValueError:
raise ValueError("Not a Valid Date")
def close_window():
# Callback function for Quit Button
GUI.destroy()
GUI.quit()
def format_date(x, pos=None):
# Format axis ticklabels to dates
thisind = np.clip(int(x + 0.5), 0, len(daily_pnl.index) - 1)
return daily_pnl.index[thisind].strftime('%b-%y')
def format_perc(y, pos=None):
# Format axis ticklabels to %
if budget > 1:
return '{percent:.2%}'.format(percent=y)
else:
return y
def onFrameConfigure(canvas):
canvas.configure(scrollregion=canvas.bbox("all"))
######################
# GUI mainloop
######################
# Create widget
GUI = tk.Tk()
GUI.title('Backtest Results')
winCanvas = tk.Canvas(GUI, borderwidth=0, background="#ffffff", width=1500, height=1000)
frame = tk.Frame(winCanvas, background="#ffffff")
vsb = tk.Scrollbar(GUI, orient="vertical", command=winCanvas.yview)
hsb = tk.Scrollbar(GUI, orient="horizontal", command=winCanvas.xview)
winCanvas.configure(yscrollcommand=vsb.set)
winCanvas.configure(xscrollcommand=hsb.set)
vsb.pack(side="left", fill="y")
hsb.pack(side="bottom", fill="x")
winCanvas.pack(side="right", fill="both", expand=True)
winCanvas.create_window((50, 50), window=frame, anchor="nw")
frame.bind("<Configure>", lambda event, canvas=winCanvas: onFrameConfigure(winCanvas))
# Create dropdown for market
Label_1 = tk.Label(frame, text="Trading Performance:")
L
|
serendi-app/serendi-server
|
docs/conf.py
|
Python
|
agpl-3.0
| 9,191
| 0.006093
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Serendi documentation build configuration file, created by
# sphinx-quickstart on Sun Jun 14 10:26:42 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Serendi'
copyright = '2015, Matthias, Roland'
author = 'Matthias, Roland'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Serendidoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Serendi.tex', 'Serendi Documentation',
'Matthias, Roland', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
|
highco-groupe/odoo
|
addons/share/wizard/share_wizard.py
|
Python
|
agpl-3.0
| 50,906
| 0.006149
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import random
import time
import uuid
from openerp import SUPERUSER_ID
import simplejson
from openerp import api
from openerp import tools
from openerp.osv import fields, osv
from openerp.osv import expression
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval
import openerp
_logger = logging.getLogger(__name__)
FULL_ACCESS = ('perm_read', 'perm_write', 'perm_create', 'perm_unlink')
READ_WRITE_ACCESS = ('perm_read', 'perm_write')
READ_ONLY_ACCESS = ('perm_read',)
UID_ROOT = 1
# Pseudo-domain to represent an empty filter, constructed using
# osv.expression's DUMMY_LEAF
DOMAIN_ALL = [(1, '=', 1)]
# A good selection of easy to read password characters (e.g. no '0' vs 'O', etc.)
RANDOM_PASS_CHARACTERS = 'aaaabcdeeeefghjkmnpqrstuvwxyzAAAABCDEEEEFGHJKLMNPQRSTUVWXYZ23456789'
def generate_random_pass():
return ''.join(random.sample(RANDOM_PASS_CHARACTERS,10))
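# Illustrative only: random.sample draws 10 characters (without replacement of
# positions) from RANDOM_PASS_CHARACTERS, so the repeated letters simply weight
# the distribution; a result such as generate_random_pass() -> 'aK3mDqRzE4' is a
# hypothetical example, not a fixed value.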
class share_wizard(osv.TransientModel):
_name = 'share.wizard'
_description = 'Share Wizard'
def _assert(self, condition, error_message, context=None):
"""Raise a user error with the given message if condition is not met.
The error_message should have been translated with _().
"""
if not condition:
raise osv.except_osv(_('Sharing access cannot be created.'), error_message)
def has_group(self, cr, uid, module, group_xml_id, context=None):
"""Returns True if current user is a member of the group identified by the module, group_xml_id pair."""
# if the group was deleted or does not exist, we say NO (better safe than sorry)
try:
model, group_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, module, group_xml_id)
except ValueError:
return False
return group_id in self.pool.get('res.users').read(cr, uid, [uid], ['groups_id'], context=context)[0]['groups_id']
def has_share(self, cr, uid, unused_param, context=None):
return self.has_group(cr, uid, module='base', group_xml_id='group_no_one', context=context)
def _user_type_selection(self, cr, uid, context=None):
"""Selection values may be easily overridden/extended via inheritance"""
return [('embedded', _('Direct link or embed code')), ('emails',_('Emails')), ]
"""Override of create() to auto-compute the action name"""
def create(self, cr, uid, values, context=None):
if 'action_id' in values and not 'name' in values:
action = self.pool.get('ir.actions.actions').browse(cr, uid, values['action_id'], context=context)
values['name'] = action.name
return super(share_wizard,self).create(cr, uid, values, context=context)
@api.cr_uid_ids_context
def share_url_template(self, cr, uid, _ids, context=None):
# NOTE: take _ids in parameter to allow usage through browse_record objects
base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url', default='', context=context)
if base_url:
base_url += '/login?db=%(dbname)s&login=%(login)s&key=%(password)s'
extra = context and context.get('share_url_template_extra_arguments')
if extra:
base_url += '&' + '&'.join('%s=%%(%s)s' % (x,x) for x in extra)
hash_ = context and context.get('share_url_template_hash_arguments')
if hash_:
base_url += '#' + '&'.join('%s=%%(%s)s' % (x,x) for x in hash_)
return base_url
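    # Illustrative (assuming ir.config_parameter holds web.base.url =
    # 'https://example.com' and the context carries no extra/hash arguments):
    # share_url_template() returns
    # 'https://example.com/login?db=%(dbname)s&login=%(login)s&key=%(password)s',
    # which callers such as _share_root_url below fill in via %-interpolation.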
def _share_root_url(self, cr, uid, ids, _fieldname, _args, context=None):
result = dict.fromkeys(ids, '')
data = dict(dbname=cr.dbname, login='', password='')
for this in self.browse(cr, uid, ids, context=context):
result[this.id] = this.share_url_template() % data
return result
def _generate_embedded_code(self, wizard, options=None):
cr, uid, context = wizard.env.args
if options is None:
options = {}
js_options = {}
title = options['title'] if 'title' in options else wizard.embed_option_title
search = (options['search'] if 'search' in options else wizard.embed_option_search) if wizard.access_mode != 'readonly' else False
if not title:
js_options['display_title'] = False
if search:
js_options['search_view'] = True
js_options_str = (', ' + simplejson.dumps(js_options)) if js_options else ''
base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url', default=None, context=context)
user = wizard.result_line_ids[0]
return """
<script type="text/javascript" src="%(base_url)s/web/webclient/js"></script>
<script type="text/javascript">
new openerp.init(%(init)s).web.embed(%(server)s, %(dbname)s, %(login)s, %(password)s,%(action)d%(options)s);
</script> """ % {
'init': simplejson.dumps(openerp.conf.server_wide_modules),
'base_url': base_url or '',
'server': simplejson.dumps(base_url),
'dbname': simplejson.dumps(cr.dbname),
'login': simplejson.dumps(user.login),
'password': simplejson.dumps(user.password),
'action': user.user_id.action_id.id,
'options': js_options_str,
}
def _embed_code(self, cr, uid, ids, _fn, _args, context=None):
result = dict.fromkeys(ids, '')
for this in self.browse(cr, uid, ids, context=context):
result[this.id] = self._generate_embedded_code(this)
return result
def _embed_url(self, cr, uid, ids, _fn, _args, context=None):
if context is None:
context = {}
result = dict.fromkeys(ids, '')
        for this in self.browse(cr, uid, ids, context=context):
if this.result_line_ids:
ctx = dict(context, share_url_template_hash_arguments=['action'])
user = this.result_line_ids[0]
                data = dict(dbname=cr.dbname, login=user.login, password=user.password, action=this.action_id.id)
result[this.id] = this.share_url_template(context=ctx) % data
return result
_columns = {
'action_id': fields.many2one('ir.actions.act_window', 'Action to share', required=True,
help="The action that opens the screen containing the data you wish to share."),
'view_type': fields.char('Current View Type', required=True),
'domain': fields.char('Domain', help="Optional domain for further data filtering"),
'user_type': fields.selection(lambda s, *a, **k: s._user_type_selection(*a, **k),'Sharing method', required=True,
help="Select the type of user(s) you would like to share data with."),
'new_users': fields.text("Emails"),
'email_1': fields.char('New user email', size=64),
'email_2': fields.char('New user email', size=64),
'email_3': fields.char('New user email', size=64),
'invite': fields.boolean('Invite users to OpenSocial record'),
'access_mode': fields.selection([('readonly','Can view'),('readwrite','Can edit')],'Access Mode', required=True,
|
jgontrum/jgsnippets
|
jgsnippets/elasticsearch/__init__.py
|
Python
|
mit
| 46
| 0.021739
|
from jgsnippets.elasticsearch.crud import Crud
|
goldsborough/algs4
|
stacks-queues/python/queue.py
|
Python
|
mit
| 205
| 0.034146
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class Queue:
def __init__(self):
self.queue = []
    def enqueue(self, item):
self.queue.append(item)
    def dequeue(self):
return self.queue.pop(0)
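# Minimal usage sketch (not part of the original file):
if __name__ == '__main__':
    q = Queue()
    q.enqueue('first')
    q.enqueue('second')
    assert q.dequeue() == 'first'  # FIFO: the oldest item is returned first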
|
aldenjenkins/foobargamingwebsite
|
djangobb_forum/forms.py
|
Python
|
bsd-3-clause
| 22,552
| 0.004257
|
# coding: utf-8
from __future__ import unicode_literals
import os.path
from datetime import timedelta
from django import forms
from django.conf import settings
from django.contrib.auth import get_user_model
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from djangobb_forum.models import Topic, Post, Profile, Reputation, Report, \
Attachment, Poll, PollChoice
from djangobb_forum import settings as forum_settings
from djangobb_forum.util import convert_text_to_html, set_language
from django.core.validators import MaxValueValidator
import types
from PIL import Image
User = get_user_model()
SORT_USER_BY_CHOICES = (
('username', _('Username')),
('registered', _('Registered')),
('num_posts', _('No. of posts')),
)
SORT_POST_BY_CHOICES = (
('0', _('Post time')),
('1', _('Author')),
('2', _('Subject')),
('3', _('Forum')),
)
SORT_DIR_CHOICES = (
('ASC', _('Ascending')),
('DESC', _('Descending')),
)
SHOW_AS_CHOICES = (
('topics', _('Topics')),
('posts', _('Posts')),
)
SEARCH_IN_CHOICES = (
('all', _('Message text and topic subject')),
('message', _('Message text only')),
('topic', _('Topic subject only')),
)
class AddPostForm(forms.ModelForm):
FORM_NAME = "AddPostForm" # used in view and template submit button
name = forms.CharField(label=_('Subject'), max_length=255,
widget=forms.TextInput(attrs={'size':'115'}))
    attachment = forms.FileField(label=_('Attachment'), required=False)
subscribe = forms.BooleanField(label=_('Subscribe'), help_text=_("Subscribe this topic."), required=False)
class Meta:
model = Post
fields = ['body']
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user', None)
self.topic = kwargs.pop('topic', None)
self.forum = kwargs.pop('forum', None)
self.ip = kwargs.pop('ip', None)
        self.contest = kwargs.pop('contest', None)
super(AddPostForm, self).__init__(*args, **kwargs)
if self.topic:
self.fields['name'].widget = forms.HiddenInput()
self.fields['name'].required = False
if self.contest:
self.fields['body'].widget = forms.Textarea(attrs={'class':'markup derpflav', 'rows':'20', 'cols':'95'})
else:
self.fields['body'].widget = forms.Textarea(attrs={'class': 'markup', 'rows': '20', 'cols': '95'})
if not forum_settings.ATTACHMENT_SUPPORT:
self.fields['attachment'].widget = forms.HiddenInput()
self.fields['attachment'].required = False
def clean(self):
'''
checking is post subject and body contains not only space characters
'''
errmsg = _('Can\'t be empty nor contain only whitespace characters')
cleaned_data = self.cleaned_data
body = cleaned_data.get('body')
subject = cleaned_data.get('name')
if subject:
if not subject.strip():
self._errors['name'] = self.error_class([errmsg])
del cleaned_data['name']
if body:
if not body.strip():
self._errors['body'] = self.error_class([errmsg])
del cleaned_data['body']
return cleaned_data
def clean_attachment(self):
if self.cleaned_data['attachment']:
memfile = self.cleaned_data['attachment']
memfile_string = memfile.name.decode("utf-8")
if memfile.size > forum_settings.ATTACHMENT_SIZE_LIMIT:
raise forms.ValidationError(_('Attachment is too big! Your size: {} bytes, max size {} bytes'.format(memfile.size,forum_settings.ATTACHMENT_SIZE_LIMIT)))
if len(memfile_string) > 20:
raise forms.ValidationError(_('Attachment name cannot be greater than 20 characters including extension.'))
return self.cleaned_data['attachment']
def save(self):
if self.forum:
topic = Topic(forum=self.forum,
user=self.user,
name=self.cleaned_data['name'])
topic.save()
else:
topic = self.topic
if self.cleaned_data['subscribe']:
            # User would like to subscribe to this topic
topic.subscribers.add(self.user)
post = Post(topic=topic, user=self.user, user_ip=self.ip,
markup=self.user.forum_profile.markup,
body=self.cleaned_data['body'])
post.save()
if forum_settings.ATTACHMENT_SUPPORT:
self.save_attachment(post, self.cleaned_data['attachment'])
return post
def save_attachment(self, post, memfile):
if memfile:
obj = Attachment(size=memfile.size, content_type=memfile.content_type,
name=memfile.name, post=post)
dir = os.path.join(settings.MEDIA_ROOT, forum_settings.ATTACHMENT_UPLOAD_TO)
fname = '%d.0' % post.id
path = os.path.join(dir, fname)
open(path, 'wb').write(memfile.read())
obj.path = fname
obj.save()
class UploadAvatarForm(forms.ModelForm):
class Meta:
model = Profile
fields = ['avatar']
avatar = forms.ImageField(label=_('Avatar'), required=False)
def __init__(self, *args, **kwargs):
extra_args = kwargs.pop('extra_args', {})
self.profile = kwargs['instance']
super(UploadAvatarForm, self).__init__(*args, **kwargs)
#
# def clean_attachment(self):
# if self.cleaned_data['attachment']:
# memfile = self.cleaned_data['avatar']
# if memfile.size > forum_settings.ATTACHMENT_SIZE_LIMIT:
# raise forms.ValidationError(_('Attachment is too big'))
# return self.cleaned_data['attachment']
def save(self, commit=True):
self.profile.avatar = self.cleaned_data['avatar']
if commit:
self.profile.save()
return self.profile
def clean_avatar(self):
data = self.cleaned_data['avatar']
if type(data) != types.BooleanType:
if data.size > 1024*1024/2:
error = _("Your file is too big (%(size)s bytes), "
"the maximum allowed size is %(max_valid_size)s bytes")
raise forms.ValidationError(error % {
'size': data.size,
'max_valid_size': 1024*1024/2
})
return ""
if data.image.size[0] > 80 or data.image.size[1] > 80:
error = _("%(width)s x %(height)s is an invalid image resolution,"
" Authorized resolutions are at most: %(max_width)s x %(max_height)s pixels.")
raise forms.ValidationError(error %
{'width': data.image.size[0],
'height': data.image.size[1],
'max_width': settings.DJANGOBB_AVATAR_WIDTH,
'max_height': settings.DJANGOBB_AVATAR_HEIGHT})
return ""
if settings.AVATAR_ALLOWED_FILE_EXTS:
root, ext = os.path.splitext(data.name.lower())
if ext not in settings.AVATAR_ALLOWED_FILE_EXTS:
valid_exts = ", ".join(settings.AVATAR_ALLOWED_FILE_EXTS)
error = _("%(ext)s is an invalid file extension. "
"Authorized extensions are: %(valid_exts_list)s")
raise forms.ValidationError(error %
{'ext': ext,
'valid_exts_list': valid_exts})
return ""
# The image passes the tests!
return data
else:
#The avatar field is empty therefore the user chose to delete the image in the form.
return ""
class EditPostForm(forms.ModelForm):
name = forms.CharField(required=False, label=_('Subject'),
|
amwelch/a10sdk-python
|
a10sdk/core/ip/ip_nat_alg.py
|
Python
|
apache-2.0
| 802
| 0.011222
|
from a10sdk.common.A10BaseClass import A10BaseClass
class Alg(A10BaseClass):
"""Class Description::
Change NAT ALG Settings.
Class alg supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/ip/nat/alg`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "alg"
self.a10_url="/axapi/v3/ip/nat/alg"
self.DeviceProxy = ""
        self.pptp = {}
        for keys, value in kwargs.items():
            setattr(self, keys, value)
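# Hedged usage sketch (not from the original module): keyword arguments passed to
# Alg become attributes through the setattr loop above, e.g.
#     alg = Alg(pptp={"pptp-value": 1})
#     alg.pptp  ->  {"pptp-value": 1}
# The CRUD operations themselves are inherited from A10BaseClass and require a
# configured DeviceProxy (see common/device_proxy.py); neither is shown here.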
|
picklepete/pyicloud
|
tests/test_account.py
|
Python
|
mit
| 4,486
| 0.002006
|
"""Account service tests."""
from unittest import TestCase
from . import PyiCloudServiceMock
from .const import AUTHENTICATED_USER, VALID_PASSWORD
class AccountServiceTest(TestCase):
"""Account service tests."""
    service = None
def setUp(self):
"""Set up tests."""
self.service = PyiCloudServiceMock(AUTHENTICATED_USER, VALID_PASSWORD).account
def test_repr(self):
"""Tests representation."""
# fmt: off
assert repr(self.service) == "<AccountService: {devices: 2, family: 3, storage: 3020076244 bytes free}>"
# fmt: on
def test_devices(self):
"""Tests devices."""
assert self.service.devices
assert len(self.service.devices) == 2
for device in self.service.devices:
assert device.name
assert device.model
assert device.udid
assert device["serialNumber"]
assert device["osVersion"]
assert device["modelLargePhotoURL2x"]
assert device["modelLargePhotoURL1x"]
assert device["paymentMethods"]
assert device["name"]
assert device["model"]
assert device["udid"]
assert device["modelSmallPhotoURL2x"]
assert device["modelSmallPhotoURL1x"]
assert device["modelDisplayName"]
# fmt: off
assert repr(device) == "<AccountDevice: {model: "+device.model_display_name+", name: "+device.name+"}>"
# fmt: on
def test_family(self):
"""Tests family members."""
assert self.service.family
assert len(self.service.family) == 3
for member in self.service.family:
assert member.last_name
assert member.dsid
assert member.original_invitation_email
assert member.full_name
assert member.age_classification
assert member.apple_id_for_purchases
assert member.apple_id
assert member.first_name
assert not member.has_screen_time_enabled
assert not member.has_ask_to_buy_enabled
assert not member.share_my_location_enabled_family_members
assert member.dsid_for_purchases
# fmt: off
assert repr(member) == "<FamilyMember: {name: "+member.full_name+", age_classification: "+member.age_classification+"}>"
# fmt: on
def test_storage(self):
"""Tests storage."""
assert self.service.storage
# fmt: off
assert repr(self.service.storage) == "<AccountStorage: {usage: 43.75% used of 5368709120 bytes, usages_by_media: OrderedDict([('photos', <AccountStorageUsageForMedia: {key: photos, usage: 0 bytes}>), ('backup', <AccountStorageUsageForMedia: {key: backup, usage: 799008186 bytes}>), ('docs', <AccountStorageUsageForMedia: {key: docs, usage: 449092146 bytes}>), ('mail', <AccountStorageUsageForMedia: {key: mail, usage: 1101522944 bytes}>)])}>"
# fmt: on
def test_storage_usage(self):
"""Tests storage usage."""
assert self.service.storage.usage
usage = self.service.storage.usage
assert usage.comp_storage_in_bytes or usage.comp_storage_in_bytes == 0
assert usage.used_storage_in_bytes
assert usage.used_storage_in_percent
assert usage.available_storage_in_bytes
assert usage.available_storage_in_percent
assert usage.total_storage_in_bytes
assert usage.commerce_storage_in_bytes or usage.commerce_storage_in_bytes == 0
assert not usage.quota_over
assert not usage.quota_tier_max
assert not usage.quota_almost_full
assert not usage.quota_paid
# fmt: off
assert repr(usage) == "<AccountStorageUsage: "+str(usage.used_storage_in_percent)+"% used of "+str(usage.total_storage_in_bytes)+" bytes>"
# fmt: on
def test_storage_usages_by_media(self):
"""Tests storage usages by media."""
assert self.service.storage.usages_by_media
for usage_media in self.service.storage.usages_by_media.values():
assert usage_media.key
assert usage_media.label
assert usage_media.color
assert usage_media.usage_in_bytes or usage_media.usage_in_bytes == 0
# fmt: off
assert repr(usage_media) == "<AccountStorageUsageForMedia: {key: "+usage_media.key+", usage: "+str(usage_media.usage_in_bytes)+" bytes}>"
# fmt: on
|
agarciamontoro/TFG
|
Software/Stuff/test_ray.py
|
Python
|
gpl-2.0
| 3,739
| 0.000802
|
import os
import numpy as np
from matplotlib import pyplot as plt
from numpy import genfromtxt
from matplotlib import cm
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
def axisEqual3D(ax):
extents = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
sz = extents[:, 1] - extents[:, 0]
centers = np.mean(extents, axis=1)
maxsize = max(abs(sz))
r = maxsize/2
for ctr, dim in zip(centers, 'xyz'):
getattr(ax, 'set_{}lim'.format(dim))(ctr - r, ctr + r)
def drawRay(ax, filePath):
# Retrieve ray points
sphericalPoints = genfromtxt(filePath, delimiter=',')
# Retrieve the actual data
r = sphericalPoints[:, 3]
theta = sphericalPoints[:, 4]
phi = sphericalPoints[:, 5]
cosT = np.cos(theta)
sinT = np.sin(theta)
cosP = np.cos(phi)
sinP = np.sin(phi)
x = r * sinT * cosP
y = r * sinT * sinP
z = r * cosT
ax.plot(x, y, z, label='Ray0')
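# Sanity check for the spherical-to-Cartesian conversion above: r=100,
# theta=pi/2, phi=0 maps to (x, y, z) = (100, 0, 0), which is exactly the camera
# position computed in drawCamera below.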
def drawRays(ax, filePath):
# Retrieve ray points
data = genfromtxt(filePath, delimiter=',')
for i in range(0, 100, 10):
ray = data[data[:, 0] == i, :]
ray = ray[ray[:, 2].argsort()[::-1]]
print(ray)
r = ray[:, 3]
theta = ray[:, 4]
phi = ray[:, 5]
cosT = np.cos(theta)
sinT = np.sin(theta)
cosP = np.cos(phi)
sinP = np.sin(phi)
x = r * cosT * sinP
y = r * sinT * sinP
z = r * cosP
ax.plot(x, y, z, label='Ray0', c='blue')
def drawCamera(ax):
camR = 100
camTheta = np.pi/2
camPhi = 0
camX = camR * np.sin(camTheta) * np.cos(camPhi)
camY = camR * np.sin(camTheta) * np.sin(camPhi)
camZ = camR * np.cos(camTheta)
ax.scatter(camX, camY, camZ, s=100, c='red')
x = [1, 1, -1, -1]
y = [1, -1, -1, 1]
z = [-1, -1, -1, -1]
verts = [(x[i], y[i], z[i]) for i in range(4)]
# ax.add_collection3d(Poly3DCollection(verts))
def drawAxes(ax, d=150):
ax.plot((-d, d), (0, 0), (0, 0), 'grey')
ax.plot((0, 0), (-d, d), (0, 0), 'grey')
ax.plot((0, 0), (0, 0), (-d, d), 'gray')
def drawBlackHole(ax, r=5):
# Draw black hole
u = np.linspace(0, 2 * np.pi, 100)
v = np.linspace(0, np.pi, 100)
x = r * np.outer(np.cos(u), np.sin(v))
y = r * np.outer(np.sin(u), np.sin(v))
z = r * np.outer(np.ones(np.size(u)), np.cos(v))
ax.plot_surface(x, y, z, rstride=4, cstride=4, color='black')
def absoluteFilePaths(directory):
for dirpath, _, filenames in os.walk(directory):
for f in filenames:
yield os.path.abspath(os.path.join(dirpath, f))
if __name__ == '__main__':
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_axis_off()
ax.set_xlim3d(-25, 25)
ax.set_ylim3d(-25, 25)
ax.set_zlim3d(-25, 25)
# axisEqual3D(ax)
drawAxes(ax)
drawBlackHole(ax)
drawCamera(ax)
# drawRay(ax, "Data/rayPositions.csv")
# drawRay(ax, "Data/middleRay.csv")
# drawRays(ax, "Data/rays.csv")
# for fileName in absoluteFilePaths("Data/Spin00001"):
# if fileName.endswith(".csv"):
# drawRay(ax, fileName)
#
drawRay(ax, "Data/Spin00001/ray00.csv")
drawRay(ax, "Data/Spin00001/ray10.csv")
drawRay(ax, "Data/Spin00001/ray20.csv")
# drawRay(ax, "Data/Spin00001/ray30.csv")
drawRay(ax, "Data/Spin00001/ray40.csv")
drawRay(ax, "Data/Spin00001/ray50.csv")
drawRay(ax, "Data/Spin00001/ray60.csv")
# drawRay(ax, "Data/Spin00001/ray70.csv")
drawRay(ax, "Data/Spin00001/ray80.csv")
drawRay(ax, "Data/Spin00001/ray90.csv")
drawRay(ax, "Data/Spin00001/ray99.csv")
# ax.legend()
plt.show()
|
jleclanche/pywow
|
wdbc/main.py
|
Python
|
cc0-1.0
| 10,011
| 0.032864
|
from cStringIO import StringIO
from struct import pack, unpack, error as StructError
from .log import log
from .structures import fields
class DBFile(object):
"""
Base class for WDB and DBC files
"""
@classmethod
def open(cls, file, build, structure, environment):
if isinstance(file, basestring):
file = open(file, "rb")
instance = cls(file, build, environment)
instance._readHeader()
instance.setStructure(structure)
instance._rowDynamicFields = 0 # Dynamic fields index, used when parsing a row
instance._readAddresses()
return instance
def __init__(self, file=None, build=None, environment=None):
		self._addresses = {}
self._values = {}
self.file = file
self.build = build
self.environment = environment
def __repr__(self):
return "%s(file=%r, build=%r)" % (self.__class__.__name__, self.file, self.build)
def __contains__(self, id):
return id in self._addresses
def __getitem__(self, item):
if isinstance(item, slice):
keys = sorted(self._addresses.keys())[item]
return [self[k] for k in keys]
		if item not in self._values:
self._parse_row(item)
return self._values[item]
def __setitem__(self, item, value):
if not isinstance(item, int):
raise TypeError("DBFile indices must be integers, not %s" % (type(item)))
if isinstance(value, DBRow):
self._values[item] = value
self._addresses[item] = -1
else:
# FIXME technically we should allow DBRow, but this is untested and will need resetting parent
raise TypeError("Unsupported type for DBFile.__setitem__: %s" % (type(value)))
def __delitem__(self, item):
if item in self._values:
del self._values[item]
del self._addresses[item]
def __iter__(self):
return self._addresses.__iter__()
def __len__(self):
return len(self._addresses)
def _add_row(self, id, address, reclen):
if id in self._addresses: # Something's wrong here
log.warning("Multiple instances of row %r found in %s" % (id, self.file.name))
self._addresses[id] = (address, reclen)
def _parse_field(self, data, field, row=None):
"""
Parse a single field in stream.
"""
if field.dyn > self._rowDynamicFields:
return None # The column doesn't exist in this row, we set it to None
ret = None
try:
if isinstance(field, fields.StringField):
ret = self._parse_string(data)
elif isinstance(field, fields.DataField): # wowcache.wdb
length = getattr(row, field.master)
ret = data.read(length)
elif isinstance(field, fields.DynamicMaster):
ret, = unpack("<I", data.read(4))
self._rowDynamicFields = ret
else:
ret, = unpack("<%s" % (field.char), data.read(field.size))
except StructError:
log.warning("Field %s could not be parsed properly" % (field))
ret = None
return ret
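	# Worked example (illustrative): for a plain fixed-width column whose struct
	# format char is "I" (unsigned 32-bit int, field.size == 4), the final branch
	# above amounts to unpack("<I", data.read(4)) -- little-endian, matching the
	# DynamicMaster handling earlier in this method.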
def supportsSeeking(self):
return hasattr(self.file, "seek")
def append(self, row):
"""
Append a row at the end of the file.
If the row does not have an id, one is automatically assigned.
"""
i = len(self) + 1 # FIXME this wont work properly in incomplete files
if "_id" not in row:
row["_id"] = i
self[i] = row
def clear(self):
"""
Delete every row in the file
"""
for k in self.keys(): # Use key, otherwise we get RuntimeError: dictionary changed size during iteration
del self[k]
def keys(self):
return self._addresses.keys()
def items(self):
return [(k, self[k]) for k in self]
def parse_row(self, data, reclen=0):
"""
Assign data to a DBRow instance
"""
return DBRow(self, data=data, reclen=reclen)
def values(self):
"""
Return a list of the file's values
"""
return [self[id] for id in self]
def setRow(self, key, **values):
self.__setitem__(key, DBRow(self, columns=values))
def size(self):
if hasattr(self.file, "size"):
return self.file.size()
elif isinstance(self.file, file):
from os.path import getsize
return getsize(self.file.name)
raise NotImplementedError
def update(self, other):
"""
Update file from iterable other
"""
for k in other:
self[k] = other[k]
def write(self, filename=""):
"""
Write the file data on disk. If filename is not given, use currently opened file.
"""
_filename = filename or self.file.name
data = self.header.data() + self.data() + self.eof()
f = open(_filename, "wb") # Don't open before calling data() as uncached rows would be empty
f.write(data)
f.close()
log.info("Written %i bytes at %s" % (len(data), f.name))
if not filename: # Reopen self.file, we modified it
# XXX do we need to wipe self._values here?
self.file.close()
self.file = open(f.name, "rb")
class DBRow(list):
"""
A database row.
Names of the variables of that class should not be used in field names of structures
"""
initialized = False
def __init__(self, parent, data=None, columns=None, reclen=0):
self._parent = parent
self._values = {} # Columns values storage
self.structure = parent.structure
self.initialized = True # needed for __setattr__
if columns:
if type(columns) == list:
self.extend(columns)
elif type(columns) == dict:
self._default()
_cols = [k.name for k in self.structure]
for k in columns:
try:
self[_cols.index(k)] = columns[k]
except ValueError:
log.warning("Column %r not found" % (k))
elif data:
dynfields = 0
data = StringIO(data)
for field in self.structure:
_data = parent._parse_field(data, field, self)
self.append(_data)
if reclen:
real_reclen = reclen + self._parent.row_header_size
if data.tell() != real_reclen:
log.warning("Reclen not respected for row %r. Expected %i, read %i. (%+i)" % (self.id, real_reclen, data.tell(), real_reclen-data.tell()))
def __dir__(self):
result = self.__dict__.keys()
result.extend(self.structure.column_names)
return result
def __getattr__(self, attr):
if attr in self.structure:
return self._get_value(attr)
if attr in self.structure._abstractions: # Union abstractions etc
field, func = self.structure._abstractions[attr]
return func(field, self)
if "__" in attr:
return self._query(attr)
return super(DBRow, self).__getattribute__(attr)
def __int__(self):
return self.id
def __setattr__(self, attr, value):
# Do not preserve the value in DBRow! Use the save method to save.
if self.initialized and attr in self.structure:
self._set_value(attr, value)
return super(DBRow, self).__setattr__(attr, value)
def __setitem__(self, index, value):
if not isinstance(index, int):
raise TypeError("Expected int instance, got %s instead (%r)" % (type(index), index))
list.__setitem__(self, index, value)
col = self.structure[index]
self._values[col.name] = col.to_python(value, row=self)
def _get_reverse_relation(self, table, field):
"""
Return a list of rows matching the reverse relation
"""
if not hasattr(self._parent, "_reverse_relation_cache"):
self._parent._reverse_relation_cache = {}
cache = self._parent._reverse_relation_cache
tfield = table + "__" + field
if tfield not in cache:
cache[tfield] = {}
# First time lookup, let's build the cache
table = self._parent.environment.dbFile(table)
for row in table:
row = table[row]
id = row._raw(field)
if id not in cache[tfield]:
cache[tfield][id] = []
cache[tfield][id].append(row)
return cache[tfield].get(self.id, None)
def _matches(self, **kwargs):
for k, v in kwargs.items():
if not self._query(k, v):
return False
return True
def _query(self, rel, value=None):
"""
Parse a django-like multilevel relationship
"""
rels = rel.split("__")
if "" in rels: # empty string
raise ValueError("Invalid relation string")
first = rels[0]
if not hasattr(self, first):
if self._parent.environment.hasDbFile(first):
# Handle reverse relations, eg spell__item for item table
remainder = rel[len(first + "__"):]
return self._get_reverse_relation(first, remainder)
raise ValueError("Invalid relation string")
ret = self
rels = rels[::-1]
special = {
"contains": lambda x, y: x in y,
"exact": lambda x, y: x == y,
"icontains": lambda x, y: x.lower() in y.lower(),
"iexact": lambda x,
|
jjdmol/LOFAR
|
SAS/OTDB_Services/getParset.py
|
Python
|
gpl-3.0
| 2,018
| 0.002973
|
#!/usr/bin/env python
#coding: iso-8859-15
#
# Copyright (C) 2015
# ASTRON (Netherlands Institute for Radio Astronomy)
# P.O.Box 2, 7990 AA Dwingeloo, The Netherlands
#
# This file is part of the LOFAR software suite.
# The LOFAR software suite is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The LOFAR software suite is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>.
#
# $Id$
"""
"""
from lofar.messaging.RPC import RPC
from lofar.sas.otdb.config import DEFAULT_OTDB_SERVICE_BUSNAME
from lofar.sas.otdb.otdbrpc import OTDBRPC
if __name__ == "__main__":
from optparse import OptionParser
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger(__name__)
# Check the invocation arguments
parser = OptionParser("%prog -o obsid [options]")
parser.add_option("-B", "--busname", dest="busname", type="string", default=DEFAULT_OTDB_SERVICE_BUSNAME,
help="Busname on which OTDB commands are sent")
parser.add_option("-o", "--obsid", dest="obsid", type="int", default=0,
help="Observation/tree ID to get parset of")
(options, args) = parser.parse_args()
if not options.busname or not options.obsid:
parser.print_help()
sys.exit(1)
with OTDBRPC(busname=options.busname) as otdbrpc:
parset = otdbrpc.taskGetSpecification(otdb_id=options.obsid)["specification"]
for k,v in parset.iteritems():
print "%s = %s" % (k,v)
|
magne-max/zipline-ja
|
tests/data/test_us_equity_pricing.py
|
Python
|
apache-2.0
| 12,525
| 0
|
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sys import maxsize
from nose_parameterized import parameterized
from numpy import (
arange,
datetime64,
)
from numpy.testing import (
assert_array_equal,
)
from pandas import (
DataFrame,
Timestamp,
)
from pandas.util.testing import assert_index_equal
from zipline.data.us_equity_pricing import (
BcolzDailyBarReader,
NoDataBeforeDate,
NoDataAfterDate,
)
from zipline.pipeline.loaders.synthetic import (
OHLCV,
asset_start,
asset_end,
expected_bar_value,
expected_bar_values_2d,
make_bar_data,
)
from zipline.testing import seconds_to_timestamp
from zipline.testing.fixtures import (
WithBcolzEquityDailyBarReader,
ZiplineTestCase,
)
from zipline.utils.calendars import get_calendar
TEST_CALENDAR_START = Timestamp('2015-06-01', tz='UTC')
TEST_CALENDAR_STOP = Timestamp('2015-06-30', tz='UTC')
TEST_QUERY_START = Timestamp('2015-06-10', tz='UTC')
TEST_QUERY_STOP = Timestamp('2015-06-19', tz='UTC')
# One asset for each of the cases enumerated in load_raw_arrays_from_bcolz.
EQUITY_INFO = DataFrame(
[
# 1) The equity's trades start and end before query.
{'start_date': '2015-06-01', 'end_date': '2015-06-05'},
# 2) The equity's trades start and end after query.
{'start_date': '2015-06-22', 'end_date': '2015-06-30'},
# 3) The equity's data covers all dates in range.
{'start_date': '2015-06-02', 'end_date': '2015-06-30'},
# 4) The equity's trades start before the query start, but stop
# before the query end.
{'start_date': '2015-06-01', 'end_date': '2015-06-15'},
# 5) The equity's trades start and end during the query.
{'start_date': '2015-06-12', 'end_date': '2015-06-18'},
# 6) The equity's trades start during the query, but extend through
# the whole query.
{'start_date': '2015-06-15', 'end_date': '2015-06-25'},
],
index=arange(1, 7),
columns=['start_date', 'end_date'],
).astype(datetime64)
EQUITY_INFO['symbol'] = [chr(ord('A') + n) for n in range(len(EQUITY_INFO))]
TEST_QUERY_ASSETS = EQUITY_INFO.index
class BcolzDailyBarTestCase(WithBcolzEquityDailyBarReader, ZiplineTestCase):
EQUITY_DAILY_BAR_START_DATE = TEST_CALENDAR_START
EQUITY_DAILY_BAR_END_DATE = TEST_CALENDAR_STOP
@classmethod
def make_equity_info(cls):
return EQUITY_INFO
@classmethod
def make_equity_daily_bar_data(cls):
return make_bar_data(
EQUITY_INFO,
cls.equity_daily_bar_days,
)
@classmethod
def init_class_fixtures(cls):
super(BcolzDailyBarTestCase, cls).init_class_fixtures()
cls.sessions = cls.trading_calendar.sessions_in_range(
cls.trading_calendar.minute_to_session_label(TEST_CALENDAR_START),
cls.trading_calendar.minute_to_session_label(TEST_CALENDAR_STOP)
)
@property
def assets(self):
return EQUITY_INFO.index
def trading_days_between(self, start, end):
return self.sessions[self.sessions.slice_indexer(start, end)]
def asset_start(self, asset_id):
return asset_start(EQUITY_INFO, asset_id)
def asset_end(self, asset_id):
return asset_end(EQUITY_INFO, asset_id)
def dates_for_asset(self, asset_id):
start, end = self.asset_start(asset_id), self.asset_end(asset_id)
return self.trading_days_between(start, end)
def test_write_ohlcv_content(self):
result = self.bcolz_daily_bar_ctable
for column in OHLCV:
idx = 0
data = result[column][:]
multiplier = 1 if column == 'volume' else 1000
for asset_id in self.assets:
for date in self.dates_for_asset(asset_id):
self.assertEqual(
expected_bar_value(
asset_id,
date,
column
) * multiplier,
data[idx],
)
idx += 1
self.assertEqual(idx, len(data))
def test_write_day_and_id(self):
result = self.bcolz_daily_bar_ctable
idx = 0
ids = result['id']
days = result['day']
for asset_id in self.assets:
for date in self.dates_for_asset(asset_id):
self.assertEqual(ids[idx], asset_id)
self.assertEqual(date, seconds_to_timestamp(days[idx]))
idx += 1
def test_write_attrs(self):
result = self.bcolz_daily_bar_ctable
expected_first_row = {
'1': 0,
'2': 5, # Asset 1 has 5 trading days.
'3': 12, # Asset 2 has 7 trading days.
'4': 33, # Asset 3 has 21 trading days.
'5': 44, # Asset 4 has 11 trading days.
'6': 49, # Asset 5 has 5 trading days.
}
expected_last_row = {
'1': 4,
'2': 11,
'3': 32,
'4': 43,
'5': 48,
'6': 57, # Asset 6 has 9 trading days.
}
expected_calendar_offset = {
'1': 0, # Starts on 6-01, 1st trading day of month.
'2': 15, # Starts on 6-22, 16th trading day of month.
'3': 1, # Starts on 6-02, 2nd trading day of month.
'4': 0, # Starts on 6-01, 1st trading day of month.
'5': 9, # Starts on 6-12, 10th trading day of month.
'6': 10, # Starts on 6-15, 11th trading day of month.
}
self.assertEqual(result.attrs['first_row'], expected_first_row)
self.assertEqual(result.attrs['last_row'], expected_last_row)
self.assertEqual(
result.attrs['calendar_offset'],
expected_calendar_offset,
)
cal = get_calendar(result.attrs['calendar_name'])
first_session = Timestamp(result.attrs['start_session_ns'], tz='UTC')
end_session = Timestamp(result.attrs['end_session_ns'], tz='UTC')
sessions = cal.sessions_in_range(first_session, end_session)
assert_index_equal(
self.sessions,
sessions
)
def test_read_first_trading_day(self):
self.assertEqual(
self.bcolz_equity_daily_bar_reader.first_trading_day,
self.sessions[0],
)
def _check_read_results(self, columns, assets, start_date, end_date):
results = self.bcolz_equity_daily_bar_reader.load_raw_arrays(
columns,
start_date,
end_date,
assets,
)
dates = self.trading_days_between(start_date, end_date)
for column, result in zip(columns, results):
assert_array_equal(
result,
expected_bar_values_2d(
dates,
EQUITY_INFO,
column,
)
)
@parameterized.expand([
(['open'],),
(['close', 'volume'],),
(['volume', 'high', 'low'],),
(['open', 'high', 'low', 'close', 'volume'],),
])
def test_read(self, columns):
self._check_read_results(
columns,
self.assets,
TEST_QUERY_START,
TEST_QUERY_STOP,
)
def test_start_on_asset_start(self):
"""
        Test loading with queries that start on the first day of each asset's
        lifetime.
"""
columns = ['high', 'volume']
        for asset in self.assets:
self._check_read_results(
colum
|
rowhit/h2o-2
|
py/testdir_single_jvm/notest_exec2_poppush2_fail.py
|
Python
|
apache-2.0
| 2,065
| 0.008717
|
import unittest, random, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_browse as h2b, h2o_exec as h2e, h2o_import as h2i
initList = [
('crunk', "crunk=function(x){x+98};"),
('r.hex', 'r.hex=i.hex'),
]
# maybe can't have unused functions
phrases = [
"crunk=function(x){x+98};",
# "crunk=function(x){x+98};",
# fail
"r.hex[,3]=4;"
# fail
# "r.hex[,3]=crunk(2);",
#"r.hex[,3]=crunk(2);",
]
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(1)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_exec2_poppush2_fail(self):
bucket = 'smalldata'
csvPathname = 'iris/iris2.csv'
hexKey = 'i.hex'
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, schema='put', hex_key=hexKey)
exprList = []
while (len(exprList)!=20):
exprs = [random.choice(phrases) for j in range(random.randint(1,2))]
# check if we have mean2() before function defn
functionFound = False
for i, e in enumerate(exprs):
if 'function' in e:
functionFound = True
# h2o has problems with assigns after functions
if functionFound and len(exprs)> 1:
# pass
exprList.append("".join(exprs))
else:
exprList.append("".join(exprs))
# add this one for good measure (known fail)
# exprList += "crunk=function(x){x+98};r.hex[,3]=4;"
        exprList += ["function(x){x+98};r.hex[,3]=4;"]
        for resultKey, execExpr in initList:
h2e.exec_expr(h2o.nodes[0], execExpr, resultKey=resultKey, timeoutSecs=4)
for execExpr in exprList:
h2e.exec_expr(h2o.nodes[0], execExpr, resultKey=None, timeoutSecs=4)
if __name__ == '__main__':
h2o.unit_main()
|
jlazic/GlogSMS
|
sms/management/commands/check_failed_messages.py
|
Python
|
mit
| 1,774
| 0.003946
|
# -*- coding: utf-8 -*-
__author__ = 'josip@lazic.info'
from django.core.management.base import BaseCommand, CommandError
from sms.models import Message, StatusLog
from datetime import datetime, timedelta
from django.core.mail import mail_admins
from django.conf import settings
from sms.tasks import queue_message
class Command(BaseCommand):
"""
Command is called through Celery Beat scheduler every hour
"""
args = ''
help = 'Check for failed messages, and resend them'
def handle(self, *args, **options):
"""
Check for messages with status 'sent to phone' that were not updated in last hour, and try to resend these
messages.
TODO: Maybe have some limit on resending, after 3 resend actions, cancel message.
"""
try:
failed_messages = Message.objects.filter(status='sent', updated__lte=datetime.now() - timedelta(hours=1))
for message in failed_messages:
message.status = 'queued'
                s = StatusLog(status='requeueing', error='Poruka nije poslana unutar sat vremena',
log='Requeueing message due to inactivity in last hour',
phone_number='1234', message=message)
s.save()
message.save()
if settings.SMS_AMQP_ENABLED:
# Send message to AMQP queue again. This could lead to one message being delivered multiple times.
# TODO: Test this!
queue_message.delay(message)
except Exception, e:
"""
Send email to ADMINS, but continue to process failed_messages
"""
mail_admins('Error while checking for failed messages', str(e))
|
dgaston/ddbio-ngsflow
|
ddb_ngsflow/pipeline.py
|
Python
|
mit
| 1,788
| 0.002237
|
"""
.. module:: pipeline
:platform: Unix, OSX
:synopsis: A wrapper module for executing commands and logging their output.
.. moduleauthor:: Daniel Gaston <daniel.gaston@dal.ca>
"""
import sys
import subprocess as sub
def run_and_log_command(command, logfile):
"""This function uses the python subprocess method to run the specified command and writes all error to the
specified logfile
:param command: The command-line command to execute.
    :type command: str.
:param logfile: The logfile to output error messages to.
:type logfile: str.
:returns: Nothing
    :raises: RuntimeError
"""
with open(logfile, "wb") as err:
sys.stdout.write("Executing {} and writing to logfile {}\n".format(command, logfile))
err.write("Command: {}\n".format(command))
p = sub.Popen(command, stdout=sub.PIPE, stderr=err, shell=True)
        output = p.communicate()
code = p.returncode
if code:
raise RuntimeError("An error occurred when executing the commandline: {}. "
"Please check the logfile {} for details\n".format(command, logfile))
def spawn_batch_jobs(job):
"""
This is simply a placeholder root job for the workflow
"""
job.fileStore.logToMaster("Initializing workflow\n")
def spawn_variant_jobs(job):
"""
This is simply a placeholder job to create a node in the graph for spawning
off the multiple variant callers
"""
job.fileStore.logToMaster("Spawning all variant calling methods\n")
def spawn_stranded_jobs(job):
"""
This is simply a placeholder job to create a node in the graph for spawning
off the multiple variant callers
"""
job.fileStore.logToMaster("Spawning jobs for stranded libraries\n")
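# Hedged usage sketch for run_and_log_command above (the command and logfile
# names are placeholders, not part of this module):
#     run_and_log_command("samtools index sample.bam", "logs/samtools_index.log")
# stderr is captured in the logfile and a non-zero exit status raises RuntimeError.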
|
pattisdr/osf.io
|
scripts/analytics/migrate_analytics.py
|
Python
|
apache-2.0
| 20,106
| 0.004377
|
# A script to migrate old keen analytics to a new collection, generate in-between points for choppy
# data, or a little of both
import os
import csv
import copy
import pytz
import logging
import argparse
import datetime
from dateutil.parser import parse
from keen.client import KeenClient
from website.settings import KEEN as keen_settings
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
VERY_LONG_TIMEFRAME = 'this_20_years'
def parse_args():
parser = argparse.ArgumentParser(
description='Enter a start date and end date to gather, smooth, and send back analytics for keen'
)
parser.add_argument('-s', '--start', dest='start_date')
parser.add_argument('-e', '--end', dest='end_date')
parser.add_argument('-t', '--transfer', dest='transfer_collection', action='store_true')
parser.add_argument('-sc', '--source', dest='source_collection')
parser.add_argument('-dc', '--destination', dest='destination_collection')
parser.add_argument('-sm', '--smooth', dest='smooth_events', action='store_true')
parser.add_argument('-o', '--old', dest='old_analytics', action='store_true')
parser.add_argument('-d', '--dry', dest='dry', action='store_true')
parser.add_argument('-r', '--reverse', dest='reverse', action='store_true')
parser.add_argument('-re', '--removeevent', dest='remove_event')
parsed = parser.parse_args()
validate_args(parsed)
return parsed
def validate_args(args):
    """ Go through supplied command line args and determine if you have enough to continue
:param args: argparse args object, to sift through and figure out if you need more info
:return: None, just raise errors if it finds something wrong
"""
if args.dry:
logger.info('Running analytics on DRY RUN mode! No data will actually be sent to Keen.')
potential_operations = [args.smooth_events, args.transfer_collection, args.old_analytics]
if len([arg for arg in potential_operations if arg]) > 1:
raise ValueError('You may only choose one analytic type to run: transfer, smooth, or import old analytics.')
if args.smooth_events and not (args.start_date and args.end_date):
raise ValueError('To smooth data, please enter both a start date and end date.')
if args.start_date and args.end_date:
if parse(args.start_date) > parse(args.end_date):
raise ValueError('Please enter an end date that is after the start date.')
if args.smooth_events and not args.source_collection:
raise ValueError('Please specify a source collection to smooth data from.')
if args.transfer_collection and not (args.source_collection and args.destination_collection):
raise ValueError('To transfer between keen collections, enter both a source and a destination collection.')
if any([args.start_date, args.end_date]) and not all([args.start_date, args.end_date]):
raise ValueError('You must provide both a start and an end date if you provide either.')
if args.remove_event and not args.source_collection:
        raise ValueError('You must provide a source collection to remove an event from.')
def fill_in_event_gaps(collection_name, events):
""" A method to help fill in gaps between events that might be far apart,
so that one event happens per day.
:param collection_name: keen collection events are from
:param events: events to fill in gaps between
:return: list of "generated and estimated" events to send that will fill in gaps.
"""
given_days = [parse(event['keen']['timestamp']).date() for event in events if not event.get('generated')]
given_days.sort()
date_chunks = [given_days[x-1:x+1] for x in range(1, len(given_days))]
events_to_add = []
if given_days:
if collection_name == 'addon_snapshot':
all_providers = list(set([event['provider']['name'] for event in events]))
for provider in all_providers:
for date_pair in date_chunks:
if date_pair[1] - date_pair[0] > datetime.timedelta(1) and date_pair[0] != date_pair[1]:
first_event = [
event for event in events if date_from_event_ts(event) == date_pair[0] and event['provider']['name'] == provider and not event.get('generated')
]
if first_event:
events_to_add += generate_events_between_events(date_pair, first_event[0])
elif collection_name == 'institution_summary':
            all_institutions = list(set([event['institution']['name'] for event in events]))
            for institution in all_institutions:
for date_pair in date_chunks:
if date_pair[1] - date_pair[0] > datetime.timedelta(1) and date_pair[0] != date_pair[1]:
first_event = [
event for event in events if date_from_event_ts(event) == date_pair[0] and event['institution']['name'] == institution and not event.get('generated')
]
if first_event:
events_to_add += generate_events_between_events(date_pair, first_event[0])
else:
for date_pair in date_chunks:
if date_pair[1] - date_pair[0] > datetime.timedelta(1) and date_pair[0] != date_pair[1]:
first_event = [event for event in events if date_from_event_ts(event) == date_pair[0] and not event.get('generated')]
if first_event:
events_to_add += generate_events_between_events(date_pair, first_event[0])
logger.info('Generated {} events to add to the {} collection.'.format(len(events_to_add), collection_name))
else:
logger.info('Could not retrieve events for the date range you provided.')
return events_to_add
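# Illustrative sketch of the gap-filling behaviour above (the collection name, values and
# timestamps are invented): given real events on Jan 1 and Jan 4, the helper deep-copies
# the Jan 1 event, tags it generated=True and re-stamps it for Jan 2 and Jan 3, so the
# collection ends up with one event per day.
#
#   events = [
#       {'keen': {'timestamp': '2016-01-01T00:00:00+00:00', 'created_at': '...', 'id': 'a'}, 'count': 10},
#       {'keen': {'timestamp': '2016-01-04T00:00:00+00:00', 'created_at': '...', 'id': 'b'}, 'count': 12},
#   ]
#   fill_in_event_gaps('some_collection', events)   # -> two generated events, for Jan 2 and Jan 3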
def date_from_event_ts(event):
return parse(event['keen']['timestamp']).date()
def generate_events_between_events(given_days, first_event):
first_day = given_days[0]
last_day = given_days[-1]
next_day = first_day + datetime.timedelta(1)
first_event['keen'].pop('created_at')
first_event['keen'].pop('id')
first_event['generated'] = True # Add value to tag generated data
generated_events = []
while next_day < last_day:
new_event = copy.deepcopy(first_event)
new_event['keen']['timestamp'] = datetime.datetime(next_day.year, next_day.month, next_day.day).replace(tzinfo=pytz.UTC).isoformat()
if next_day not in given_days:
generated_events.append(new_event)
next_day += datetime.timedelta(1)
if generated_events:
logger.info('Generated {} events for the interval {} to {}'.format(
len(generated_events),
given_days[0].isoformat(),
given_days[1].isoformat()
)
)
return generated_events
def get_keen_client():
    keen_project = keen_settings['private'].get('project_id')
    read_key = keen_settings['private'].get('read_key')
master_key = keen_settings['private'].get('master_key')
write_key = keen_settings['private'].get('write_key')
if keen_project and read_key and master_key:
client = KeenClient(
project_id=keen_project,
read_key=read_key,
master_key=master_key,
write_key=write_key
)
else:
        raise ValueError('Cannot connect to the Keen client - not all keys were provided.')
return client
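# Hedged usage sketch (the collection name and dates are placeholders): the client built
# above is what the extraction helper below expects.
#
#   client = get_keen_client()
#   events = extract_events_from_keen(client, 'addon_snapshot',
#                                     start_date=parse('2016-01-01'),
#                                     end_date=parse('2016-02-01'))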
def extract_events_from_keen(client, event_collection, start_date=None, end_date=None):
""" Get analytics from keen to use as a starting point for smoothing or transferring
:param client: keen client to use for connection
:param start_date: datetime object, datetime to start gathering from keen
:param end_date: datetime object, datetime to stop gathering from keen
:param event_collection: str, name of the event collection to gather from
:return: a list of keen events to use in other methods
"""
timeframe = VERY_LONG_TIMEFRAME
if start_date and end_date:
log
hrantzsch/signature-verification | tools/svm.py | Python | gpl-3.0 | 1,068 | 0.000936
from sklearn import svm
from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier
import pickle
import sys
import numpy as np
def load_data(path):
data = pickle.load(open(path, 'rb'))
keys = sorted(list(data.keys()))
X = []
y = []
for k in keys:
for x in data[k]:
X.append(x)
y.append(k[-3:]) # HACK
# y.append(k)
return np.array(X, np.float32), np.array(y, np.int32)
if __name__ == '__main__':
X, y = load_data(sys.argv[1])
Xt, yt = load_data(sys.argv[2])
# === train ===
clf = OneVsOneClassifier(svm.LinearSVC(verbose=1,
max_iter=10000,
dual=False,
), 5)
clf.fit(X, y)
pickle.dump(clf, open('svm.pkl', 'wb'))
# clf = pickle.load(open('svm.pkl', 'rb'))
# === test ===
prediction = clf.predict(Xt)
correct = prediction == yt
print("\n========")
print("Accuracy: {}".format(sum(correct) / len(correct)))
gvb/odoo | addons/sale/sale.py | Python | agpl-3.0 | 71,046 | 0.005588
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
import openerp.addons.decimal_precision as dp
from openerp import workflow
class res_company(osv.Model):
_inherit = "res.company"
_columns = {
'sale_note': fields.text('Default Terms and Conditions', translate=True, help="Default terms and conditions for quotations."),
}
class sale_order(osv.osv):
_name = "sale.order"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_description = "Sales Order"
_track = {
'state': {
'sale.mt_order_confirmed': lambda self, cr, uid, obj, ctx=None: obj.state in ['manual', 'progress'],
'sale.mt_order_sent': lambda self, cr, uid, obj, ctx=None: obj.state in ['sent']
},
}
def _amount_line_tax(self, cr, uid, line, context=None):
val = 0.0
for c in self.pool.get('account.tax').compute_all(cr, uid, line.tax_id, line.price_unit * (1-(line.discount or 0.0)/100.0), line.product_uom_qty, line.product_id, line.order_id.partner_id)['taxes']:
val += c.get('amount', 0.0)
return val
def _amount_all_wrapper(self, cr, uid, ids, field_name, arg, context=None):
""" Wrapper because of direct method passing as parameter for function fields """
return self._amount_all(cr, uid, ids, field_name, arg, context=context)
def _amount_all(self, cr, uid, ids, field_name, arg, context=None):
cur_obj = self.pool.get('res.currency')
res = {}
for order in self.browse(cr, uid, ids, context=context):
res[order.id] = {
'amount_untaxed': 0.0,
'amount_tax': 0.0,
'amount_total': 0.0,
}
val = val1 = 0.0
cur = order.pricelist_id.currency_id
for line in order.order_line:
val1 += line.price_subtotal
val += self._amount_line_tax(cr, uid, line, context=context)
res[order.id]['amount_tax'] = cur_obj.round(cr, uid, cur, val)
res[order.id]['amount_untaxed'] = cur_obj.round(cr, uid, cur, val1)
res[order.id]['amount_total'] = res[order.id]['amount_untaxed'] + res[order.id]['amount_tax']
return res
def _invoiced_rate(self, cursor, user, ids, name, arg, context=None):
res = {}
for sale in self.browse(cursor, user, ids, context=context):
if sale.invoiced:
res[sale.id] = 100.0
continue
tot = 0.0
for invoice in sale.invoice_ids:
if invoice.state not in ('draft', 'cancel'):
tot += invoice.amount_untaxed
if tot:
res[sale.id] = min(100.0, tot * 100.0 / (sale.amount_untaxed or 1.00))
else:
res[sale.id] = 0.0
return res
def _invoice_exists(self, cursor, user, ids, name, arg, context=None):
res = {}
for sale in self.browse(cursor, user, ids, context=context):
res[sale.id] = False
if sale.invoice_ids:
res[sale.id] = True
return res
def _invoiced(self, cursor, user, ids, name, arg, context=None):
res = {}
for sale in self.browse(cursor, user, ids, context=context):
res[sale.id] = True
invoice_existence = False
for invoice in sale.invoice_ids:
if invoice.state!='cancel':
invoice_existence = True
if invoice.state != 'paid':
res[sale.id] = False
break
if not invoice_existence or sale.state == 'manual':
res[sale.id] = False
return res
def _invoiced_search(self, cursor, user, obj, name, args, context=None):
if not len(args):
return []
clause = ''
sale_clause = ''
no_invoiced = False
for arg in args:
if (arg[1] == '=' and arg[2]) or (arg[1] == '!=' and not arg[2]):
clause += 'AND inv.state = \'paid\''
else:
clause += 'AND inv.state != \'cancel\' AND sale.state != \'cancel\' AND inv.state <> \'paid\' AND rel.order_id = sale.id '
                sale_clause = ', sale_order AS sale '
no_invoiced = True
cursor.execute('SELECT rel.order_id ' \
'FROM sale_order_invoice_rel AS rel, account_invoice AS inv '+ sale_clause + \
'WHERE rel.invoice_id = inv.id ' + clause)
res = cursor.fetchall()
if no_invoiced:
cursor.execute('SELECT sale.id ' \
'FROM sale_order AS sale ' \
'WHERE sale.id NOT IN ' \
'(SELECT rel.order_id ' \
'FROM sale_order_invoice_rel AS rel) and sale.state != \'cancel\'')
res.extend(cursor.fetchall())
if not res:
return [('id', '=', 0)]
return [('id', 'in', [x[0] for x in res])]
def _get_order(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('sale.order.line').browse(cr, uid, ids, context=context):
result[line.order_id.id] = True
return result.keys()
def _get_default_company(self, cr, uid, context=None):
company_id = self.pool.get('res.users')._get_company(cr, uid, context=context)
if not company_id:
raise osv.except_osv(_('Error!'), _('There is no default company for the current user!'))
return company_id
def _get_default_section_id(self, cr, uid, context=None):
""" Gives default section by checking if present in the context """
section_id = self._resolve_section_id_from_context(cr, uid, context=context) or False
if not section_id:
section_id = self.pool.get('res.users').browse(cr, uid, uid, context).default_section_id.id or False
return section_id
def _resolve_section_id_from_context(self, cr, uid, context=None):
""" Returns ID of section based on the value of 'section_id'
context key, or None if it cannot be resolved to a single
Sales Team.
"""
if context is None:
context = {}
if type(context.get('default_section_id')) in (int, long):
return context.get('default_section_id')
if isinstance(context.get('default_section_id'), basestring):
section_ids = self.pool.get('crm.case.section').name_search(cr, uid, name=context['default_section_id'], context=context)
if len(section_ids) == 1:
return int(section_ids[0][0])
return None
_columns = {
'name': fields.char('Order Reference', required=True, copy=False,
readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, select=True),
'origin': fields.char('Source Document', help="Reference of the document that generated this sales order request."),
'client_order_ref': fie
Clarify/clarify_python | tests/__init__.py | Python | mit | 2,307 | 0.008236
import sys
import os
import re
if sys.version_info[0] < 3:
from io import open
host = 'https://api.clarify.io'
def load_body(filename):
text = None
with open(os.path.join('.', 'tests', 'data', filename), encoding="utf-8") as f:
text = f.read()
return text if text else '{}'
def register_uris(httpretty):
httpretty.register_uri('POST', host + '/v1/bundles',
body=load_body('bundle_ref.json'), status=201,
content_type='application/json')
httpretty.register_uri('GET', host + '/v1/bundles',
body=load_body('bundles_1.json'), status=200,
                           content_type='application/json')
httpretty.register_uri('GET', re.compile(host + '/v1/bundles/(\w+)\?embed=(.*)$'),
body=load_body('bundle_embedded.json'), status=200,
content_type='application/json', match_querystring=True)
httpretty.register_uri('GET', re.compile(host + '/v1/bundles/(\w+)$'),
body=load_body('bundle.json'), status=200,
                           content_type='application/json', match_querystring=True)
httpretty.register_uri('GET', re.compile(host + '/v1/bundles/(\w+)/tracks$'),
body=load_body('tracks.json'), status=200,
content_type='application/json')
httpretty.register_uri('GET', re.compile(host + '/v1/bundles/(\w+)/metadata$'),
body=load_body('metadata.json'), status=200,
content_type='application/json')
httpretty.register_uri('GET', re.compile(host + '/v1/bundles/(\w+)/insights$'),
body=load_body('insights.json'), status=200,
content_type='application/json')
httpretty.register_uri('GET', re.compile(host + '/v1/bundles/(\w+)/insights/(\w+)$'),
body=load_body('insight_classification.json'), status=200,
content_type='application/json')
httpretty.register_uri('POST', re.compile(host + '/v1/bundles/(\w+)/insights$'),
body=load_body('insight_pending.json'), status=202,
content_type='application/json')
ganga-devs/ganga | ganga/GangaCore/Core/GangaThread/MTRunner/Data.py | Python | gpl-3.0 | 1,817 | 0.001101
#!/usr/bin/env python
from queue import Queue
import threading
class DuplicateDataItemError(Exception):
"""
Class raised when adding the same item in the Data object.
"""
def __init__(self, message):
self.message = message
class Data(object):
"""
Class to define user dataset collection.
"""
_attributes = ('collection', 'queue')
def __init__(self, collection=None):
if collection is None:
collection = []
self.collection = collection
self.queue = Queue(maxsize=-1)
self.lock = threading.Lock()
for item in collection:
self.queue.put(item)
def getCollection(self):
return self.collection
def isEmpty(self):
'''
checks if the bounded queue is empty.
'''
return self.queue.empty()
def addItem(self, item):
'''
        try to put a new item in the queue. As the queue is defined with an unlimited number
        of slots, it should never throw the "Queue.Full" exception.
'''
self.lock.acquire()
try:
if item not in self.collection:
self.collection.append(item)
self.queue.put(item)
else:
raise DuplicateDataItemError(
'data item \'%s\' already in the task queue' % str(item))
finally:
self.lock.release()
def getNextItem(self):
'''
try to get the next item in the queue after waiting in max. 1 sec.
if nothing available, the exception "Queue.Empty" will be thrown.
'''
theItem = None
self.lock.acquire()
try:
theItem = self.queue.get(block=True, timeout=1)
finally:
self.lock.release()
return theItem
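# Hedged usage sketch of the Data container above (item values are arbitrary and
# process() is a placeholder for the caller's own work):
#
#   data = Data(collection=['task-1', 'task-2'])
#   data.addItem('task-3')            # raises DuplicateDataItemError if already queued
#   while not data.isEmpty():
#       item = data.getNextItem()     # may raise queue.Empty after the 1-second timeout
#       process(item)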
wbinventor/openmc | tests/regression_tests/uwuw/test.py | Python | mit | 1,025 | 0.002927
import openmc
import openmc.capi
from openmc.stats import Box
from openmc.material import Materials
import pytest
from tests.testing_harness import PyAPITestHarness
pytestmark = pytest.mark.skipif(
not openmc.capi._dagmc_enabled(),
reason="DAGMC CAD geometry is not enabled.")
class UWUWTest(PyAPITestHarness):
def _build_inputs(self):
model = openmc.model.Model()
# settings
model.settings.batches = 5
model.settings.inactive = 0
model.settings.particles = 100
source = openmc.Source(space=Box([-4, -4, -4],
[ 4, 4, 4]))
model.settings.source = source
model.settings.dagmc = True
model.settings.export_to_xml()
# tally
tally = openmc.Tally()
tally.scores = ['total']
tally.filters = [openmc.CellFilter(1)]
        model.tallies = [tally]
        model.tallies.export_to_xml()
def test_uwuw():
harness = UWUWTest('statepoint.5.h5')
harness.main()
ramineni/myironic | ironic/tests/drivers/test_agent_client.py | Python | apache-2.0 | 4,574 | 0.001312
# Copyright 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import mock
import requests
from ironic.common import exception
from ironic.drivers.modules import agent_client
from ironic.tests import base
class MockResponse(object):
def __init__(self, data):
self.data = data
self.text = json.dumps(data)
def json(self):
return self.data
class MockNode(object):
def __init__(self):
self.uuid = 'uuid'
self.driver_info = {}
self.driver_internal_info = {
'agent_url': "http://127.0.0.1:9999"
}
self.instance_info = {}
class TestAgentClient(base.TestCase):
def setUp(self):
super(TestAgentClient, self).setUp()
self.client = agent_client.AgentClient()
self.client.session = mock.Mock(autospec=requests.Session)
self.node = MockNode()
def test__get_command_url(self):
command_url = self.client._get_command_url(self.node)
expected = self.node.driver_internal_info['agent_url'] + '/v1/commands'
self.assertEqual(expected, command_url)
def test__get_command_url_fail(self):
del self.node.driver_internal_info['agent_url']
self.assertRaises(exception.IronicException,
self.client._get_command_url,
self.node)
def test__get_command_body(self):
expected = json.dumps({'name': 'prepare_image', 'params': {}})
self.assertEqual(expected,
self.client._get_command_body('prepare_image', {}))
def test__command(self):
response_data = {'status': 'ok'}
self.client.session.post.return_value = MockResponse(response_data)
method = 'standby.run_image'
image_info = {'image_id': 'test_image'}
params = {'image_info': image_info}
url = self.client._get_command_url(self.node)
body = self.client._get_command_body(method, params)
headers = {'Content-Type': 'application/json'}
response = self.client._command(self.node, method, params)
self.assertEqual(response, response_data)
self.client.session.post.assert_called_once_with(
url,
data=body,
headers=headers,
params={'wait': 'false'})
def test_get_commands_status(self):
        with mock.patch.object(self.client.session, 'get') as mock_get:
res = mock.Mock()
res.json.return_value = {'commands': []}
mock_get.return_value = res
self.assertEqual([], self.client.get_commands_status(self.node))
@mock.patch('uuid.uuid4', mock.MagicMock(return_value='uuid'))
def test_prepare_image(self):
self.client._command = mock.Mock()
image_info = {'image_id': 'image'}
params = {'image_info': image_info}
        self.client.prepare_image(self.node,
image_info,
wait=False)
self.client._command.assert_called_once_with(node=self.node,
method='standby.prepare_image',
params=params,
wait=False)
@mock.patch('uuid.uuid4', mock.MagicMock(return_value='uuid'))
def test_prepare_image_with_configdrive(self):
self.client._command = mock.Mock()
configdrive_url = 'http://swift/configdrive'
self.node.instance_info['configdrive'] = configdrive_url
image_info = {'image_id': 'image'}
params = {
'image_info': image_info,
'configdrive': configdrive_url,
}
self.client.prepare_image(self.node,
image_info,
wait=False)
self.client._command.assert_called_once_with(node=self.node,
method='standby.prepare_image',
params=params,
wait=False)
MyNameIsMeerkat/skyline | src/webapp/webapp.py | Python | mit | 2,673 | 0.001871
import redis
import logging
import simplejson as json
import sys
from msgpack import Unpacker
from flask import Flask, request, render_template
from daemon import runner
from os.path import dirname, abspath
# add the shared settings file to namespace
sys.path.insert(0, dirname(dirname(abspath(__file__))))
import settings
REDIS_CONN = redis.StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH)
app = Flask(__name__)
app.config['PROPAGATE_EXCEPTIONS'] = True
@app.route("/")
def index():
    return render_template('index.html'), 200
@app.route("/app_settings")
def app_settings():
app_settings = {'GRAPHITE_HOST': settings.GRAPHITE_HOST,
'OCULUS_HOST': settings.OCULUS_HOST,
'FULL_NAMESPACE': settings.FULL_NAMESPACE,
}
resp = json.dumps(app_settings)
return resp, 200
@app.route("/api", methods=['GET'])
def data():
metric = request.args.get('metric', None)
try:
raw_series = REDIS_CONN.get(metric)
if not raw_series:
resp = json.dumps({'results': 'Error: No metric by that name'})
return resp, 404
else:
unpacker = Unpacker(use_list = False)
unpacker.feed(raw_series)
timeseries = [item[:2] for item in unpacker]
resp = json.dumps({'results': timeseries})
return resp, 200
except Exception as e:
error = "Error: " + e
resp = json.dumps({'results': error})
return resp, 500
class App():
def __init__(self):
self.stdin_path = '/dev/null'
self.stdout_path = settings.LOG_PATH + '/webapp.log'
self.stderr_path = settings.LOG_PATH + '/webapp.log'
self.pidfile_path = settings.PID_PATH + '/webapp.pid'
self.pidfile_timeout = 5
def run(self):
logger.info('starting webapp')
logger.info('hosted at %s' % settings.WEBAPP_IP)
logger.info('running on port %d' % settings.WEBAPP_PORT)
app.run(settings.WEBAPP_IP, settings.WEBAPP_PORT)
if __name__ == "__main__":
"""
Start the server
"""
webapp = App()
logger = logging.getLogger("AppLog")
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s :: %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
handler = logging.FileHandler(settings.LOG_PATH + '/webapp.log')
handler.setFormatter(formatter)
logger.addHandler(handler)
if len(sys.argv) > 1 and sys.argv[1] == 'run':
webapp.run()
else:
daemon_runner = runner.DaemonRunner(webapp)
daemon_runner.daemon_context.files_preserve = [handler.stream]
daemon_runner.do_action()
burzillibus/RobHome | venv/lib/python2.7/site-packages/gevent/fileobject.py | Python | mit | 7,896 | 0.003293
"""
Wrappers to make file-like objects cooperative.
.. class:: FileObject
The main entry point to the file-like gevent-compatible behaviour. It will be defined
to be the best available implementation.
There are two main implementations of ``FileObject``. On all systems,
there is :class:`FileObjectThread` which uses the built-in native
threadpool to avoid blocking the entire interpreter. On UNIX systems
(those that support the :mod:`fcntl` module), there is also
:class:`FileObjectPosix` which uses native non-blocking semantics.
A third class, :class:`FileObjectBlock`, is simply a wrapper that executes everything
synchronously (and so is not gevent-compatible). It is provided for testing and debugging
purposes.
Configuration
=============
You may change the default value for ``FileObject`` using the
``GEVENT_FILE`` environment variable. Set it to ``posix``, ``thread``,
or ``block`` to choose from :class:`FileObjectPosix`,
:class:`FileObjectThread` and :class:`FileObjectBlock`, respectively.
You may also set it to the fully qualified class name of another
object that implements the file interface to use one of your own
objects.
.. note:: The environment variable must be set at the time this module
is first imported.
Classes
=======
"""
from __future__ import absolute_import
import functools
import sys
import os
from gevent._fileobjectcommon import FileObjectClosed
from gevent._fileobjectcommon import FileObjectBase
from gevent.hub import get_hub
from gevent._compat import integer_types
from gevent._compat import reraise
from gevent.lock import Semaphore, DummySemaphore
PYPY = hasattr(sys, 'pypy_version_info')
if hasattr(sys, 'exc_clear'):
def _exc_clear():
sys.exc_clear()
else:
def _exc_clear():
return
__all__ = [
'FileObjectPosix',
'FileObjectThread',
'FileObject',
]
try:
from fcntl import fcntl
except ImportError:
__all__.remove("FileObjectPosix")
else:
del fcntl
from gevent._fileobjectposix import FileObjectPosix
class FileObjectThread(FileObjectBase):
"""
A file-like object wrapping another file-like object, performing all blocking
operations on that object in a background thread.
.. caution::
Attempting to change the threadpool or lock of an existing FileObjectThread
has undefined consequences.
.. versionchanged:: 1.1b1
The file object is closed using the threadpool. Note that whether or
not this action is synchronous or asynchronous is not documented.
"""
def __init__(self, fobj, mode=None, bufsize=-1, close=True, threadpool=None, lock=True):
"""
:param fobj: The underlying file-like object to wrap, or an integer fileno
that will be pass to :func:`os.fdopen` along with *mode* and *bufsize*.
:keyword bool lock: If True (the default) then all operations will
be performed one-by-one. Note that this does not guarantee that, if using
this file object from multiple threads/greenlets, operations will be performed
in any particular order, only that no two operations will be attempted at the
same time. You can also pass your own :class:`gevent.lock.Semaphore` to synchronize
file operations with an external resource.
:keyword bool close: If True (the default) then when this object is closed,
the underlying object is closed as well.
"""
closefd = close
self.threadpool = threadpool or get_hub().threadpool
self.lock = lock
if self.lock is True:
self.lock = Semaphore()
elif not self.lock:
self.lock = DummySemaphore()
if not hasattr(self.lock, '__enter__'):
raise TypeError('Expected a Semaphore or boolean, got %r' % type(self.lock))
if isinstance(fobj, integer_types):
if not closefd:
# we cannot do this, since fdopen object will close the descriptor
raise TypeError('FileObjectThread does not support close=False on an fd.')
            if mode is None:
assert bufsize == -1, "If you use the default mode, you can't choose a bufsize"
fobj = os.fdopen(fobj)
else:
fobj = os.fdopen(fobj, mode, bufsize)
self.__io_holder = [fobj] # signal for _wrap_method
super(FileObjectThread, self).__init__(fobj, closefd)
def _do_close(self, fobj, closefd):
self.__io_holder[0] = None # for _wrap_method
try:
with self.lock:
self.threadpool.apply(fobj.flush)
finally:
if closefd:
# Note that we're not taking the lock; older code
# did fobj.close() without going through the threadpool at all,
# so acquiring the lock could potentially introduce deadlocks
# that weren't present before. Avoiding the lock doesn't make
# the existing race condition any worse.
# We wrap the close in an exception handler and re-raise directly
# to avoid the (common, expected) IOError from being logged by the pool
def close():
try:
fobj.close()
except: # pylint:disable=bare-except
return sys.exc_info()
exc_info = self.threadpool.apply(close)
if exc_info:
reraise(*exc_info)
def _do_delegate_methods(self):
super(FileObjectThread, self)._do_delegate_methods()
if not hasattr(self, 'read1') and 'r' in getattr(self._io, 'mode', ''):
self.read1 = self.read
self.__io_holder[0] = self._io
def _extra_repr(self):
return ' threadpool=%r' % (self.threadpool,)
def __iter__(self):
return self
def next(self):
line = self.readline()
if line:
return line
raise StopIteration
__next__ = next
def _wrap_method(self, method):
# NOTE: We are careful to avoid introducing a refcycle
# within self. Our wrapper cannot refer to self.
io_holder = self.__io_holder
lock = self.lock
threadpool = self.threadpool
@functools.wraps(method)
def thread_method(*args, **kwargs):
if io_holder[0] is None:
# This is different than FileObjectPosix, etc,
# because we want to save the expensive trip through
# the threadpool.
raise FileObjectClosed()
with lock:
return threadpool.apply(method, args, kwargs)
return thread_method
try:
FileObject = FileObjectPosix
except NameError:
FileObject = FileObjectThread
class FileObjectBlock(FileObjectBase):
def __init__(self, fobj, *args, **kwargs):
closefd = kwargs.pop('close', True)
if kwargs:
raise TypeError('Unexpected arguments: %r' % kwargs.keys())
if isinstance(fobj, integer_types):
if not closefd:
# we cannot do this, since fdopen object will close the descriptor
raise TypeError('FileObjectBlock does not support close=False on an fd.')
fobj = os.fdopen(fobj, *args)
super(FileObjectBlock, self).__init__(fobj, closefd)
def _do_close(self, fobj, closefd):
fobj.close()
config = os.environ.get('GEVENT_FILE')
if config:
klass = {'thread': 'gevent.fileobject.FileObjectThread',
'posix': 'gevent.fileobject.FileObjectPosix',
'block': 'gevent.fileobject.FileObjectBlock'}.get(config, config)
if klass.startswith('gevent.fileobject.'):
FileObject = globals()[klass.split('.', 2)[-1]]
else:
from gevent.hub import _import
FileObject = _import(klass)
del klass
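# Hedged usage sketch (the path is a placeholder): FileObject resolves above to the best
# available implementation, so the wrapper behaves like an ordinary file while
# cooperating with the gevent hub.
#
#   from gevent.fileobject import FileObject
#   f = FileObject(open('/tmp/example.txt'))   # wrap an already-open file object
#   data = f.read()                            # blocking work is delegated cooperatively
#   f.close()                                  # also closes the underlying file (close=True default)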
glizek/openacademy-project | openacademy/model/openacademy_course.py | Python | apache-2.0 | 1,587 | 0
'''
This module is to create model of Course
'''
from openerp import api, fields, models, _
class Course(models.Model):
'''
This class create model of Course
'''
_name = 'openacademy.course' # Model odoo name
name = fields.Char(string='Title', required=True) # Field reserved
description = fields.Text(string='Description')
responsible_id = fields.Many2one('res.users',
ondelete='set null',
string="Responsible", index=True)
session_ids = fields.One2many('openacademy.session', 'course_id',
string="
|
Sessions")
|
_sql_constraints = [
('name_description_check',
'CHECK(name != description)',
_("The title of the course should not be the description")),
('name_unique',
'UNIQUE(name)',
_("The course title must be unique")),
]
@api.one # api.one send defaults params: cr, uid, id, context
def copy(self, default=None):
# print "estoy pasando por la funcion heredada de copy en cursos"
if default is None:
default = {}
# default['name'] = self.name + ' (copy)'
copied_count = self.search_count(
[('name', '=like', _(u"Copy of {}%").format(self.name))])
if not copied_count:
new_name = _(u"Copy of {}").format(self.name)
else:
new_name = _(u"Copy of {} ({})").format(self.name, copied_count)
default['name'] = new_name
return super(Course, self).copy(default)
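# Illustrative note (not part of the original module): duplicating a course named
# "Functional Programming" yields "Copy of Functional Programming" the first time,
# then "Copy of Functional Programming (1)", "(2)", and so on, because copy() counts
# existing records whose name matches the "Copy of ..." pattern.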
lem9/weblate | weblate/trans/data.py | Python | gpl-3.0 | 1,577 | 0
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2017 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""Data files helpers."""
import os
from django.conf import settings
def create_and_check_dir(path):
"""Ensure directory exists and is writable by us"""
if not os.path.exists(path):
os.makedirs(path)
else:
if not os.access(path, os.W_OK):
raise OSError(
'DATA_DIR {0} is not writable!'.format(path)
)
def check_data_writable():
"""Check we can write to data dir."""
create_and_check_dir(settings.DATA_DIR)
create_and_check_dir(data_dir('home'))
create_and_check_dir(data_dir('whoosh'))
create_and_check_dir(data_dir('ssh'))
create_and_check_dir(data_dir('vcs'))
def data_dir(component):
"""Return path to data dir for given component."""
return os.path.join(settings.DATA_DIR, component)
pescobar/easybuild-easyblocks | easybuild/easyblocks/generic/conda.py | Python | gpl-2.0 | 4,661 | 0.003218
##
# Copyright 2009-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for installing software using 'conda', implemented as an easyblock.
@author: Jillian Rowe (New York University Abu Dhabi)
@author: Kenneth Hoste (HPC-UGent)
"""
import os
from easybuild.easyblocks.generic.binary import Binary
from easybuild.framework.easyconfig import CUSTOM
import easybuild.tools.environment as env
from easybuild.tools.run import run_cmd
class Conda(Binary):
"""Support for installing software using 'conda'."""
@staticmethod
    def extra_options(extra_vars=None):
"""Extra easyconfig parameters specific to Conda easyblock."""
extra_vars = Binary.extra_options(extra_vars)
extra_vars.update({
            'channels': [None, "List of conda channels to pass to 'conda install'", CUSTOM],
'environment_file': [None, "Conda environment.yml file to use with 'conda env create'", CUSTOM],
'remote_environment': [None, "Remote conda environment to use with 'conda env create'", CUSTOM],
'requirements': [None, "Requirements specification to pass to 'conda install'", CUSTOM],
})
return extra_vars
def extract_step(self):
"""Copy sources via extract_step of parent, if any are specified."""
if self.src:
super(Conda, self).extract_step()
def install_step(self):
"""Install software using 'conda env create' or 'conda create' & 'conda install'."""
# initialize conda environment
# setuptools is just a choice, but *something* needs to be there
cmd = "conda config --add create_default_packages setuptools"
run_cmd(cmd, log_all=True, simple=True)
if self.cfg['environment_file'] or self.cfg['remote_environment']:
if self.cfg['environment_file']:
env_spec = '-f ' + self.cfg['environment_file']
else:
env_spec = self.cfg['remote_environment']
# use --force to ignore existing installation directory
cmd = "%s conda env create --force %s -p %s" % (self.cfg['preinstallopts'], env_spec, self.installdir)
run_cmd(cmd, log_all=True, simple=True)
else:
if self.cfg['requirements']:
install_args = "-y %s " % self.cfg['requirements']
if self.cfg['channels']:
install_args += ' '.join('-c ' + chan for chan in self.cfg['channels'])
self.log.info("Installed conda requirements")
cmd = "%s conda create --force -y -p %s %s" % (self.cfg['preinstallopts'], self.installdir, install_args)
run_cmd(cmd, log_all=True, simple=True)
def make_module_extra(self):
"""Add the install directory to the PATH."""
txt = super(Conda, self).make_module_extra()
txt += self.module_generator.set_environment('CONDA_ENV', self.installdir)
txt += self.module_generator.set_environment('CONDA_PREFIX', self.installdir)
txt += self.module_generator.set_environment('CONDA_DEFAULT_ENV', self.installdir)
self.log.debug("make_module_extra added this: %s", txt)
return txt
def make_module_req_guess(self):
"""
A dictionary of possible directories to look for.
"""
# LD_LIBRARY_PATH issue discusses here
# http://superuser.com/questions/980250/environment-module-cannot-initialize-tcl
return {
'PATH': ['bin', 'sbin'],
'MANPATH': ['man', os.path.join('share', 'man')],
'PKG_CONFIG_PATH': [os.path.join(x, 'pkgconfig') for x in ['lib', 'lib32', 'lib64', 'share']],
}
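# Hedged sketch of an easyconfig fragment that would drive this easyblock (the name,
# version, requirements and channels values are invented; standard parameters such as
# homepage, description and toolchain are omitted):
#
#   easyblock = 'Conda'
#   name = 'example-tool'
#   version = '1.0'
#   requirements = 'example-tool=1.0 python=3.7'
#   channels = ['bioconda', 'conda-forge']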
openmotics/gateway | testing/unittests/api_tests/serializers/sensor_test.py | Python | agpl-3.0 | 4,097 | 0.000488
# Copyright (C) 2020 OpenMotics BV
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import unittest
from gateway.dto import SensorDTO, SensorSourceDTO
from gateway.api.serializers import SensorSerializer
class SensorSerializerTest(unittest.TestCase):
def test_serialize(self):
# Valid room
data = SensorSerializer.serialize(SensorDTO(id=1, name='foo', room=5),
fields=['id', 'name', 'room'])
self.assertEqual({'id': 1,
'name': 'foo',
'room': 5}, data)
# Empty room
data = SensorSerializer.serialize(SensorDTO(id=1, name='foo'),
fields=['id', 'name', 'room'])
self.assertEqual({'id': 1,
'name': 'foo',
'room': 255}, data)
# No room
data = SensorSerializer.serialize(SensorDTO(id=1, name='foo', room=5),
fields=['id', 'name'])
self.assertEqual({'id': 1,
'name': 'foo'}, data)
def test_deserialize(self):
# Valid room
dto = SensorSerializer.deserialize({'id': 5,
'external_id': '0',
'source': {'type': 'master'},
'physical_quantity': 'temperature',
'unit': 'celcius',
'name': 'bar',
'room': 10})
expected_dto = SensorDTO(id=5,
external_id='0',
source=SensorSourceDTO('master', name=None),
physical_quantity='temperature',
unit='celcius',
name='bar',
room=10)
assert expected_dto == dto
self.assertEqual(expected_dto, dto)
self.assertEqual(['external_id', 'id', 'name', 'physical_quantity', 'room', 'source', 'unit'], sorted(dto.loaded_fields))
# Empty room
dto = SensorSerializer.deserialize({'id': 5,
'name': 'bar',
'room': 255})
self.assertEqual(SensorDTO(id=5,
name='bar'), dto)
self.assertEqual(['id', 'name', 'room'], sorted(dto.loaded_fields))
# No room
dto = SensorSerializer.deserialize({'id': 5,
'name': 'bar'})
self.assertEqual(SensorDTO(id=5, name='bar'), dto)
self.assertEqual(['id', 'name'], sorted(dto.loaded_fields))
# Invalid physical_quantity
with self.assertRaises(ValueError):
_ = SensorSerializer.deserialize({'id': 5,
'physical_quantity': 'something',
'unit': 'celcius',
'name': 'bar'})
# Invalid unit
with self.assertRaises(ValueError):
_ = SensorSerializer.deserialize({'id': 5,
'physical_quantity': 'temperature',
'unit': 'unicorns',
'name': 'bar'})
ajrichards/htsint | htsint/__init__.py | Python | bsd-3-clause | 693 | 0.012987
import sys,os
## basic files
#sys.path.append(os.path.realpath(__file__))
#print(os.path.realpath(__file__))
#print(os.path.pardir(os.path.realpath(__file__)))
#print(os.path.split(os.path.abspath(__file__))[:-1])
from .version import __version__
from .basedir import __basedir__
## database functions and classes
from .Configure import Configure
from .RunSubprocess import run_subprocess, RunSubprocess
from .GeneOntology import GeneOntology
from .TermDistances import TermDistances
from .GeneDistances import GeneDistances
from .AssembleDistances import AssembleDistances
from .TaxaSummary import TaxaSummary
from .GeneSetCollection import GeneSetCollection
from .GeneSet import GeneSet
bjolivot/ansible | lib/ansible/modules/cloud/azure/azure_rm_virtualmachine.py | Python | gpl-3.0 | 60,368 | 0.004092
#!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: azure_rm_virtualmachine
version_added: "2.1"
short_description: Manage Azure virtual machines.
description:
- Create, update, stop and start a virtual machine. Provide an existing storage account and network interface or
allow the module to create these for you. If you choose not to provide a network interface, the resource group
must contain a virtual network with at least one subnet.
- Currently requires an image found in the Azure Marketplace. Use azure_rm_virtualmachineimage_facts module
to discover the publisher, offer, sku and version of a particular image.
options:
resource_group:
description:
- Name of the resource group containing the virtual machine.
required: true
name:
description:
- Name of the virtual machine.
required: true
state:
description:
- Assert the state of the virtual machine.
- State 'present' will check that the machine exists with the requested configuration. If the configuration
of the existing machine does not match, the machine will be updated. Use options started, allocated and restarted to change the machine's power
state.
- State 'absent' will remove the virtual machine.
default: present
required: false
choices:
- absent
- present
started:
description:
- Use with state 'present' to start the machine. Set to false to have the machine be 'stopped'.
default: true
required: false
allocated:
description:
- Toggle that controls if the machine is allocated/deallocated, only useful with state='present'.
default: True
required: false
restarted:
description:
- Use with state 'present' to restart a running VM.
default: false
required: false
location:
description:
- Valid Azure location. Defaults to location of the resource group.
default: null
required: false
short_hostname:
description:
- Name assigned internally to the host. On a linux VM this is the name returned by the `hostname` command.
When creating a virtual machine, short_hostname defaults to name.
default: null
required: false
vm_size:
description:
- A valid Azure VM size value. For example, 'Standard_D4'. The list of choices varies depending on the
subscription and location. Check your subscription for available choices.
default: Standard_D1
required: false
admin_username:
description:
- Admin username used to access the host after it is created. Required when creating a VM.
default: null
required: false
admin_password:
description:
- Password for the admin username. Not required if the os_type is Linux and SSH password authentication
          is disabled by setting ssh_password_enabled to false.
default: null
required: false
ssh_password_enabled:
description:
        - When the os_type is Linux, setting ssh_password_enabled to false will disable SSH password authentication
and require use of SSH keys.
default: true
required: false
ssh_public_keys:
description:
- "For os_type Linux provide a list of SSH keys. Each item in the list should be a dictionary where the
dictionary contains two keys: path and key_data. Set the path to the default location of the
authorized_keys files. On an Enterprise Linux host, for example, the path will be
/home/<admin username>/.ssh/authorized_keys. Set key_data to the actual value of the public key."
default: null
required: false
image:
description:
- "A dictionary describing the Marketplace image used to build the VM. Will contain keys: publisher,
offer, sku and version. NOTE: set image.version to 'latest' to get the most recent version of a given
image."
required: true
storage_account_name:
description:
- Name of an existing storage account that supports creation of VHD blobs. If not specified for a new VM,
a new storage account named <vm name>01 will be created using storage type 'Standard_LRS'.
default: null
required: false
storage_container_name:
description:
- Name of the container to use within the storage account to store VHD blobs. If no name is specified a
          default container will be created.
default: vhds
required: false
storage_blob_name:
description:
        - Name of the storage blob used to hold the VM's OS disk image. If no name is provided, defaults to
the VM name + '.vhd'. If you provide a name, it must end with '.vhd'
aliases:
- storage_blob
default: null
required: false
os_disk_caching:
description:
- Type of OS disk caching.
choices:
- ReadOnly
- ReadWrite
default: ReadOnly
aliases:
- disk_caching
required: false
os_type:
description:
- Base type of operating system.
choices:
- Windows
- Linux
default:
- Linux
required: false
public_ip_allocation_method:
description:
- If a public IP address is created when creating the VM (because a Network Interface was not provided),
determines if the public IP address remains permanently associated with the Network Interface. If set
to 'Dynamic' the public IP address may change any time the VM is rebooted or power cycled.
choices:
- Dynamic
- Static
default:
- Static
aliases:
- public_ip_allocation
required: false
open_ports:
description:
- If a network interface is created when creating the VM, a security group will be created as well. For
Linux hosts a rule will be added to the security group allowing inbound TCP connections to the default
SSH port 22, and for Windows hosts ports 3389 and 5986 will be opened. Override the default open ports by
providing a list of ports.
default: null
required: false
network_interface_names:
description:
- List of existing network interface names to add to the VM. If a network interface name is not provided
when the VM is created, a default network interface will be created. In order for the module to create
a network interface, at least one Virtual Network with one Subnet must exist.
default: null
required: false
virtual_network_name:
description:
- When creating a virtual machine, if a network interface name is not provided, one will be created.
The new network interface will be
BlueDragonX/whatcouch | whatcouch/test/testplugins/testauthenticatorplugin.py | Python | bsd-3-clause | 1,977 | 0.002023
# Copyright (c) 2010, Ryan Bourgeois <bluedragonx@gmail.com>
# All rights reserved.
#
# This software is licensed under a modified BSD license as defined in the
# provided license file at the root of this project. You may modify and/or
# distribute in accordance with those terms.
#
# This software is provided "as is" and any express or implied warranties,
# including, but not limited to, the implied warranties of merchantability and
# fitness for a particular purpose are disclaimed.
from whatcouch.test import Config
from whatcouch.model import User
from whatcouch.plugins import AuthenticatorPlugin
class TestAuthenticatorPlugin:
@staticmethod
def setup_class():
Config.username = 'admin'
Config.password = 'password'
Config.user = User.create(Config.username, Config.password)
Config.user.save()
Config.plugin = AuthenticatorPlugin(Config.t11)
@staticmethod
def teardown_class():
Config.user.delete()
del Config.username
del Config.password
del Config.user
del Config.plugin
def test_authenticate__success(self):
identity = {'login': Config.username, 'password': Config.password}
username = Config.plugin.authenticate(Config.environ, identity)
assert username == Config.username
def test_authenticate__baduser(self):
identity = {'login': 'nobody', 'password': Config.password}
username = Config.plugin.authenticate(Config.environ, identity)
assert username is None
    def test_authenticate__badpass(self):
identity = {'login': Config.username, 'password': 'nopass'}
username = Config.plugin.authenticate(Config.environ, identity)
assert username is None
def test_authenticate__badeverything(self):
identity = {'login': 'nobody', 'password': 'nopass'}
username = Config.plugin.authenticate(Config.environ, identity)
assert username is None
iulian787/spack | var/spack/repos/builtin/packages/sfcgal/package.py | Python | lgpl-2.1 | 1,183 | 0.001691
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Sfcgal(CMakePackage):
"""
    SFCGAL is a C++ wrapper library around CGAL with the aim of supporting
ISO 19107:2013 and OGC Simple Features Access 1.2 for 3D operations. SFCGAL
provides standard compliant geometry types and operations, that can be
accessed from its C or C++ APIs.
"""
homepage = "http://www.sfcgal.org/"
    url      = "https://github.com/Oslandia/SFCGAL/archive/v1.3.7.tar.gz"
version('1.3.7', sha256='30ea1af26cb2f572c628aae08dd1953d80a69d15e1cac225390904d91fce031b')
# Ref: http://oslandia.github.io/SFCGAL/installation.html
depends_on('cgal@4.3: +core')
depends_on('boost@1.54.0:')
depends_on('mpfr@2.2.1:')
depends_on('gmp@4.2:')
def cmake_args(self):
# It seems viewer is discontinued as of v1.3.0
# https://github.com/Oslandia/SFCGAL/releases/tag/v1.3.0
# Also, see https://github.com/Oslandia/SFCGAL-viewer
return ['-DSFCGAL_BUILD_VIEWER=OFF']
marcionreis/djangoecommerce | core/tests/test_views.py | Python | cc0-1.0 | 1,619 | 0.00248
# coding=utf-8
from django.test import TestCase, Client
from django.core.urlresolvers import reverse
from django.core import mail
class IndexViewTestCase(TestCase):
def setUp(self):
self.client = Client()
self.url = reverse('index')
def tearDown(self):
pass
def test_status_code(self):
response = self.client.get(self.url)
self.assertEquals(response.status_code, 200)
def test_template_used(self):
        response = self.client.get(self.url)
self.assertTemplateUsed(response, 'index.html')
class ContactViewTestCase(TestCase):
def setUp(self):
self.client = Client()
self.url = reverse('contact')
def test_view_ok(self):
response = self.client.get(self.url)
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'contact.html')
def test_form_error(self):
data = {'name': '', 'message': '', 'email': ''}
response = self.client.post(self.url, data)
self.assertFormError(response, 'form', 'name', 'Este campo é obrigatório.')
self.assertFormError(response, 'form', 'email', 'Este campo é obrigatório.')
self.assertFormError(response, 'form', 'message', 'Este campo é obrigatório.')
def test_form_ok(self):
data = {'name': 'test', 'message': 'test', 'email': 'test@test.com'}
response = self.client.post(self.url, data)
self.assertTrue(response.context['success'])
self.assertEquals(len(mail.outbox), 1)
self.assertEquals(mail.outbox[0].subject, 'Contato do Django E-Commerce')
karservice/kobra | kobra/tests/test_sesam.py | Python | mit | 4,475 | 0.000447
# -*- coding: utf-8 -*-
from datetime import timedelta
from unittest import mock
from uuid import UUID
from django.test import TestCase
from sesam import SesamStudent, StudentNotFound
from .. import factories
from ..models import Student
def sesam_response_factory(student):
# Factory function useful together with StudentFactory.
return SesamStudent(
liu_id=student.liu_id,
name=student.name,
union=student.union.name if student.union else None,
section_code=student.section.code if student.section else None,
nor_edu_person_lin=student.id,
liu_lin=student.liu_lin,
email=student.email
)
class SesamTests(TestCase):
def test_get_no_local_no_sesam(self):
# Without existing local entry and without Sesam match.
with mock.patch('sesam.SesamStudentServiceClient.get_student',
side_effect=StudentNotFound):
with self.assertRaises(Student.DoesNotExist):
Student.objects.get(liu_id='oller120', use_sesam=True)
def test_get_no_local(self):
# Without existing local entry.
mock_sesam_response = sesam_response_factory(
factories.StudentFactory.build())
with mock.patch('sesam.SesamStudentServiceClient.get_student',
return_value=mock_sesam_response):
student = Student.objects.get(liu_id=mock_sesam_response.liu_id,
use_sesam=True)
student.refresh_from_db() # Make sure the changes are persisted
self.assertEqual(student.id, mock_sesam_response.nor_edu_person_lin)
def test_get_with_local(self):
# With local entry.
original_student = factories.StudentFactory(union=None)
new_union = factories.UnionFactory()
# Mock response that looks like the student but now with a union
# membership
mock_sesam_response = sesam_response_factory(original_student)._replace(
union=new_union.name)
with mock.patch('sesam.SesamStudentServiceClient.get_student',
return_value=mock_sesam_response):
with mock.patch('kobra.models.Student.is_outdated',
new_callable=mock.PropertyMock, return_value=False):
unchanged_student = Student.objects.get(
id=mock_sesam_response.nor_edu_person_lin, use_sesam=True)
self.assertEqual(unchanged_student.union, None)
unchanged_student.refresh_from_db()
self.assertEqual(unchanged_student.union, None)
with mock.patch('sesam.SesamStudentServiceClient.get_student',
return_value=mock_sesam_response):
with mock.patch('kobra.models.Student.is_outdated',
new_callable=mock.PropertyMock, return_value=True):
changed_student = Student.objects.get(
id=mock_sesam_response.nor_edu_person_lin, use_sesam=True)
self.assertEqual(changed_student.union, new_union)
changed_student.refresh_from_db()
self.assertEqual(changed_student.union, new_union)
def test_get_with_local_no_sesam(self):
# With local entry.
student = factories.StudentFactory()
with mock.patch('sesam.SesamStudentServiceClient.get_student',
side_effect=StudentNotFound):
with mock.patch('kobra.models.Student.is_outdated',
new_callable=mock.PropertyMock,
return_value=True):
                fetched_student = Student.objects.get(pk=student.pk,
use_sesam=True)
self.assertEqual(student.pk, fetched_student.pk)
self.assertEqual(student.last_updated, fetched_student.last_updated)
def test_get_updates_mifare_id(self):
# With existing local entry.
student = factories.StudentFactory(mifare_id=None)
mock_sesam_response = sesam_response_factory(student)
mifare_id = 12345678
with mock.patch('sesam.SesamStudentServiceClient.get_student',
return_value=mock_sesam_response):
student = Student.objects.get(mifare_id=mifare_id, use_sesam=True)
self.assertEqual(student.mifare_id, mifare_id)
student.refresh_from_db() # Make sure the changes are persisted
self.assertEqual(student.mifare_id, mifare_id)
jessrosenfield/pants | src/python/pants/backend/python/tasks/checkstyle/common.py | Python | apache-2.0 | 9,330 | 0.010289
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import ast
import codecs
import itertools
import re
import textwrap
import tokenize
from abc import abstractmethod
from collections import Sequence
from twitter.common.lang import Compatibility, Interface
__all__ = (
'CheckstylePlugin',
'PythonFile',
)
class OffByOneList(Sequence):
def __init__(self, iterator):
# Make sure we properly handle unicode chars in code files.
self._list = list(iterator)
def __iter__(self):
return iter(self._list)
def __reversed__(self):
return reversed(self._list)
def __len__(self):
return len(self._list)
def __getitem__(self, element_id):
if isinstance(element_id, Compatibility.integer):
return self.__get_list_item(element_id)
elif isinstance(element_id, slice):
return self.__getslice(element_id)
raise TypeError('__getitem__ only supports integers and slices')
def __getslice(self, sl):
if sl.start == 0 or sl.stop == 0:
raise IndexError
new_slice = slice(sl.start - 1 if sl.start > 0 else sl.start,
sl.stop - 1 if sl.stop > 0 else sl.stop)
return self._list[new_slice]
def __get_list_item(self, item):
if item == 0:
raise IndexError
if item < 0:
return self._list[item]
return self._list[item - 1]
def index(self, value):
return self._list.index(value) + 1
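# Illustrative note on the 1-based indexing implemented above (values are arbitrary):
#
#   lines = OffByOneList(['first', 'second', 'third'])
#   lines[1]               # -> 'first'   (index 0 raises IndexError)
#   lines[1:3]             # -> ['first', 'second']   (slice bounds shift down by one)
#   lines.index('second')  # -> 2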
class PythonFile(object):
"""Checkstyle wrapper for Python source files."""
SKIP_TOKENS = frozenset((tokenize.COMMENT, tokenize.NL, tokenize.DEDENT))
def _remove_coding_header(self, blob):
"""
    There is a bug in ast.parse that causes it to throw a syntax error if
you have a header similar to...
# coding=utf-8,
we replace this line with something else to bypass the bug.
:param blob: file text contents
:return: adjusted blob
"""
# Remove the # coding=utf-8 to avoid AST erroneous parse errors
# https://bugs.python.org/issue22221
lines = blob.split('\n')
if lines and 'coding=utf-8' in lines[0]:
lines[0] = '#remove coding'
return '\n'.join(lines).encode('ascii', errors='replace')
def __init__(self, blob, filename='<expr>'):
self._blob = self._remove_coding_header(blob)
self.tree = ast.parse(self._blob, filename)
self.lines = OffByOneList(self._blob.split('\n'))
self.filename = filename
self.logical_lines = dict((start, (start, stop, indent))
                              for start, stop, indent in self.iter_logical_lines(self._blob))
def __iter__(self):
return iter(self.lines)
def __getitem__(self, line_number):
return self.lines[self.line_range(line_number)]
def __str__(self):
return 'PythonFile({filename})'.format(filename=self.filename)
@classmethod
def parse(cls, filename):
with codecs.open(filename, encoding='utf-8') as fp:
blob = fp.read()
return cls(blob, filename)
@classmethod
def from_statement(cls, statement):
"""A helper to construct a PythonFile from a triple-quoted string, for testing.
:param statement: Python file contents
:return: Instance of PythonFile
"""
return cls('\n'.join(textwrap.dedent(statement).split('\n')[1:]))
@classmethod
def iter_tokens(cls, blob):
""" Iterate over tokens found in blob contents
:param blob: Input string with python file contents
:return: token iterator
"""
return tokenize.generate_tokens(Compatibility.StringIO(blob).readline)
@property
def tokens(self):
"""An iterator over tokens for this Python file from the tokenize module."""
return self.iter_tokens(self._blob)
@staticmethod
def translate_logical_line(start, end, contents, indent_stack, endmarker=False):
"""Translate raw contents to logical lines"""
# Remove leading blank lines.
while contents[0] == '\n':
start += 1
contents.pop(0)
# Remove trailing blank lines.
while contents[-1] == '\n':
end -= 1
contents.pop()
indent = len(indent_stack[-1]) if indent_stack else 0
if endmarker:
indent = len(contents[0])
return start, end + 1, indent
def iter_logical_lines(self, blob):
"""Returns an iterator of (start_line, stop_line, indent) for logical lines """
indent_stack = []
contents = []
line_number_start = None
for token in self.iter_tokens(blob):
token_type, token_text, token_start = token[0:3]
if token_type == tokenize.INDENT:
indent_stack.append(token_text)
if token_type == tokenize.DEDENT:
indent_stack.pop()
if token_type in self.SKIP_TOKENS:
continue
contents.append(token_text)
if line_number_start is None:
line_number_start = token_start[0]
elif token_type in (tokenize.NEWLINE, tokenize.ENDMARKER):
yield self.translate_logical_line(
line_number_start,
token_start[0] + (1 if token_type is tokenize.NEWLINE else -1),
list(filter(None, contents)),
indent_stack,
endmarker=token_type == tokenize.ENDMARKER)
contents = []
line_number_start = None
def line_range(self, line_number):
"""Return a slice for the given line number"""
if line_number <= 0 or line_number > len(self.lines):
raise IndexError('NOTE: Python file line numbers are offset by 1.')
if line_number not in self.logical_lines:
return slice(line_number, line_number + 1)
else:
start, stop, _ = self.logical_lines[line_number]
return slice(start, stop)
def enumerate(self):
"""Return an enumeration of line_number, line pairs."""
return enumerate(self, 1)
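# Illustrative usage sketch (added; assumes the Python 2 runtime this module targets,
# since __init__ mixes bytes and str). A logical line that spans several physical
# lines is addressed through line_range()/__getitem__ by its starting line number.
_pf = PythonFile.from_statement("""
def foo():
    return (1 +
            2)
""")
assert _pf.line_range(2) == slice(2, 4)   # the return statement covers lines 2-3
assert len(_pf[2]) == 2                   # both physical lines come back together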
class Nit(object):
"""Encapsulate a Style faux pas.
The general taxonomy of nits:
Prefix
F => Flake8 errors
E => PEP8 error
W => PEP8 warning
T => Twitter error
First digit (category):
0 Naming
1 Indentation
2 Whitespace
3 Blank line
4 Import
5 Line length
6 Deprecation
7 Statement
8 Flake / Logic
9 Runtime
"""
COMMENT = 0
WARNING = 1
ERROR = 2
SEVERITY = {
COMMENT: 'COMMENT',
WARNING: 'WARNING',
ERROR: 'ERROR'
}
@staticmethod
def flatten_lines(*line_or_line_list):
return itertools.chain(*line_or_line_list)
def __init__(self, code, severity, python_file, message, line_number=None):
if severity not in self.SEVERITY:
raise ValueError('Severity should be one of {}'.format(' '.join(self.SEVERITY.values())))
self.python_file = python_file
if not re.match(r'[A-Z]\d{3}', code):
raise ValueError('Code must contain a prefix letter followed by a 3 digit number')
self.code = code
self.severity = severity
self._message = message
self._line_number = line_number
def __str__(self):
"""convert ascii for safe terminal output"""
flat = list(self.flatten_lines([self.message], self.lines))
return '\n |'.join(flat).encode('ascii', errors='replace')
@property
def line_number(self):
if self._line_number:
line_range = self.python_file.line_range(self._line_number)
if line_range.stop - line_range.start > 1:
return '%03d-%03d' % (line_range.start, line_range.stop - 1)
else:
return '%03d' % line_range.start
@property
def message(self):
return '{code}:{severity:<7} {filename}:{linenum} {message}'.format(
code=self.code,
severity=self.SEVERITY[self.severity],
filename=self.python_file.filename,
linenum=self.line_number or '*',
message=self._message)
@property
def lines(self):
return self.python_file[self._line_number] if self._line_number else []
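# Illustrative sketch (added; values are hypothetical) of how a plugin would report a nit.
_nit_pf = PythonFile.from_statement("""
import os
""")
_nit = Nit('T001', Nit.ERROR, _nit_pf, 'example message', line_number=1)
assert _nit.message.startswith('T001:ERROR')   # e.g. "T001:ERROR   <expr>:001 example message"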
class CheckstylePlugin(Interface):
"""Interface for checkstyle plugins."""
def __init__(self, python_file):
if not isinstance(python_file, PythonFile):
raise TypeError('CheckstylePlugin takes PythonFile objects.')
self.python_file = python_file
def iter_ast_types(self, ast_t
|
pcolmant/repanier
|
repanier/views/producer_invoice_class.py
|
Python
|
gpl-3.0
| 4,583
| 0.000873
|
import django
from django.http import Http404
from django.views.generic import DetailView
from repanier.const import DECIMAL_ZERO
from repanier.models.bankaccount import BankAccount
from repanier.models.invoice import ProducerInvoice
from repanier.models.offeritem import OfferItemReadOnly
from repanier.models.producer import Producer
from repanier.tools import get_repanier_template_name
class ProducerInvoiceView(DetailView):
template_name = get_repanier_template_name("producer_invoice_form.html")
model = ProducerInvoice
uuid = None
def get_object(self, queryset=None):
# Important to handle producer without any invoice
try:
obj = super().get_object(queryset)
except Http404:
obj = None # ProducerInvoice.objects.none()
return obj
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
if context["object"] is None:
# This producer has never been invoiced
raise Http404
else:
producer_invoice = self.get_object()
bank_account_set = BankAccount.objects.filter(
    producer_invoice=producer_invoice
).order_by("operation_date")
|
context["bank_account_set"] = bank_account_set
offer_item_set = (
OfferItemReadOnly.objects.filter(
permanence_id=producer_invoice.permanence_id,
producer_id=producer_invoice.producer_id,
)
.exclude(quantity_invoiced=DECIMAL_ZERO)
.order_by("producer_sort_order_v2")
.distinct()
)
context["offer_item_set"] = offer_item_set
if producer_invoice.invoice_sort_order is not None:
previous_producer_invoice = (
ProducerInvoice.objects.filter(
producer_id=producer_invoice.producer_id,
invoice_sort_order__isnull=False,
invoice_sort_order__lt=producer_invoice.invoice_sort_order,
)
.order_by("-invoice_sort_order")
.only("id")
.first()
)
next_producer_invoice = (
ProducerInvoice.objects.filter(
producer_id=producer_invoice.producer_id,
invoice_sort_order__isnull=False,
invoice_sort_order__gt=producer_invoice.invoice_sort_order,
)
.order_by("invoice_sort_order")
.only("id")
.first()
)
else:
previous_producer_invoice = None
next_producer_invoice = (
ProducerInvoice.objects.filter(
producer_id=producer_invoice.producer_id,
invoice_sort_order__isnull=False,
)
.order_by("invoice_sort_order")
.only("id")
.first()
)
if previous_producer_invoice is not None:
context["previous_producer_invoice_id"] = previous_producer_invoice.id
if next_producer_invoice is not None:
context["next_producer_invoice_id"] = next_producer_invoice.id
context["uuid"] = self.uuid
context["producer"] = producer_invoice.producer
return context
def get_queryset(self):
self.uuid = None
if self.request.user.is_staff:
producer_id = self.request.GET.get("producer", None)
else:
self.uuid = self.kwargs.get("uuid", None)
if self.uuid:
try:
producer = Producer.objects.filter(uuid=self.uuid).first()
producer_id = producer.id
except AttributeError:  # filter().first() returned None: unknown uuid
raise Http404
else:
raise Http404
pk = self.kwargs.get("pk", 0)
if pk == 0:
last_producer_invoice = (
ProducerInvoice.objects.filter(
producer_id=producer_id, invoice_sort_order__isnull=False
)
.only("id")
.order_by("-invoice_sort_order")
.first()
)
if last_producer_invoice is not None:
self.kwargs["pk"] = last_producer_invoice.id
return ProducerInvoice.objects.filter(producer_id=producer_id)
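# Hypothetical wiring sketch (added for illustration; paths and names are assumptions,
# not taken from the repanier URL configuration):
#
#   from django.urls import path
#   urlpatterns = [
#       # staff access: optional ?producer=<id> query parameter; pk 0 falls back to the latest invoice
#       path("producer-invoice/<int:pk>/", ProducerInvoiceView.as_view()),
#       # anonymous producer access through the uuid kwarg consumed in get_queryset()
#       path("producer-invoice/<int:pk>/<uuid>/", ProducerInvoiceView.as_view()),
#   ]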
|
google/trax
|
trax/tf_numpy/examples/mnist/train_test.py
|
Python
|
apache-2.0
| 1,761
| 0.004543
|
# coding=utf-8
# Copyright 2022 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test that the example training script works on fake data."""
import mock
import numpy as np
import tensorflow.compat.v2 as tf
from trax.tf_numpy.examples.mnist import dataset
from trax.tf_numpy.examples.mnist import train
class TFNumpyMnistExampleTest(tf.test.TestCase):
def testRuns(self):
with mock.patch.object(dataset, 'load', new=fake_mnist_data):
train.train(
batch_size=1,
learning_rate=0.1,
num_training_iters=10,
validation_steps=5)
train.train(
batch_size=2,
learning_rate=0.1,
num_training_iters=5,
validation_steps=2)
train.train(
batch_size=10,
learning_rate=0.1,
num_training_iters=1,
validation_steps=1)
def fake_mnist_data():
def gen_examples(num_examples):
x = np.array(
np.random.randn(num_examples, 784), copy=False, dtype=np.float32)
y = np.zeros((num_examples, 10), dtype=np.float32)
# One-hot label every fake example as class 0 (y[:][0] would only touch the first row).
y[:, 0] = 1.
return (x, y)
return (gen_examples(100), gen_examples(10), gen_examples(10))
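# Added note: this returns (train, validation, test) pairs of (x, y) arrays with shapes
# (100, 784)/(100, 10), (10, 784)/(10, 10) and (10, 784)/(10, 10).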
if __name__ == '__main__':
tf.compat.v1.enable_eager_execution()
tf.test.main()
|
rain87/pc-health
|
smart_attributes.py
|
Python
|
mit
| 3,144
| 0.000636
|
names = {
1: 'Read Error Rate',
2: 'Throughput Performance',
3: 'Spin-Up Time',
4: 'Start/Stop Count',
5: 'Reallocated Sectors Count',
6: 'Read Channel Margin',
7: 'Seek Error Rate',
8: 'Seek Time Performance',
9: 'Power-On Hours',
10: 'Spin Retry Count',
11: 'Recalibration Retries or Calibration Retry Count',
12: 'Power Cycle Count',
13: 'Soft Read Error Rate',
22: 'Current Helium Level',
170: 'Available Reserved Space',
171: 'SSD Program Fail Count',
172: 'SSD Erase Fail Count',
173: 'SSD Wear Leveling Count',
174: 'Unexpected power loss count',
175: 'Power Loss Protection Failure',
176: 'Erase Fail Count',
177: 'Wear Range Delta',
179: 'Used Reserved Block Count Total',
180: 'Unused Reserved Block Count Total',
181: 'Program Fail Count Total or Non-4K Aligned Access Count',
182: 'Erase Fail Count',
183: 'SATA Downshift Error Count or Runtime Bad Block',
184: 'End-to-End error / IOEDC',
185: 'Head Stability',
186: 'Induced Op-Vibration Detection',
187: 'Reported Uncorrectable Errors',
188: 'Command Timeout',
189: 'High Fly Writes',
190: 'Temperature Difference or Airflow Temperature',
191: 'G-sense Error Rate',
192: 'Power-off Retract Count, Emergency Retract Cycle Count (Fujitsu),[45] or Unsafe Shutdown Count',
193: 'Load Cycle Count or Load/Unload Cycle Count (Fujitsu)',
194: 'Temperature Celsius',
195: 'Hardware ECC Recovered',
196: 'Reallocation Event Count',
197: 'Current Pending Sector Count',
198: 'Uncorrectable Sector Count',
199: 'UltraDMA CRC Error Count',
200: 'Multi-Zone Error Rate',
201: 'Soft Read Error Rate or TA Counter Detected',
202: 'Data Address Mark errors or TA Counter Increased',
203: 'Run Out Cancel',
204: 'Soft ECC Correction',
205: 'Thermal Asperity Rate',
206: 'Flying Height',
207: 'Spin High Current',
208: 'Spin Buzz',
209: 'Offline Seek Performance',
210: 'Vibration During Write',
211: 'Vibration During Write',
212: 'Shock During Write',
220: 'Disk Shift',
221: 'G-Sense Error Rate',
222: 'Loaded Hours',
223: 'Load/Unload Retry Count',
224: 'Load Friction',
225: 'Load/Unload Cycle Count',
226: "Load 'In'-time",
227: 'Torque Amplification Count',
228: 'Power-Off Retract Cycle',
230: 'GMR Head Amplitude (magnetic HDDs), Drive Life Protection Status (SSDs)',
231: 'Life Left (SSDs) or Temperature',
232: 'Endurance Remaining or Available Reserved Space',
233: 'Media Wearout Indicator (SSDs) or Power-On Hours',
234: 'Average erase count AND Maximum Erase Count',
235: 'Good Block Count AND System(Free) Block Count',
240: "Head Flying Hours or 'Transfer Error Rate' (Fujitsu)",
241: 'Total LBAs Written',
242: 'Total LBAs Read',
243: 'Total LBAs Written Expanded',
244: 'Total LBAs Read Expanded',
249: 'NAND Writes (1GiB)',
250: 'Read Error Retry Rate',
251: 'Minimum Spares Remaining',
252: 'Newly Added Bad Flash Block',
254: 'Free Fall Protection'
}
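# Example lookup (added for illustration, not part of the original module):
#   names.get(194)                      -> 'Temperature Celsius'
#   names.get(42, 'Unknown attribute')  -> 'Unknown attribute' (id not tracked above)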
|
Unode/ete
|
ete3/tools/ete_build_lib/ordereddict.py
|
Python
|
gpl-3.0
| 10,439
| 0.001437
|
from __future__ import absolute_import
import six
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. BMC
# Bioinformatics 2010, 11:24. doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: huerta@embl.de
#
#
# #END_LICENSE#############################################################
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from six.moves._thread import get_ident as _get_ident
except ImportError:
from six.moves._dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
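    # Illustration (added; not part of the original backport): after od['a'] = 1 and
    # od['b'] = 2 the sentinel and links look roughly like
    #     root   = [link_b, link_a, None]
    #     link_a = [root,   link_b, 'a']
    #     link_b = [link_a, root,   'b']
    # so __iter__ simply follows the NEXT pointers from root: 'a', then 'b'.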
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in six.itervalues(self.__map):
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in list(other.keys()):
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in list(kwds.items()):
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not i
|
priestc/MultiExplorer
|
multiexplorer/multiexplorer/migrations/0007_auto_20170211_0802.py
|
Python
|
mit
| 967
| 0.002068
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-02-11 08:02
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('multiexplorer', '0006_pullhistory'),
|
]
operations = [
migrations.CreateModel(
name='PushHistory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('last_pushed', models.DateTimeField(default=django.utils.timezone.now)),
],
),
migrations.AddField(
model_name='memo',
name='signature',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='memo',
name='created',
field=models.DateTimeField(default=django.utils.timezone.now),
),
]
|
DGA-MI-SSI/YaCo
|
tests/tests/test_enums.py
|
Python
|
gpl-3.0
| 9,673
| 0.001654
|
# Copyright (C) 2017 The YaCo Authors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#!/bin/python
import runtests
class Fixture(runtests.Fixture):
def test_enums(self):
a, b = self.setup_repos()
a.run(
self.script("idaapi.add_enum(idaapi.BADADDR, 'name_a', idaapi.hexflag())"),
self.save_types(),
)
a.check_git(added=["enum"])
b.run(
self.check_types(),
self.script("idaapi.set_enum_name(idaapi.get_enum('name_a'), 'name_b')"),
self.save_types(),
)
b.check_git(modified=["enum"])
a.run(
self.check_types(),
self.script("idaapi.del_enum(idaapi.get_enum('name_b'))"),
self.save_types(),
)
a.check_git(deleted=["enum"])
b.run(
self.check_types(),
)
def test_enum_members(self):
a, b = self.setup_repos()
a.run(
self.script("idaapi.add_enum_member(idaapi.add_enum(idaapi.BADADDR, 'name_a', idaapi.hexflag()), 'field_a', 0, -1)"),
self.save_types(),
)
a.check_git(added=["enum", "enum_member"])
b.run(
self.check_types(),
self.script("idaapi.set_enum_member_name(idaapi.get_enum_member_by_name('field_a'), 'field_b')"),
self.save_types(),
)
b.check_git(modified=["enum"], added=["enum_member"], deleted=["enum_member"])
a.run(
self.check_types(),
self.script("idaapi.set_enum_name(idaapi.get_enum('name_a'), 'name_b')"),
self.save_types(),
)
a.check_git(modified=["enum"])
b.run(
self.check_types(),
self.script("idaapi.del_enum_member(idaapi.get_enum('name_b'), 0, 0, -1)"),
self.save_types(),
)
b.check_git(deleted=["enum_member"], modified=["enum"])
a.run(
self.check_types(),
)
def test_enum_types(self):
a, b = self.setup_repos()
constants = """
ea = 0x66045614
# flags, bits, bitfield, ea, operand, fields
enums = [
(idaapi.hexflag(), 0, False, ea+0x00, 0, [0, 0x40, 16]),
(idaapi.charflag(), 0, False, ea+0x19, 1, [ord('a'), ord('z'), ord('$')]),
(idaapi.decflag(), 1, False, ea+0x02, 0, [0, 10, 16]),
(idaapi.octflag(), 0, False, ea+0x06, 1, [0, 8, 13, 16]),
(idaapi.binflag(), 0, True, ea+0x04, 0, [1, 2]),
]
"""
a.run(
self.script(constants + """
idx = 0
def get_cmt():
global idx
idx += 1
return "cmt_%02x" % idx
eidx = 0
for (flags, bits, bitfield, ea, operand, fields) in enums:
name = "enum_%x" % eidx
eidx += 1
eid = idaapi.add_enum(idaapi.BADADDR, name, flags)
if bits != 0:
idaapi.set_enum_width(eid, bits)
if bitfield:
idaapi.set_enum_bf(eid, True)
for rpt in [False, True]:
idaapi.set_enum_cmt(eid, get_cmt(), rpt)
fidx = 0
for f in fields:
field = "%s_%x" % (name, fidx)
fidx += 1
if bitfield:
idaapi.add_enum_member(eid, field, f, f)
else:
idaapi.add_enum_member(eid, field, f, -1)
cid = idaapi.get_enum_member_by_name(field)
for rpt in [False, True]:
idaapi.set_enum_member_cmt(cid, get_cmt(), rpt)
idaapi.op_enum(ea, operand, eid, 0)
"""),
self.save_types(),
self.save_last_ea()
)
b.run(
self.check_last_ea(),
self.check_types(),
self.script(constants + """
for (flags, bits, bitfield, ea, operand, fields) in enums:
idaapi.clr_op_type(ea, operand)
"""),
self.save_last_ea(),
)
b.check_git(modified=["basic_block"])
a.run(
self.check_last_ea(),
self.script(constants + """
eidx = 0
for (flags, bits, bitfield, ea, operand, fields) in enums:
name = "enum_%x" % eidx
eidx += 1
eid = idaapi.get_enum(name)
idaapi.op_enum(ea, operand, eid, 0)
"""),
self.save_last_ea(),
)
a.check_git(modified=["basic_block"])
b.run(
self.check_last_ea(),
self.script(constants + """
eidx = 0
for (flags, bits, bitfield, ea, operand, fields) in enums:
name = "enum_%x" % eidx
eidx += 1
eid = idaapi.get_enum(name)
idaapi.del_enum(eid)
"""),
self.save_types(),
self.save_last_ea(),
)
b.check_git(deleted=["enum"] * 5 + ["enum_member"] * 15)
a.run(
self.check_last_ea(),
self.check_types(),
)
def test_enum_bf(self):
a, b = self.setup_repos()
a.run(
self.script("idaapi.add_enum(idaapi.BADADDR, 'name_a', idaapi.hexflag())"),
self.save_types(),
)
a.check_git(added=["enum"])
b.run(
self.check_types(),
self.script("idaapi.set_enum_bf(idaapi.get_enum('name_a'), True)"),
self.save_types(),
)
b.check_git(modified=["enum"])
a.run(
self.check_types(),
self.script("idaapi.set_enum_bf(idaapi.get_enum('name_a'), False)"),
self.save_types(),
)
a.check_git(modified=["enum"])
b.run(
self.check_types(),
)
def test_enum_cmt(self):
a, b = self.setup_repos()
a.run(
self.script("idaapi.add_enum(idaapi.BADADDR, 'name_a', idaapi.hexflag())"),
self.save_types(),
)
a.check_git(added=["enum"])
b.run(
self.check_types(),
self.script("idaapi.set_enum_cmt(idaapi.get_enum('name_a'), 'some_comment', False)"),
self.save_types(),
)
b.check_git(modified=["enum"])
a.run(
self.check_types(),
self.script("idaapi.set_enum_cmt(idaapi.get_enum('name_a'), '', False)"),
self.save_types(),
)
a.check_git(modified=["enum"])
b.run(
self.check_types(),
)
def test_renamed_enums_are_still_applied(self):
a, b = self.setup_cmder()
a.run(
self.script("""
eid = idaapi.add_enum(idaapi.BADADDR, "somename", idaapi.hexflag())
idaapi.add_enum_member(eid, "somevalue", 0x8)
idaapi.add_enum_member(eid, "anothervalue", 0x18)
"""),
self.sync(),
self.script("""
eid = idaapi.get_enum("somename")
ea = 0x40199D
idaapi.op_enum(ea, 0, eid, 0)
ea = 0x4019BE
idaapi.op_enum(ea, 1, eid, 0)
"""),
self.save_types(),
self.save_ea(0x40199D),
self.save_ea(0x4019BE),
)
a.check_git(added=["binary", "segment", "segment_chunk", "function"] + ["basic_block"] * 2)
b.run(
self.check_types(),
self.check_ea(0x40199D),
self.check_ea(0x4019BE),
self.script("""
idaapi.set_enum_name(idaapi.get_enum("somename"), "anothername")
"""),
self.save_types(),
self.save_ea(0x40199D),
self.save_ea(0x4019BE),
)
b.check_git(modified=["enum"])
a.run(
self.check_types(),
self.check_ea(0x40199D),
self.check_ea(0x4019BE),
)
def test_create_same_enum_independently(self):
a, b = self.setup_cmder()
a.run(
self.script("""
eid = idaapi.add_enum(idaapi.BADADDR, "somename", idaapi.hexflag())
idaapi.add_enum_member(eid, "somevalue", 0x4)
"""),
# create an arbitrary commit which should stay
|
htlcnn/pyrevitscripts
|
HTL.tab/Test.panel/Test.pushbutton/keyman/keyman/keys/models.py
|
Python
|
mit
| 300
| 0.006667
|
from django.db import models
from django.core.urlresolvers import reverse
class Software(models.Model):
name = models.CharField(max_length=200)
def __unicode__(self):
return self.name
def get_absolute_url(self):
return reverse('software_edit', kwargs={'pk': self.pk})
|
dennyb87/pycity
|
week5/card_validation_test.py
|
Python
|
gpl-3.0
| 1,101
| 0.002725
|
import unittest
from card_validation import (
numberToMatrix,
getOddDigits,
getEvenDigits,
sumOfDoubleOddPlace,
sumOfEvenPlace,
getDigit,
isValid
)
class CardValidationTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(CardValidationTest, self).__init__(*args, **kwargs)
self.card_number = "4388576018410707"
self.matrix = numberToMatrix(self.card_number)
self.odds = getOddDigits(self.matrix)
self.evens = getEvenDigits(self.matrix)
def test_numberToMatrix(self):
self.assertEqual(self.matrix.__class__, list)
def test_getOddDigits(self):
self.assertEqual(self.odds.__class__, list)
def test_getEvenDigits(self):
self.assertEqual(self.evens.__class__, list)
def test_sumOfDoubleOddPlace(self):
self.assertEqual(sumOfDoubleOddPlace(self.odds), 29)
def test_getDigit(self):
self.assertEqual(getDigit(9), 9)
def test_isValid(self):
self.assertEqual(isValid(self.card_number), True)
if __name__ == "__main__":
unittest.main()
|
WillGuan105/django
|
tests/gis_tests/relatedapp/tests.py
|
Python
|
bsd-3-clause
| 15,582
| 0.002246
|
from __future__ import unicode_literals
from django.contrib.gis.db.models import F, Collect, Count, Extent, Union
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.geos import GEOSGeometry, MultiPoint, Point
from django.db import connection
from django.test import TestCase, skipUnlessDBFeature
from django.test.utils import override_settings
from django.utils import timezone
from ..utils import no_oracle
from .models import (
Article, Author, Book, City, DirectoryEntry, Event, Location, Parcel,
)
@skipUnlessDBFeature("gis_enabled")
class RelatedGeoModelTest(TestCase):
fixtures = ['initial']
def test02_select_related(self):
"Testing `select_related` on geographic models (see #7126)."
qs1 = City.objects.order_by('id')
qs2 = City.objects.order_by('id').select_related()
qs3 = City.objects.order_by('id').select_related('location')
# Reference data for what's in the fixtures.
cities = (
('Aurora', 'TX', -97.516111, 33.058333),
('Roswell', 'NM', -104.528056, 33.387222),
('Kecksburg', 'PA', -79.460734, 40.18476),
)
for qs in (qs1, qs2, qs3):
for ref, c in zip(cities, qs):
nm, st, lon, lat = ref
self.assertEqual(nm, c.name)
self.assertEqual(st, c.state)
self.assertEqual(Point(lon, lat), c.location.point)
@skipUnlessDBFeature("has_transform_method")
def test03_transform_related(self):
"Testing the `transform` GeoQuerySet method on related geographic models."
# All the transformations are to state plane coordinate systems using
# US Survey Feet (thus a tolerance of 0 implies error w/in 1 survey foot).
tol = 0
def check_pnt(ref, pnt):
|
self.assertAlmostEqual(ref.x, pnt.x, tol)
self.assertAlmostEqual(ref.y, pnt.y, tol)
self.assertEqual(ref.srid, pnt.srid)
# Each city transformed to the SRID of their state plane coordinate system.
transformed = (('Kecksburg', 2272, 'POINT(1490553.98959621 314792.131023984)'),
('Roswell', 2257, 'POINT(481902.189077221 868477.766629735)'),
('Aurora', 2276, 'POINT(2269923.2484839 7069381.28722222)'),
)
for name, srid, wkt in transformed:
# Doing this implicitly sets `select_related` select the location.
# TODO: Fix why this breaks on Oracle.
qs = list(City.objects.filter(name=name).transform(srid, field_name='location__point'))
check_pnt(GEOSGeometry(wkt, srid), qs[0].location.point)
@skipUnlessDBFeature("supports_extent_aggr")
def test_related_extent_aggregate(self):
"Testing the `Extent` aggregate on related geographic models."
# This combines the Extent and Union aggregates into one query
aggs = City.objects.aggregate(Extent('location__point'))
# One for all locations, one that excludes New Mexico (Roswell).
all_extent = (-104.528056, 29.763374, -79.460734, 40.18476)
txpa_extent = (-97.516111, 29.763374, -79.460734, 40.18476)
e1 = City.objects.aggregate(Extent('location__point'))['location__point__extent']
e2 = City.objects.exclude(state='NM').aggregate(Extent('location__point'))['location__point__extent']
e3 = aggs['location__point__extent']
# The tolerance value is to four decimal places because of differences
# between the Oracle and PostGIS spatial backends on the extent calculation.
tol = 4
for ref, e in [(all_extent, e1), (txpa_extent, e2), (all_extent, e3)]:
for ref_val, e_val in zip(ref, e):
self.assertAlmostEqual(ref_val, e_val, tol)
@skipUnlessDBFeature("supports_extent_aggr")
def test_related_extent_annotate(self):
"""
Test annotation with Extent GeoAggregate.
"""
cities = City.objects.annotate(points_extent=Extent('location__point')).order_by('name')
tol = 4
self.assertAlmostEqual(
cities[0].points_extent,
(-97.516111, 33.058333, -97.516111, 33.058333),
tol
)
@skipUnlessDBFeature("has_unionagg_method")
def test_related_union_aggregate(self):
"Testing the `Union` aggregate on related geographic models."
# This combines the Extent and Union aggregates into one query
aggs = City.objects.aggregate(Union('location__point'))
# These are the points that are components of the aggregate geographic
# union that is returned. Each point corresponds to a City PK.
p1 = Point(-104.528056, 33.387222)
p2 = Point(-97.516111, 33.058333)
p3 = Point(-79.460734, 40.18476)
p4 = Point(-96.801611, 32.782057)
p5 = Point(-95.363151, 29.763374)
# The second union aggregate is for a union
# query that includes limiting information in the WHERE clause (in other
# words a `.filter()` precedes the call to `.aggregate(Union()`).
ref_u1 = MultiPoint(p1, p2, p4, p5, p3, srid=4326)
ref_u2 = MultiPoint(p2, p3, srid=4326)
u1 = City.objects.aggregate(Union('location__point'))['location__point__union']
u2 = City.objects.exclude(
name__in=('Roswell', 'Houston', 'Dallas', 'Fort Worth'),
).aggregate(Union('location__point'))['location__point__union']
u3 = aggs['location__point__union']
self.assertEqual(type(u1), MultiPoint)
self.assertEqual(type(u3), MultiPoint)
# Ordering of points in the result of the union is not defined and
# implementation-dependent (DB backend, GEOS version)
self.assertSetEqual({p.ewkt for p in ref_u1}, {p.ewkt for p in u1})
self.assertSetEqual({p.ewkt for p in ref_u2}, {p.ewkt for p in u2})
self.assertSetEqual({p.ewkt for p in ref_u1}, {p.ewkt for p in u3})
def test05_select_related_fk_to_subclass(self):
"Testing that calling select_related on a query over a model with an FK to a model subclass works"
# Regression test for #9752.
list(DirectoryEntry.objects.all().select_related())
def test06_f_expressions(self):
"Testing F() expressions on GeometryFields."
# Constructing a dummy parcel border and getting the City instance for
# assigning the FK.
b1 = GEOSGeometry(
'POLYGON((-97.501205 33.052520,-97.501205 33.052576,'
'-97.501150 33.052576,-97.501150 33.052520,-97.501205 33.052520))',
srid=4326
)
pcity = City.objects.get(name='Aurora')
# First parcel has incorrect center point that is equal to the City;
# it also has a second border that is different from the first as a
# 100ft buffer around the City.
c1 = pcity.location.point
c2 = c1.transform(2276, clone=True)
b2 = c2.buffer(100)
Parcel.objects.create(name='P1', city=pcity, center1=c1, center2=c2, border1=b1, border2=b2)
# Now creating a second Parcel where the borders are the same, just
# in different coordinate systems. The center points are also the
# same (but in different coordinate systems), and this time they
# actually correspond to the centroid of the border.
c1 = b1.centroid
c2 = c1.transform(2276, clone=True)
Parcel.objects.create(name='P2', city=pcity, center1=c1, center2=c2, border1=b1, border2=b1)
# Should return the second Parcel, which has the center within the
# border.
qs = Parcel.objects.filter(center1__within=F('border1'))
self.assertEqual(1, len(qs))
self.assertEqual('P2', qs[0].name)
if connection.features.supports_transform:
# This time center2 is in a different coordinate system and needs
# to be wrapped in transformation SQL.
qs = Parcel.objects.filter(center2__within=F('border1'))
self.assertEqual(1, len(qs))
self.assertEqual('P2', qs[0].name)
# Should return the first Parcel,
|
fnp/wolnelektury
|
src/infopages/migrations/0001_initial.py
|
Python
|
agpl-3.0
| 4,109
| 0.008033
|
# This file is part of Wolnelektury, licensed under GNU Affero GPLv3 or later.
# Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
#
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='InfoPage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('main_page', models.IntegerField(null=True, verbose_name='main page priority', blank=True)),
('slug', models.SlugField(unique=True, max_length=120, verbose_name='Slug')),
('title', models.CharField(max_length=120, verbose_name='Title', blank=True)),
('title_de', models.CharField(max_length=120, null=True, verbose_name='Title', blank=True)),
('title_en', models.CharField(max_length=120, null=True, verbose_name='Title', blank=True)),
('title_es', models.CharField(max_length=120, null=True, verbose_name='Title', blank=True)),
('title_fr', models.CharField(max_length=120, null=True, verbose_name='Title', blank=True)),
('title_it', models.CharField(max_length=120, null=True, verbose_name='Title', blank=True)),
('title_lt', models.CharField(max_length=120, null=True, verbose_name='Title', blank=True)),
('title_pl', models.CharField(max_length=120, null=True, verbose_name='Title', blank=True)),
('title_ru', models.CharField(max_length=120, null=True, verbose_name='Title', blank=True)),
('title_uk', models.CharField(max_length=120, null=True, verbose_name='Title', blank=True)),
('left_column', models.TextField(verbose_name='left column', blank=True)),
('left_column_de', models.TextField(null=True, verbose_name='left column', blank=True)),
('left_column_en', models.TextField(null=True, verbose_name='left column', blank=True)),
('left_column_es', models.TextField(null=True, verbose_name='left column', blank=True)),
('left_column_fr', models.TextField(null=True, verbose_name='left column', blank=True)),
('left_column_it', models.TextField(null=True, verbose_name='left column', blank=True)),
('left_column_lt', models.TextField(null=True, verbose_name='left column', blank=True)),
('left_column_pl', models.TextField(null=True, verbose_name='left column', blank=True)),
('left_column_ru', models.TextField(null=True, verbose_name='left column', blank=True)),
('left_column_uk', models.TextField(null=True, verbose_name='left column', blank=True)),
('right_column', models.TextField(verbose_name='right column', blank=True)),
('right_column_de', models.TextField(null=True, verbose_name='right column', blank=True)),
('right_column_en', models.TextField(null=True, verbose_name='right column', blank=True)),
('right_column_es', models.TextField(null=True, verbose_name='right column', blank=True)),
('right_column_fr', models.TextField(null=True, verbose_name='right column', blank=True)),
('right_column_it', models.TextField(null=True, verbose_name='right column', blank=True)),
('right_column_lt', models.TextField(null=True, verbose_name='right column', blank=True)),
('right_column_pl', models.TextField(null=True, verbose_name='right column', blank=True)),
('right_column_ru', models.TextField(null=True, verbose_name='right column', blank=True)),
('right_column_uk', models.TextField(null=True, verbose_name='right column', blank=True)),
],
options={
'ordering': ('main_page', 'slug'),
'verbose_name': 'info page',
'verbose_name_plural': 'info pages',
},
bases=(models.Model,),
),
]
|
Guidobelix/pyload
|
module/plugins/accounts/OpenloadCo.py
|
Python
|
gpl-3.0
| 413
| 0.014528
|
# -*- coding: utf-8 -*-
from module.plugins.internal.XFSAccount import XFSAccount
class OpenloadCo(XFSAccount):
__name__ = "OpenloadCo"
__type__ = "account"
__version__ = "0.02"
__status__ = "testing"
__description__ = """Openload.co account plugin"""
__license__ = "GPLv3"
__authors__ = [("Walter
|
Purcaro", "vuolter@gmail.com")]
PLUGIN_DOMAIN = "openload.co"
|
|
gem/oq-engine
|
openquake/hazardlib/gsim/morikawa_fujiwara_2013.py
|
Python
|
agpl-3.0
| 14,534
| 0.003509
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2021 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Module exports :class:`MorikawaFujiwara2013Crustal`
"""
import numpy as np
from openquake.baselib.general import CallableDict
from openquake.hazardlib.gsim.base import GMPE, CoeffsTable
from openquake.hazardlib import const
from openquake.hazardlib.imt import PGA, PGV, SA, JMA
CONSTS = {
"D0": 300.,
"e": 0.5,
"Mw01": 8.2,
"Mw1": 16.0}
def _get_basin_term(C, z1pt4):
d0 = CONSTS["D0"]
tmp = np.ones_like(z1pt4) * C['Dlmin']
return C['pd'] * np.log10(np.maximum(tmp, z1pt4)/d0)
def _get_intensity_correction_term(C, region, xvf, focal_depth):
if region == 'NE':
gamma = C['gNE']
elif region == 'SW':
gamma = C['gEW']
elif region is None:
gamma = 0.
else:
raise ValueError('Unsupported region')
return (gamma * np.minimum(xvf, 75.0) *
np.maximum(focal_depth-30., 0.))
_get_magnitude_term = CallableDict()
@_get_magnitude_term.add(const.TRT.ACTIVE_SHALLOW_CRUST)
def _get_magnitude_term_1(trt, region, C, rrup, mw1prime, mw1, rhypo):
return (C['a'] * (mw1prime - mw1)**2 + C['b1'] * rrup + C['c1'] -
np.log10(rrup + C['d'] * 10.**(CONSTS['e'] * mw1prime)))
@_get_magnitude_term.add(const.TRT.SUBDUCTION_INTERFACE)
def _get_magnitude_term_2(trt, region, C, rrup, mw1prime, mw1, rhypo):
return (C['a'] * (mw1prime - mw1)**2 + C['b2'] * rrup + C['c2'] -
np.log10(rrup + C['d'] * 10.**(CONSTS['e']*mw1prime)))
@_get_magnitude_term.add(const.TRT.SUBDUCTION_INTRASLAB)
def _get_magnitude_term_3(trt, region, C, rrup, mw1prime, mw1, rhypo):
tmp = (C['a'] * (mw1prime - mw1)**2 + C['b3'] * rrup + C['c3'] -
np.log10(rrup + C['d'] * 10.**(CONSTS['e']*mw1prime)))
if region == "SW":
tmp[rhypo < 80] += C['PH']
return tmp
def _get_shallow_amplification_term(C, vs30):
tmp = np.ones_like(vs30) * C['Vsmax']
return C['ps'] * np.log10(np.minimum(tmp, vs30)/C['V0'])
class MorikawaFujiwara2013Crustal(GMPE):
"""
Implements the GMM from Morikawa and Fujiwara published as "A New Ground
Motion Prediction Equation for Japan Applicable up to M9 Mega-Earthquake",
Journal of Disaster Research, Vol.8, No.5, 2013.
"""
#: Supported tectonic region type is active shallow crust
DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.ACTIVE_SHALLOW_CRUST
#: Supported intensity measure types are spectral acceleration,
#: peak ground velocity and peak ground acceleration
DEFINED_FOR_INTENSITY_MEASURE_TYPES = {PGA, PGV, SA, JMA}
#: Supported intensity measure component is orientation-independent
#: measure :attr:`~openquake.hazardlib.const.IMC.RotD50`
DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.GEOMETRIC_MEAN
#: Supported standard deviation types are inter-event, intra-event
#: and total, see equation 2, pag 106.
DEFINED_FOR_STANDARD_DEVIATION_TYPES = {const.StdDev.TOTAL}
#: Required site parameters are:
#: - Vs30 - time averaged shear-wave velocity [m/s]
#: - z1p4 - Depth to the 1.4 km/s interface [m]
#: - xvf - Distance from the volcanic front [km, positive in the forearc]
REQUIRES_SITES_PARAMETERS = {'vs30', 'z1pt4', 'xvf'}
#: Required rupture parameters are magnitude, and hypocentral depth [km].
REQUIRES_RUPTURE_PARAMETERS = {'mag', 'hypo_depth'}
#: Required distance measure is Rrup [km]
REQUIRES_DISTANCES = {'rrup'}
region = None
model = 'model1'
def compute(self, ctx: np.recarray, imts, mean, sig, tau, phi):
trt = self.DEFINED_FOR_TECTONIC_REGION_TYPE
mw01 = CONSTS["Mw01"]
mw1 = CONSTS["Mw1"]
mw1prime = np.array(ctx.mag)
mw1prime[ctx.mag >= mw01] = mw01
for m, imt in enumerate(imts):
C = self.COEFFS[imt]
if self.model == 'model1':
mag_term = _get_magnitude_term(
trt, self.region, C, ctx.rrup, mw1prime, mw1,
ctx.hypo_depth)
else:
msg = "Model not supported"
raise ValueError(msg)
mean[m] = (mag_term + _get_basin_term(C, ctx.z1pt4) +
_get_shallow_amplification_term(C, ctx.vs30) +
_get_intensity_correction_term(
C, self.region, ctx.xvf, ctx.hypo_depth))
mean[m] = np.log(10**mean[m] / 980.665)
sig[m] = C['sigma'] * np.log(10)
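        # Worked illustration (added; hypothetical value): a regression mean of 2.0
        # corresponds to 10**2.0 = 100 cm/s**2, so the stored value is
        # log(100 / 980.665) ~= -2.283, i.e. the natural log of the intensity measure in g.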
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT a b1 b2 b3 c1 c2 c3 d pd Dlmin ps Vsmax V0 gNE gEW PH sigma
jma -0.0321 -0.003736 -0.003320 -0.004195 6.9301 6.9042 7.2975 0.005078 0.032214 320.0 -0.756496 1200.0 350 0.000061 0.000059 -0.2284 0.3493
pga -0.0321 -0.005315 -0.005042 -0.005605 7.0830 7.1181 7.5035 0.011641 -0.055358 15.0 -0.523212 1950.0 350 0.000076 0.000063 -0.2426 0.3761
pgv -0.0325 -0.002654 -0.002408 -0.003451 5.6952 5.6026 6.0030 0.002266 0.129142 105.0 -0.693402 850.0 350 0.000047 0.000037 -0.2643 0.3399
0.05 -0.0321 -0.005912 -0.005674 -0.006231 7.2151 7.2759 7.6801 0.012812 -0.071415 15.0 -0.368827 2000.0 350 0.000088 0.000066 -0.2414 0.3938
0.06 -0.0321 -0.006097 -0.005864 -0.006405 7.2852 7.3523 7.7504 0.014508 -0.081796 15.0 -0.309232 2000.0 350 0.000087 0.000066 -0.2427 0.4039
0.07 -0.0321 -0.006142 -0.005967 -0.006507 7.3397 7.4152 7.8127 0.015574 -0.089891 15.0 -0.247786 2000.0 350 0.000086 0.000066 -0.2439 0.4149
0.08 -0.0323 -0.006104 -0.006033 -0.006594 7.4122 7.4929 7.8938 0.016465 -0.093581 15.0 -0.234067 2000.0 350 0.000085 0.000066 -0.2451 0.4219
0.09 -0.0325 -0.006112 -0.006079 -0.006689 7.4817 7.5649 7.9649 0.017390 -0.089604 15.0 -0.252853 2000.0 350 0.000084 0.000066 -0.2461 0.4259
0.1 -0.0327 -0.006116 -0.006061 -0.006686 7.5396 7.6214 8.0219 0.018438 -0.084855 15.0 -0.284416 2000.0 350 0.000084 0.000066 -0.2470 0.4266
0.11 -0.0324 -0.005998 -0.005971 -0.006576 7.5072 7.5947 7.9960 0.017396 -0.076412 15.0 -0.305700 2000.0 350 0.000083 0.000066 -0.2479 0.4256
0.12 -0.0322 -0.005896 -0.005878 -0.006448 7.4920 7.5837 7.9782 0.016457 -0.076948 15.0 -0.351975 2000.0 350 0.000083 0.000066 -0.2486 0.4243
0.13 -0.0321 -0.005786 -0.005757 -0.006331 7.4788 7.5645 7.9644 0.015607 -0.072886 15.0 -0.395130 2000.0 350 0.000082 0.000066 -0.2494 0.4229
0.15 -0.0321 -0.005564 -0.005579 -0.006078 7.4630 7.5471 7.9360 0.014118 -0.061401 15.0 -0.461774 2000.0 350 0.000082 0.000066 -0.2506 0.4193
0.17 -0.0321 -0.005398 -0.005382 -0.005813 7.4557 7.5245 7.9097 0.012855 -0.051288 15.0 -0.536789 2000.0 350 0.000081 0.000066 -0.2516 0.4162
0.2 -0.0321 -0.005151 -0.005027 -0.005476 7.4307 7.4788 7.8719 0.011273 -0.043392 15.0 -0.633661 2000.0 350 0.000080 0.000065 -0.2528 0.4152
0.22 -0.0322 -0.005000 -0.004827 -0.005204 7.4139 7.4461 7.8311 0.010380 -0.035431 15.0 -0.665914 2000.0 350 0.000080 0.000065 -0.2535 0.4130
0.25 -0.0321 -0.004836 -0.004519 -0.004907 7.3736 7.3728 7.7521 0.009225 -0.032667 15.0 -0.719524 2000.0 350 0.000079 0.000065 -0.2543 0.4089
0.3 -0.0321 -0.004543 -0.004095 -0.004621 7.2924 7.2797 7.6656 0.007670 -0.019984 15.0 -0.793002 2000.0 350 0.000078 0.000065 -0.2553 0.4063
0.35 -0.0321 -0.004379 -0.003717 -0
|