| text (string) | meta (dict) |
|---|---|
import os
import sys
import unittest
DIR = os.path.dirname(os.path.abspath(__file__))
sys.path[0] = os.path.dirname(DIR)
from yorbay.builder import build_from_standalone_source
from yorbay.compiler import ErrorWithSource
from yorbay.globals import Global
from yorbay.debug.stacktrace import get_stack
class DummyGlobal(Global):
def get(self):
return ''
class TestStackTrace(unittest.TestCase):
def setUp(self):
self.path = '/some/path.l20n'
self.l20n = None
def build(self, source):
self.l20n = build_from_standalone_source(source, path=self.path, debug=True)
def resolve_exc(self, entity_name):
try:
self.l20n.make_env().resolve_entity(entity_name)
except ErrorWithSource as e:
return e.cause
except StandardError as e:
return e
self.fail()
def test_lazy_hash_resolved_from_other_entity(self):
self.build(
'\n'
'<myhash {key: "value"}>\n'
'\n'
'<show "{{ myhash.noSuchKey }}">'
)
exc = self.resolve_exc('show')
self.assertTrue(isinstance(exc, KeyError))
stack = get_stack(exc)
self.assertEqual(len(stack), 2)
self.assertFrameEqual(stack[0], 'entity', 'myhash', 1, 8)
self.assertFrameEqual(stack[1], 'entity', 'show', 3, 16)
# TODO currently this test fails
# def test_lazy_hash_directly_resolved(self):
# self.build('<show {key: "value"}>')
# stack = get_stack(self.resolve_exc('show'))
# self.assertTrue(len(stack) == 1, msg=stack)
def test_nested_expressions(self):
self.build(
'<show "{{\n'
' 5\n'
' /\n'
' 0 \n'
' +\n'
' 1 }}">'
)
exc = self.resolve_exc('show')
self.assertTrue(isinstance(exc, ArithmeticError), msg=(type(exc), exc))
stack = get_stack(exc)
self.assertEqual(len(stack), 1)
self.assertFrameEqual(stack[0], 'entity', 'show', 2, 4)
def assertFrameEqual(self, frame, entry_type, entry_name, line, column):
self.assertEqual(
(frame.entry_type, frame.entry_name, frame.pos.line, frame.pos.column),
(entry_type, entry_name, line, column)
)
class TestSimilarity(unittest.TestCase):
def setUp(self):
self.l20n = build_from_standalone_source("""
<withVar "{{ $varName }}">
<withEntry "{{ entryName }}">
<withEntryNotSimilar "{{ entryNameWhichIsSeriouslyWrong }}">
<withGlobal "{{ @globalName }}">
<entryname "">
<entname "">
<macroWithVar($macroVarName) { $macrovarname }>
<withMacroVar "{{ macroWithVar(12) }}">
""", debug=True)
def resolve_exc(self, entity_name, vars=None, globals=None):
try:
self.l20n.make_env(vars=vars, globals=globals).resolve_entity(entity_name)
except ErrorWithSource as e:
return e.cause
except StandardError as e:
return e
self.fail()
def test_entry_similar(self):
exc = self.resolve_exc('withEntry')
self.assertTrue(isinstance(exc, NameError))
self.assertTrue('Did you mean "entryname"' in str(exc), msg=str(exc))
def test_entry_not_similar(self):
exc = self.resolve_exc('withEntryNotSimilar')
self.assertTrue(isinstance(exc, NameError))
self.assertTrue('Did you mean' not in str(exc), msg=str(exc))
def test_variable_similar(self):
exc = self.resolve_exc('withVar', vars={
'vrName': 1, 'vaarNme': 2,
})
self.assertTrue(isinstance(exc, NameError))
self.assertTrue('Did you mean "vrName"' in str(exc), msg=str(exc))
def test_macro_variable_similar(self):
exc = self.resolve_exc('withMacroVar', vars={
'maakroVarName': 1, 'x': 2,
})
self.assertTrue(isinstance(exc, NameError))
self.assertTrue('Did you mean "macroVarName"' in str(exc), msg=str(exc))
exc = self.resolve_exc('withMacroVar', vars={
'marcovarname': 1, 'x': 2,
})
self.assertTrue(isinstance(exc, NameError))
self.assertTrue('Did you mean "marcovarname"' in str(exc), msg=str(exc))
def test_variable_not_similar(self):
exc = self.resolve_exc('withVar', vars={
'first': 1, 'second': 2,
})
self.assertTrue(isinstance(exc, NameError))
self.assertTrue('Did you mean' not in str(exc), msg=str(exc))
def test_global_similar(self):
exc = self.resolve_exc('withGlobal', globals={
'cookies': DummyGlobal(), 'globbbName': DummyGlobal(), 'globlalName': DummyGlobal()
})
self.assertTrue(isinstance(exc, NameError))
self.assertTrue('Did you mean "globlalName"' in str(exc), msg=str(exc))
def test_global_not_similar(self):
exc = self.resolve_exc('withGlobal', globals={
'what': DummyGlobal(), 'terrible': DummyGlobal(), 'failure': DummyGlobal()
})
self.assertTrue(isinstance(exc, NameError))
self.assertTrue('Did you mean' not in str(exc), msg=str(exc))
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "66874d0bc179c07e64eb4896186a5f05",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 95,
"avg_line_length": 33.717948717948715,
"alnum_prop": 0.5889733840304182,
"repo_name": "rusek/yorbay-python",
"id": "d44732a18f2dee95ba21ff169c080187fb2dd085",
"size": "5298",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/debug_compiler_test.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "146791"
},
{
"name": "Shell",
"bytes": "848"
}
],
"symlink_target": ""
}
|
import re
from unidecode import unidecode
import numpy as np
"""
Functions to read in the files from the GermEval contest,
create suitable numpy matrices for train/dev/test
@author: Nils Reimers
"""
def readFile(filepath):
sentences = []
sentence = []
for line in open(filepath):
line = line.strip()
if len(line) == 0 or line[0] == '#':
if len(sentence) > 0:
sentences.append(sentence)
sentence = []
continue
splits = line.split('\t')
sentence.append([splits[1], splits[2]])
return sentences
def multiple_replacer(key_values):
#replace_dict = dict(key_values)
replace_dict = key_values
replacement_function = lambda match: replace_dict[match.group(0)]
pattern = re.compile("|".join([re.escape(k) for k, v in key_values.iteritems()]), re.M)
return lambda string: pattern.sub(replacement_function, string)
def multiple_replace(string, key_values):
return multiple_replacer(key_values)(string)
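# Illustrative usage of the two helpers above (example values only, not from the original file):
#   multiple_replace(u"straße", {u"ß": u"SZ"}) -> u"straSZe"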
def normalizeWord(line):
line = unicode(line, "utf-8") #Convert to UTF8
line = line.replace(u"„", u"\"")
line = line.lower() #To lower case
#Replace all special characters with their ASCII counterparts, but keep umlauts
#Requires that the text is in lowercase before
replacements = dict(((u"ß", "SZ"), (u"ä", "AE"), (u"ü", "UE"), (u"ö", "OE")))
replacementsInv = dict(zip(replacements.values(),replacements.keys()))
line = multiple_replace(line, replacements)
line = unidecode(line)
line = multiple_replace(line, replacementsInv)
line = line.lower() #Unidecode might have replaced some characters, like € with upper case EUR
line = re.sub("([0-9][0-9.,]*)", '0', line) #Replace digit sequences with 0
return line.strip()
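# Rough illustration of normalizeWord (made-up input, not from the original file):
#   normalizeWord("Die Straße kostet 3,50€") keeps the German umlauts/ß, folds other
#   special characters to ASCII (€ -> eur) and collapses the number, giving
#   approximately u"die straße kostet 0eur".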
def createNumpyArray(sentences, windowsize, word2Idx, label2Idx):
unknownIdx = word2Idx['UNKNOWN']
paddingIdx = word2Idx['PADDING']
xMatrix = []
yVector = []
wordCount = 0
unknownWordCount = 0
for sentence in sentences:
targetWordIdx = 0
for targetWordIdx in xrange(len(sentence)):
# Get the context of the target word and map these words to the index in the embeddings matrix
wordIndices = []
for wordPosition in xrange(targetWordIdx-windowsize, targetWordIdx+windowsize+1):
if wordPosition < 0 or wordPosition >= len(sentence):
wordIndices.append(paddingIdx)
continue
word = sentence[wordPosition][0]
wordCount += 1
if word in word2Idx:
wordIdx = word2Idx[word]
elif word.lower() in word2Idx:
wordIdx = word2Idx[word.lower()]
elif normalizeWord(word) in word2Idx:
wordIdx = word2Idx[normalizeWord(word)]
else:
wordIdx = unknownIdx
unknownWordCount += 1
wordIndices.append(wordIdx)
#Get the label and map to int
labelIdx = label2Idx[sentence[targetWordIdx][1]]
xMatrix.append(wordIndices)
yVector.append(labelIdx)
print "Unknowns: %.2f%%" % (unknownWordCount/(float(wordCount))*100)
return (np.asarray(xMatrix, dtype='int32'), np.asarray(yVector, dtype='int32'))
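# Typical call (sketch; the file name and window size are illustrative):
#   train_sentences = readFile("NER-de-train.tsv")
#   X_train, y_train = createNumpyArray(train_sentences, 2, word2Idx, label2Idx)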
|
{
"content_hash": "c4309879a2e18905731042235fb6e416",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 106,
"avg_line_length": 32.0625,
"alnum_prop": 0.5736563631300473,
"repo_name": "UKPLab/deeplearning4nlp-tutorial",
"id": "78905cfec6245ef9bf4899a418aebd9b29108213",
"size": "3624",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "2015-10_Lecture/Lecture3/code/keras 0.3.0 (as used in lecture)/GermEvalReader.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "192145"
},
{
"name": "Jupyter Notebook",
"bytes": "223814"
},
{
"name": "Python",
"bytes": "260799"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: lambda_event
short_description: Creates, updates or deletes AWS Lambda function event mappings.
description:
- This module allows the management of AWS Lambda function event source mappings such as DynamoDB and Kinesis stream
events via the Ansible framework. These event source mappings are relevant only in the AWS Lambda pull model, where
AWS Lambda invokes the function.
It is idempotent and supports "Check" mode. Use module M(lambda) to manage the lambda
function itself and M(lambda_alias) to manage function aliases.
version_added: "2.2"
author: Pierre Jodouin (@pjodouin), Ryan Brown (@ryansb)
options:
lambda_function_arn:
description:
- The name or ARN of the lambda function.
required: true
aliases: ['function_name', 'function_arn']
state:
description:
- Describes the desired state.
required: true
default: "present"
choices: ["present", "absent"]
alias:
description:
- Name of the function alias. Mutually exclusive with C(version).
required: true
version:
description:
- Version of the Lambda function. Mutually exclusive with C(alias).
required: false
event_source:
description:
- Source of the event that triggers the lambda function.
required: false
default: stream
choices: ['stream']
source_params:
description:
- Sub-parameters required for event source.
- I(== stream event source ==)
- C(source_arn) The Amazon Resource Name (ARN) of the Kinesis or DynamoDB stream that is the event source.
- C(enabled) Indicates whether AWS Lambda should begin polling the event source. Default is True.
- C(batch_size) The largest number of records that AWS Lambda will retrieve from your event source at the
time of invoking your function. Default is 100.
- C(starting_position) The position in the stream where AWS Lambda should start reading.
Choices are TRIM_HORIZON or LATEST.
required: true
requirements:
- boto3
extends_documentation_fragment:
- aws
'''
EXAMPLES = '''
---
# Example that creates a lambda event notification for a DynamoDB stream
- hosts: localhost
gather_facts: no
vars:
state: present
tasks:
- name: DynamoDB stream event mapping
lambda_event:
state: "{{ state | default('present') }}"
event_source: stream
function_name: "{{ function_name }}"
alias: Dev
source_params:
source_arn: arn:aws:dynamodb:us-east-1:123456789012:table/tableName/stream/2016-03-19T19:51:37.457
enabled: True
batch_size: 100
starting_position: TRIM_HORIZON
- name: Show source event
debug:
var: lambda_stream_events
'''
RETURN = '''
---
lambda_stream_events:
description: list of dictionaries returned by the API describing stream event mappings
returned: success
type: list
'''
import re
import sys
try:
import boto3
from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (HAS_BOTO3, boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec,
get_aws_connection_info)
# ---------------------------------------------------------------------------------------------------
#
# Helper Functions & classes
#
# ---------------------------------------------------------------------------------------------------
class AWSConnection:
"""
Create the connection object and client objects as required.
"""
def __init__(self, ansible_obj, resources, use_boto3=True):
try:
self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=use_boto3)
self.resource_client = dict()
if not resources:
resources = ['lambda']
resources.append('iam')
for resource in resources:
aws_connect_kwargs.update(dict(region=self.region,
endpoint=self.endpoint,
conn_type='client',
resource=resource
))
self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs)
# if region is not provided, then get default profile/session region
if not self.region:
self.region = self.resource_client['lambda'].meta.region_name
except (ClientError, ParamValidationError, MissingParametersError) as e:
ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e))
# set account ID
try:
self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4]
except (ClientError, ValueError, KeyError, IndexError):
self.account_id = ''
def client(self, resource='lambda'):
return self.resource_client[resource]
def pc(key):
"""
Changes a python key into its Pascal case equivalent. For example, 'this_function_name' becomes 'ThisFunctionName'.
:param key:
:return:
"""
return "".join([token.capitalize() for token in key.split('_')])
def ordered_obj(obj):
"""
Order object for comparison purposes
:param obj:
:return:
"""
if isinstance(obj, dict):
return sorted((k, ordered_obj(v)) for k, v in obj.items())
if isinstance(obj, list):
return sorted(ordered_obj(x) for x in obj)
else:
return obj
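# Example (illustrative): ordered_obj({'b': [2, 1], 'a': 0}) == [('a', 0), ('b', [1, 2])],
# so two nested structures compare equal regardless of key or element order.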
def set_api_sub_params(params):
"""
Sets module sub-parameters to those expected by the boto3 API.
:param params:
:return:
"""
api_params = dict()
for param in params.keys():
param_value = params.get(param, None)
if param_value:
api_params[pc(param)] = param_value
return api_params
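# Example (illustrative): set_api_sub_params({'batch_size': 100, 'enabled': None})
# returns {'BatchSize': 100}; keys with falsy values are skipped.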
def validate_params(module, aws):
"""
Performs basic parameter validation.
:param module:
:param aws:
:return:
"""
function_name = module.params['lambda_function_arn']
# validate function name
if not re.search('^[\w\-:]+$', function_name):
module.fail_json(
msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name)
)
if len(function_name) > 64 and not function_name.startswith('arn:aws:lambda:'):
module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
elif len(function_name) > 140 and function_name.startswith('arn:aws:lambda:'):
module.fail_json(msg='ARN "{0}" exceeds 140 character limit'.format(function_name))
# check if 'function_name' needs to be expanded in full ARN format
if not module.params['lambda_function_arn'].startswith('arn:aws:lambda:'):
function_name = module.params['lambda_function_arn']
module.params['lambda_function_arn'] = 'arn:aws:lambda:{0}:{1}:function:{2}'.format(aws.region, aws.account_id, function_name)
qualifier = get_qualifier(module)
if qualifier:
function_arn = module.params['lambda_function_arn']
module.params['lambda_function_arn'] = '{0}:{1}'.format(function_arn, qualifier)
return
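# Illustration of the ARN expansion above (region and account id are made-up values):
#   function name 'myFunction' with alias 'Dev' in 'us-east-1' for account '123456789012'
#   becomes 'arn:aws:lambda:us-east-1:123456789012:function:myFunction:Dev'.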
def get_qualifier(module):
"""
Returns the function qualifier as a version or alias or None.
:param module:
:return:
"""
qualifier = None
if module.params['version'] > 0:
qualifier = str(module.params['version'])
elif module.params['alias']:
qualifier = str(module.params['alias'])
return qualifier
# ---------------------------------------------------------------------------------------------------
#
# Lambda Event Handlers
#
# This section defines a lambda_event_X function where X is an AWS service capable of initiating
# the execution of a Lambda function (pull only).
#
# ---------------------------------------------------------------------------------------------------
def lambda_event_stream(module, aws):
"""
Adds, updates or deletes lambda stream (DynamoDb, Kinesis) event notifications.
:param module:
:param aws:
:return:
"""
client = aws.client('lambda')
facts = dict()
changed = False
current_state = 'absent'
state = module.params['state']
api_params = dict(FunctionName=module.params['lambda_function_arn'])
# check if required sub-parameters are present and valid
source_params = module.params['source_params']
source_arn = source_params.get('source_arn')
if source_arn:
api_params.update(EventSourceArn=source_arn)
else:
module.fail_json(msg="Source parameter 'source_arn' is required for stream event notification.")
# check if optional sub-parameters are valid, if present
batch_size = source_params.get('batch_size')
if batch_size:
try:
source_params['batch_size'] = int(batch_size)
except ValueError:
module.fail_json(msg="Source parameter 'batch_size' must be an integer, found: {0}".format(source_params['batch_size']))
# optional boolean value needs special treatment as not present does not imply False
source_param_enabled = module.boolean(source_params.get('enabled', 'True'))
# check if event mapping exist
try:
facts = client.list_event_source_mappings(**api_params)['EventSourceMappings']
if facts:
current_state = 'present'
except ClientError as e:
module.fail_json(msg='Error retrieving stream event notification configuration: {0}'.format(e))
if state == 'present':
if current_state == 'absent':
starting_position = source_params.get('starting_position')
if starting_position:
api_params.update(StartingPosition=starting_position)
else:
module.fail_json(msg="Source parameter 'starting_position' is required for stream event notification.")
if source_arn:
api_params.update(Enabled=source_param_enabled)
if source_params.get('batch_size'):
api_params.update(BatchSize=source_params.get('batch_size'))
try:
if not module.check_mode:
facts = client.create_event_source_mapping(**api_params)
changed = True
except (ClientError, ParamValidationError, MissingParametersError) as e:
module.fail_json(msg='Error creating stream source event mapping: {0}'.format(e))
else:
# current_state is 'present'
api_params = dict(FunctionName=module.params['lambda_function_arn'])
current_mapping = facts[0]
api_params.update(UUID=current_mapping['UUID'])
mapping_changed = False
# check if anything changed
if source_params.get('batch_size') and source_params['batch_size'] != current_mapping['BatchSize']:
api_params.update(BatchSize=source_params['batch_size'])
mapping_changed = True
if source_param_enabled is not None:
if source_param_enabled:
if current_mapping['State'] not in ('Enabled', 'Enabling'):
api_params.update(Enabled=True)
mapping_changed = True
else:
if current_mapping['State'] not in ('Disabled', 'Disabling'):
api_params.update(Enabled=False)
mapping_changed = True
if mapping_changed:
try:
if not module.check_mode:
facts = client.update_event_source_mapping(**api_params)
changed = True
except (ClientError, ParamValidationError, MissingParametersError) as e:
module.fail_json(msg='Error updating stream source event mapping: {0}'.format(e))
else:
if current_state == 'present':
# remove the stream event mapping
api_params = dict(UUID=facts[0]['UUID'])
try:
if not module.check_mode:
facts = client.delete_event_source_mapping(**api_params)
changed = True
except (ClientError, ParamValidationError, MissingParametersError) as e:
module.fail_json(msg='Error removing stream source event mapping: {0}'.format(e))
return camel_dict_to_snake_dict(dict(changed=changed, events=facts))
def main():
"""Produce a list of function suffixes which handle lambda events."""
this_module = sys.modules[__name__]
source_choices = ["stream"]
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
state=dict(required=False, default='present', choices=['present', 'absent']),
lambda_function_arn=dict(required=True, default=None, aliases=['function_name', 'function_arn']),
event_source=dict(required=False, default="stream", choices=source_choices),
source_params=dict(type='dict', required=True, default=None),
alias=dict(required=False, default=None),
version=dict(type='int', required=False, default=0),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[['alias', 'version']],
required_together=[]
)
# validate dependencies
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required for this module.')
aws = AWSConnection(module, ['lambda'])
validate_params(module, aws)
this_module_function = getattr(this_module, 'lambda_event_{0}'.format(module.params['event_source'].lower()))
results = this_module_function(module, aws)
module.exit_json(**results)
if __name__ == '__main__':
main()
|
{
"content_hash": "c703877dd694ca939bb8c323c224fa2e",
"timestamp": "",
"source": "github",
"line_count": 420,
"max_line_length": 134,
"avg_line_length": 34.226190476190474,
"alnum_prop": 0.6074434782608695,
"repo_name": "e-gob/plataforma-kioscos-autoatencion",
"id": "f7298cb548fa7c1de09bf4bf092c51b5d80b4b4f",
"size": "14552",
"binary": false,
"copies": "26",
"ref": "refs/heads/master",
"path": "scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/cloud/amazon/lambda_event.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "41110"
},
{
"name": "C++",
"bytes": "3804"
},
{
"name": "CSS",
"bytes": "34823"
},
{
"name": "CoffeeScript",
"bytes": "8521"
},
{
"name": "HTML",
"bytes": "61168"
},
{
"name": "JavaScript",
"bytes": "7206"
},
{
"name": "Makefile",
"bytes": "1347"
},
{
"name": "PowerShell",
"bytes": "584344"
},
{
"name": "Python",
"bytes": "25506593"
},
{
"name": "Ruby",
"bytes": "245726"
},
{
"name": "Shell",
"bytes": "5075"
}
],
"symlink_target": ""
}
|
"""
mbed CMSIS-DAP debugger
Copyright (c) 2006-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from target_kinetis import Kinetis
import logging
class KL46Z(Kinetis):
memoryMapXML = """<?xml version="1.0"?>
<!DOCTYPE memory-map PUBLIC "+//IDN gnu.org//DTD GDB Memory Map V1.0//EN" "http://sourceware.org/gdb/gdb-memory-map.dtd">
<memory-map>
<memory type="flash" start="0x0" length="0x40000"> <property name="blocksize">0x400</property></memory>
<memory type="ram" start="0x1fffe000" length="0x8000"> </memory>
</memory-map>
"""
def __init__(self, transport):
super(KL46Z, self).__init__(transport)
self.mdm_idr = 0x001c0020
|
{
"content_hash": "97cb3a224e13577ee373ee7c051e648f",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 121,
"avg_line_length": 33.857142857142854,
"alnum_prop": 0.7097046413502109,
"repo_name": "geky/pyOCD",
"id": "181fa67acf07f408a75fca51f0e9dfb7b455b871",
"size": "1185",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyOCD/target/target_kl46z.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "473"
},
{
"name": "C",
"bytes": "1688"
},
{
"name": "Python",
"bytes": "352353"
}
],
"symlink_target": ""
}
|
from unittest import TestCase
from mock import Mock
from chatterbot.conversation import Statement
from chatterbot.input import gitter
from chatterbot.input import Gitter
class MockResponse(object):
def __init__(self, status_code, data):
self.status_code = status_code
self.data = data
def json(self):
return self.data
def mock_get_response(*args, **kwargs):
url = args[0]
endpoints = {
'https://api.gitter.im/v1/user': MockResponse(200, [{'id': '9893893'}]),
'https://api.gitter.im/v1/rooms/40999743/chatMessages?limit=1': MockResponse(200, [
{'id': '4467', 'text': 'Hello', 'unread': True}
])
}
return endpoints[url]
def mock_post_response(*args, **kwargs):
url = args[0]
endpoints = {
'https://api.gitter.im/v1/rooms': MockResponse(200, {'id': '40999743'}),
'https://api.gitter.im/v1/user/9893893/rooms/40999743/unreadItems': MockResponse(200, {'id': '343222'})
}
return endpoints[url]
class GitterAdapterTests(TestCase):
def setUp(self):
super(GitterAdapterTests, self).setUp()
import requests
requests.get = Mock(side_effect=mock_get_response)
requests.post = Mock(side_effect=mock_post_response)
gitter.requests = requests
self.adapter = Gitter(
gitter_room='',
gitter_api_token='',
gitter_sleep_time=0,
gitter_only_respond_to_mentions=False
)
def test_validate_status_code_200(self):
response = MockResponse(200, {})
try:
self.adapter._validate_status_code(response)
except Gitter.HTTPStatusException:
self.fail('Test raised HTTPStatusException unexpectedly!')
def test_validate_response_201(self):
response = MockResponse(201, {})
try:
self.adapter._validate_status_code(response)
except Gitter.HTTPStatusException:
self.fail('Test raised HTTPStatusException unexpectedly!')
def test_response_status_code_not_ok(self):
response = MockResponse(404, {})
with self.assertRaises(Gitter.HTTPStatusException):
self.adapter._validate_status_code(response)
def test_join_room(self):
data = self.adapter.join_room('room_name')
self.assertIn('id', data)
def test_get_user_data(self):
data = self.adapter.get_user_data()
self.assertIn('id', data[0])
def test_mark_messages_as_read(self):
data = self.adapter.mark_messages_as_read([1, 2, 3])
self.assertIn('id', data)
def test_get_most_recent_message(self):
data = self.adapter.get_most_recent_message()
self.assertIn('text', data)
self.assertIn('id', data)
self.assertIn('unread', data)
def test_contains_mention(self):
self.adapter.username = 'chatterbot'
contains = self.adapter._contains_mention([{'screenName': 'chatterbot'}])
self.assertTrue(contains)
def test_does_not_contain_mention(self):
self.adapter.username = 'chatterbot'
contains = self.adapter._contains_mention([{'screenName': 'coolguy'}])
self.assertFalse(contains)
def test_should_respond_no_data(self):
should = self.adapter.should_respond({})
self.assertFalse(should)
def test_should_respond_unread(self):
should = self.adapter.should_respond({'unread': True})
self.assertTrue(should)
def test_remove_mentions(self):
cleaned = self.adapter.remove_mentions('Hi @person how are you @myfriend')
self.assertEqual(cleaned, 'Hi how are you')
def test_process_input(self):
statement = Statement('Hello')
data = self.adapter.process_input(statement)
self.assertEqual('Hello', data)
|
{
"content_hash": "5287b1b11daa5483b206ea13b4bf4b5d",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 111,
"avg_line_length": 30.830645161290324,
"alnum_prop": 0.6311797018048653,
"repo_name": "sahararaju/dataasservices",
"id": "fe1489877d6d0f07f8ddab0a3d72aeefcdfbfa70",
"size": "3823",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "chatterbot/input-adapter-tests/test_gitter_input_adapter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16127"
},
{
"name": "Clojure",
"bytes": "3153"
},
{
"name": "Common Lisp",
"bytes": "1095"
},
{
"name": "Go",
"bytes": "4683"
},
{
"name": "HTML",
"bytes": "7955"
},
{
"name": "JavaScript",
"bytes": "16963"
},
{
"name": "Python",
"bytes": "258118"
},
{
"name": "Rust",
"bytes": "20301"
},
{
"name": "Shell",
"bytes": "794"
}
],
"symlink_target": ""
}
|
""" Enthought pyface package component
"""
# Major package imports.
import wx
# Enthought library imports.
from traits.api import HasTraits, provides
# Local imports.
from pyface.i_image_cache import IImageCache, MImageCache
@provides(IImageCache)
class ImageCache(MImageCache, HasTraits):
""" The toolkit specific implementation of an ImageCache. See the
IImageCache interface for the API documentation.
"""
###########################################################################
# 'object' interface.
###########################################################################
def __init__(self, width, height):
self._width = width
self._height = height
# The images in the cache!
self._images = {} # {filename : wx.Image}
# The images in the cache converted to bitmaps.
self._bitmaps = {} # {filename : wx.Bitmap}
return
###########################################################################
# 'ImageCache' interface.
###########################################################################
def get_image(self, filename):
# Try the cache first.
image = self._images.get(filename)
if image is None:
# Load the image from the file and add it to the list.
#
# N.B 'wx.BITMAP_TYPE_ANY' tells wxPython to attempt to autodetect
# --- the image format.
image = wx.Image(filename, wx.BITMAP_TYPE_ANY)
# We force all images in the cache to be the same size.
if image.GetWidth() != self._width or image.GetHeight() != self._height:
image.Rescale(self._width, self._height)
# Add the image to the cache!
self._images[filename] = image
return image
def get_bitmap(self, filename):
# Try the cache first.
bmp = self._bitmaps.get(filename)
if bmp is None:
# Get the image.
image = self.get_image(filename)
# Convert the alpha channel to a mask.
image.ConvertAlphaToMask()
# Convert it to a bitmap!
bmp = image.ConvertToBitmap()
# Add the bitmap to the cache!
self._bitmaps[filename] = bmp
return bmp
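# Example usage (sketch; the file name is illustrative):
#   cache = ImageCache(16, 16)
#   bmp = cache.get_bitmap("icons/save.png")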
#### EOF ######################################################################
|
{
"content_hash": "248bbe56f44032275ed421fe6ba8bc8f",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 84,
"avg_line_length": 29.95,
"alnum_prop": 0.4941569282136895,
"repo_name": "brett-patterson/pyface",
"id": "49d50cf526927bfe05ccd2256c8f5e602d4a64b7",
"size": "2999",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "pyface/ui/wx/image_cache.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "648"
},
{
"name": "Python",
"bytes": "2371056"
}
],
"symlink_target": ""
}
|
from django.contrib.auth import get_user_model
User = get_user_model()
class UserPersistor(object):
@staticmethod
def create_user(vid, email, username, first_name=None, last_name=None):
user = User()
user.email = email
user.username = username
user.first_name = first_name
user.last_name = last_name
return user
@staticmethod
def update_user(user, vid, email, username, first_name=None, last_name=None):
user.email = email
user.username = username
user.first_name = first_name
user.last_name = last_name
return user
|
{
"content_hash": "dba8520308c51c1260df4ee3924c2a2c",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 81,
"avg_line_length": 28.17391304347826,
"alnum_prop": 0.6095679012345679,
"repo_name": "visipedia/django-visipedia",
"id": "5bf27ca552a43c2496db76f6927906d616356b35",
"size": "648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_visipedia/persistors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17874"
}
],
"symlink_target": ""
}
|
from taiga.requestmaker import RequestMaker
from taiga.models import WikiLink, WikiLinks
import unittest
from mock import patch
class TestWikiLinks(unittest.TestCase):
@patch('taiga.models.base.ListResource._new_resource')
def test_create_wikilink(self, mock_new_resource):
rm = RequestMaker('/api/v1', 'fakehost', 'faketoken')
mock_new_resource.return_value = WikiLink(rm)
wikilink = WikiLinks(rm).create(1, 'Title', 'home')
mock_new_resource.assert_called_with(
payload={'project': 1, 'title': 'Title', 'href': 'home'}
)
@patch('taiga.requestmaker.RequestMaker.post')
def test_import_wikilink(self, mock_requestmaker_post):
rm = RequestMaker('/api/v1', 'fakehost', 'faketoken')
wikilink = WikiLinks(rm).import_(1, 'Title', 'home')
mock_requestmaker_post.assert_called_with(
'/{endpoint}/{id}/{type}', endpoint='importer', payload={'project': 1,
'href': 'home',
'title': 'Title'},
id=1, type='wiki_link'
)
|
{
"content_hash": "db2a58553da09f160567e6122735fa29",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 87,
"avg_line_length": 43.44444444444444,
"alnum_prop": 0.5677749360613811,
"repo_name": "mlq/python-taiga",
"id": "c8f91b6516c51ecc3fc9fca6ce4d47d9be4fe616",
"size": "1173",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/test_wikilinks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "144381"
},
{
"name": "Shell",
"bytes": "168"
}
],
"symlink_target": ""
}
|
"""
WSGI config for djtest project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djtest.settings")
application = get_wsgi_application()
|
{
"content_hash": "ea347d5e85d4a3080a0e88fcac582a10",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 24.3125,
"alnum_prop": 0.7686375321336761,
"repo_name": "Elemnir/presentations",
"id": "cbe40b6d69fa1dcdfa294f873895a83a7f55f7b8",
"size": "389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utk_prog_team_2015_04_09/djtest/djtest/wsgi.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "3350"
},
{
"name": "Python",
"bytes": "5971"
},
{
"name": "Shell",
"bytes": "1024"
}
],
"symlink_target": ""
}
|
"""The tests for the Tasmota cover platform."""
import copy
import json
from unittest.mock import patch
from hatasmota.utils import (
get_topic_stat_result,
get_topic_stat_status,
get_topic_tele_sensor,
get_topic_tele_will,
)
import pytest
from homeassistant.components import cover
from homeassistant.components.tasmota.const import DEFAULT_PREFIX
from homeassistant.const import ATTR_ASSUMED_STATE, STATE_UNKNOWN, Platform
from .test_common import (
DEFAULT_CONFIG,
help_test_availability,
help_test_availability_discovery_update,
help_test_availability_poll_state,
help_test_availability_when_connection_lost,
help_test_discovery_device_remove,
help_test_discovery_removal,
help_test_discovery_update_unchanged,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
)
from tests.common import async_fire_mqtt_message
COVER_SUPPORT = (
cover.SUPPORT_OPEN
| cover.SUPPORT_CLOSE
| cover.SUPPORT_STOP
| cover.SUPPORT_SET_POSITION
)
TILT_SUPPORT = (
cover.SUPPORT_OPEN_TILT
| cover.SUPPORT_CLOSE_TILT
| cover.SUPPORT_STOP_TILT
| cover.SUPPORT_SET_TILT_POSITION
)
async def test_missing_relay(hass, mqtt_mock, setup_tasmota):
"""Test no cover is discovered if relays are missing."""
@pytest.mark.parametrize(
"relay_config, num_covers",
[
([3, 3, 3, 3, 3, 3, 1, 1, 3, 3], 4),
([3, 3, 3, 3, 0, 0, 0, 0], 2),
([3, 3, 1, 1, 0, 0, 0, 0], 1),
([3, 3, 3, 1, 0, 0, 0, 0], 0),
],
)
async def test_multiple_covers(
hass, mqtt_mock, setup_tasmota, relay_config, num_covers
):
"""Test discovery of multiple covers."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"] = relay_config
mac = config["mac"]
assert len(hass.states.async_all("cover")) == 0
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
assert len(hass.states.async_all("cover")) == num_covers
async def test_tilt_support(hass, mqtt_mock, setup_tasmota):
"""Test tilt support detection."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"] = [3, 3, 3, 3, 3, 3, 3, 3]
config["sht"] = [
[0, 0, 0], # Default settings, no tilt
[-90, 90, 24], # Tilt configured
[-90, 90, 0], # Duration 0, no tilt
[-90, -90, 24], # min+max same, no tilt
]
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
assert len(hass.states.async_all("cover")) == 4
state = hass.states.get("cover.tasmota_cover_1")
assert state.attributes["supported_features"] == COVER_SUPPORT
state = hass.states.get("cover.tasmota_cover_2")
assert state.attributes["supported_features"] == COVER_SUPPORT | TILT_SUPPORT
state = hass.states.get("cover.tasmota_cover_3")
assert state.attributes["supported_features"] == COVER_SUPPORT
state = hass.states.get("cover.tasmota_cover_4")
assert state.attributes["supported_features"] == COVER_SUPPORT
async def test_controlling_state_via_mqtt_tilt(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 3
config["rl"][1] = 3
config["sht"] = [[-90, 90, 24]]
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
state = hass.states.get("cover.tasmota_cover_1")
assert state.state == "unavailable"
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
await hass.async_block_till_done()
state = hass.states.get("cover.tasmota_cover_1")
assert state.state == STATE_UNKNOWN
assert state.attributes["supported_features"] == COVER_SUPPORT | TILT_SUPPORT
assert not state.attributes.get(ATTR_ASSUMED_STATE)
# Periodic updates
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/tele/SENSOR",
'{"Shutter1":{"Position":54,"Direction":-1,"Tilt":-90}}',
)
state = hass.states.get("cover.tasmota_cover_1")
assert state.state == "closing"
assert state.attributes["current_position"] == 54
assert state.attributes["current_tilt_position"] == 0
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/tele/SENSOR",
'{"Shutter1":{"Position":100,"Direction":1,"Tilt":90}}',
)
state = hass.states.get("cover.tasmota_cover_1")
assert state.state == "opening"
assert state.attributes["current_position"] == 100
assert state.attributes["current_tilt_position"] == 100
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/tele/SENSOR",
'{"Shutter1":{"Position":0,"Direction":0,"Tilt":0}}',
)
state = hass.states.get("cover.tasmota_cover_1")
assert state.state == "closed"
assert state.attributes["current_position"] == 0
assert state.attributes["current_tilt_position"] == 50
async_fire_mqtt_message(
hass, "tasmota_49A3BC/tele/SENSOR", '{"Shutter1":{"Position":1,"Direction":0}}'
)
state = hass.states.get("cover.tasmota_cover_1")
assert state.state == "open"
assert state.attributes["current_position"] == 1
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/tele/SENSOR",
'{"Shutter1":{"Position":100,"Direction":0}}',
)
state = hass.states.get("cover.tasmota_cover_1")
assert state.state == "open"
assert state.attributes["current_position"] == 100
# State poll response
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/stat/STATUS10",
'{"StatusSNS":{"Shutter1":{"Position":54,"Direction":-1,"Tilt":-90}}}',
)
state = hass.states.get("cover.tasmota_cover_1")
assert state.state == "closing"
assert state.attributes["current_position"] == 54
assert state.attributes["current_tilt_position"] == 0
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/stat/STATUS10",
'{"StatusSNS":{"Shutter1":{"Position":100,"Direction":1,"Tilt":90}}}',
)
state = hass.states.get("cover.tasmota_cover_1")
assert state.state == "opening"
assert state.attributes["current_position"] == 100
assert state.attributes["current_tilt_position"] == 100
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/stat/STATUS10",
'{"StatusSNS":{"Shutter1":{"Position":0,"Direction":0,"Tilt":0}}}',
)
state = hass.states.get("cover.tasmota_cover_1")
assert state.state == "closed"
assert state.attributes["current_position"] == 0
assert state.attributes["current_tilt_position"] == 50
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/stat/STATUS10",
'{"StatusSNS":{"Shutter1":{"Position":1,"Direction":0}}}',
)
state = hass.states.get("cover.tasmota_cover_1")
assert state.state == "open"
assert state.attributes["current_position"] == 1
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/stat/STATUS10",
'{"StatusSNS":{"Shutter1":{"Position":100,"Direction":0}}}',
)
state = hass.states.get("cover.tasmota_cover_1")
assert state.state == "open"
assert state.attributes["current_position"] == 100
# Command response
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/stat/RESULT",
'{"Shutter1":{"Position":54,"Direction":-1,"Tilt":-90}}',
)
state = hass.states.get("cover.tasmota_cover_1")
assert state.state == "closing"
assert state.attributes["current_position"] == 54
assert state.attributes["current_tilt_position"] == 0
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/stat/RESULT",
'{"Shutter1":{"Position":100,"Direction":1,"Tilt":90}}',
)
state = hass.states.get("cover.tasmota_cover_1")
assert state.state == "opening"
assert state.attributes["current_position"] == 100
assert state.attributes["current_tilt_position"] == 100
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/stat/RESULT",
'{"Shutter1":{"Position":0,"Direction":0,"Tilt":0}}',
)
state = hass.states.get("cover.tasmota_cover_1")
assert state.state == "closed"
assert state.attributes["current_position"] == 0
assert state.attributes["current_tilt_position"] == 50
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Shutter1":{"Position":1,"Direction":0}}'
)
state = hass.states.get("cover.tasmota_cover_1")
assert state.state == "open"
assert state.attributes["current_position"] == 1
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/stat/RESULT",
'{"Shutter1":{"Position":100,"Direction":0}}',
)
state = hass.states.get("cover.tasmota_cover_1")
assert state.state == "open"
assert state.attributes["current_position"] == 100
@pytest.mark.parametrize("tilt", ("", ',"Tilt":0'))
async def test_controlling_state_via_mqtt_inverted(
hass, mqtt_mock, setup_tasmota, tilt
):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 3
config["rl"][1] = 3
config["sho"] = [1] # Inverted cover
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
state = hass.states.get("cover.tasmota_cover_1")
assert state.state == "unavailable"
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
await hass.async_block_till_done()
state = hass.states.get("cover.tasmota_cover_1")
assert state.state == STATE_UNKNOWN
assert state.attributes["supported_features"] == COVER_SUPPORT
# Periodic updates
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/tele/SENSOR",
'{"Shutter1":{"Position":54,"Direction":-1' + tilt + "}}",
)
state = hass.states.get("cover.tasmota_cover_1")
assert state.state == "opening"
assert state.attributes["current_position"] == 46
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/tele/SENSOR",
'{"Shutter1":{"Position":100,"Direction":1' + tilt + "}}",
)
state = hass.states.get("cover.tasmota_cover_1")
assert state.state == "closing"
assert state.attributes["current_position"] == 0
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/tele/SENSOR",
'{"Shutter1":{"Position":0,"Direction":0' + tilt + "}}",
)
state = hass.states.get("cover.tasmota_cover_1")
assert state.state == "open"
assert state.attributes["current_position"] == 100
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/tele/SENSOR",
'{"Shutter1":{"Position":99,"Direction":0' + tilt + "}}",
)
state = hass.states.get("cover.tasmota_cover_1")
assert state.state == "open"
assert state.attributes["current_position"] == 1
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/tele/SENSOR",
'{"Shutter1":{"Position":100,"Direction":0' + tilt + "}}",
)
state = hass.states.get("cover.tasmota_cover_1")
assert state.state == "closed"
assert state.attributes["current_position"] == 0
# State poll response
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/stat/STATUS10",
'{"StatusSNS":{"Shutter1":{"Position":54,"Direction":-1' + tilt + "}}}",
)
state = hass.states.get("cover.tasmota_cover_1")
assert state.state == "opening"
assert state.attributes["current_position"] == 46
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/stat/STATUS10",
'{"StatusSNS":{"Shutter1":{"Position":100,"Direction":1' + tilt + "}}}",
)
state = hass.states.get("cover.tasmota_cover_1")
assert state.state == "closing"
assert state.attributes["current_position"] == 0
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/stat/STATUS10",
'{"StatusSNS":{"Shutter1":{"Position":0,"Direction":0' + tilt + "}}}",
)
state = hass.states.get("cover.tasmota_cover_1")
assert state.state == "open"
assert state.attributes["current_position"] == 100
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/stat/STATUS10",
'{"StatusSNS":{"Shutter1":{"Position":99,"Direction":0' + tilt + "}}}",
)
state = hass.states.get("cover.tasmota_cover_1")
assert state.state == "open"
assert state.attributes["current_position"] == 1
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/stat/STATUS10",
'{"StatusSNS":{"Shutter1":{"Position":100,"Direction":0' + tilt + "}}}",
)
state = hass.states.get("cover.tasmota_cover_1")
assert state.state == "closed"
assert state.attributes["current_position"] == 0
# Command response
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/stat/RESULT",
'{"Shutter1":{"Position":54,"Direction":-1' + tilt + "}}",
)
state = hass.states.get("cover.tasmota_cover_1")
assert state.state == "opening"
assert state.attributes["current_position"] == 46
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/stat/RESULT",
'{"Shutter1":{"Position":100,"Direction":1' + tilt + "}}",
)
state = hass.states.get("cover.tasmota_cover_1")
assert state.state == "closing"
assert state.attributes["current_position"] == 0
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/stat/RESULT",
'{"Shutter1":{"Position":0,"Direction":0' + tilt + "}}",
)
state = hass.states.get("cover.tasmota_cover_1")
assert state.state == "open"
assert state.attributes["current_position"] == 100
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/stat/RESULT",
'{"Shutter1":{"Position":1,"Direction":0' + tilt + "}}",
)
state = hass.states.get("cover.tasmota_cover_1")
assert state.state == "open"
assert state.attributes["current_position"] == 99
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/stat/RESULT",
'{"Shutter1":{"Position":100,"Direction":0' + tilt + "}}",
)
state = hass.states.get("cover.tasmota_cover_1")
assert state.state == "closed"
assert state.attributes["current_position"] == 0
async def call_service(hass, entity_id, service, **kwargs):
"""Call a fan service."""
await hass.services.async_call(
cover.DOMAIN,
service,
{"entity_id": entity_id, **kwargs},
blocking=True,
)
async def test_sending_mqtt_commands(hass, mqtt_mock, setup_tasmota):
"""Test the sending MQTT commands."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["dn"] = "Test"
config["rl"][0] = 3
config["rl"][1] = 3
config["sht"] = [[-90, 90, 24]]
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
await hass.async_block_till_done()
state = hass.states.get("cover.test_cover_1")
assert state.state == STATE_UNKNOWN
await hass.async_block_till_done()
await hass.async_block_till_done()
mqtt_mock.async_publish.reset_mock()
# Close the cover and verify MQTT message is sent
await call_service(hass, "cover.test_cover_1", "close_cover")
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/ShutterClose1", "", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Tasmota is not optimistic, the state should still be unknown
state = hass.states.get("cover.test_cover_1")
assert state.state == STATE_UNKNOWN
# Open the cover and verify MQTT message is sent
await call_service(hass, "cover.test_cover_1", "open_cover")
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/ShutterOpen1", "", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Stop the cover and verify MQTT message is sent
await call_service(hass, "cover.test_cover_1", "stop_cover")
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/ShutterStop1", "", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Set position and verify MQTT message is sent
await call_service(hass, "cover.test_cover_1", "set_cover_position", position=0)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/ShutterPosition1", "0", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Set position and verify MQTT message is sent
await call_service(hass, "cover.test_cover_1", "set_cover_position", position=99)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/ShutterPosition1", "99", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Close the cover tilt and verify MQTT message is sent
await call_service(hass, "cover.test_cover_1", "close_cover_tilt")
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/ShutterTilt1", "CLOSE", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Open the cover tilt and verify MQTT message is sent
await call_service(hass, "cover.test_cover_1", "open_cover_tilt")
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/ShutterTilt1", "OPEN", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Stop the cover tilt and verify MQTT message is sent
await call_service(hass, "cover.test_cover_1", "stop_cover_tilt")
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/ShutterStop1", "", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Set tilt position and verify MQTT message is sent
await call_service(
hass, "cover.test_cover_1", "set_cover_tilt_position", tilt_position=0
)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/ShutterTilt1", "-90", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Set tilt position and verify MQTT message is sent
await call_service(
hass, "cover.test_cover_1", "set_cover_tilt_position", tilt_position=100
)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/ShutterTilt1", "90", 0, False
)
mqtt_mock.async_publish.reset_mock()
async def test_sending_mqtt_commands_inverted(hass, mqtt_mock, setup_tasmota):
"""Test the sending MQTT commands."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["dn"] = "Test"
config["rl"][0] = 3
config["rl"][1] = 3
config["sho"] = [1] # Inverted cover
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
await hass.async_block_till_done()
state = hass.states.get("cover.test_cover_1")
assert state.state == STATE_UNKNOWN
await hass.async_block_till_done()
await hass.async_block_till_done()
mqtt_mock.async_publish.reset_mock()
# Close the cover and verify MQTT message is sent
await call_service(hass, "cover.test_cover_1", "close_cover")
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/ShutterClose1", "", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Tasmota is not optimistic, the state should still be unknown
state = hass.states.get("cover.test_cover_1")
assert state.state == STATE_UNKNOWN
# Open the cover and verify MQTT message is sent
await call_service(hass, "cover.test_cover_1", "open_cover")
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/ShutterOpen1", "", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Stop the cover and verify MQTT message is sent
await call_service(hass, "cover.test_cover_1", "stop_cover")
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/ShutterStop1", "", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Set position and verify MQTT message is sent
await call_service(hass, "cover.test_cover_1", "set_cover_position", position=0)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/ShutterPosition1", "100", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Set position and verify MQTT message is sent
await call_service(hass, "cover.test_cover_1", "set_cover_position", position=99)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/ShutterPosition1", "1", 0, False
)
mqtt_mock.async_publish.reset_mock()
async def test_availability_when_connection_lost(
hass, mqtt_client_mock, mqtt_mock, setup_tasmota
):
"""Test availability after MQTT disconnection."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["dn"] = "Test"
config["rl"][0] = 3
config["rl"][1] = 3
await help_test_availability_when_connection_lost(
hass,
mqtt_client_mock,
mqtt_mock,
Platform.COVER,
config,
entity_id="test_cover_1",
)
async def test_availability(hass, mqtt_mock, setup_tasmota):
"""Test availability."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["dn"] = "Test"
config["rl"][0] = 3
config["rl"][1] = 3
await help_test_availability(
hass, mqtt_mock, Platform.COVER, config, entity_id="test_cover_1"
)
async def test_availability_discovery_update(hass, mqtt_mock, setup_tasmota):
"""Test availability discovery update."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["dn"] = "Test"
config["rl"][0] = 3
config["rl"][1] = 3
await help_test_availability_discovery_update(
hass, mqtt_mock, Platform.COVER, config, entity_id="test_cover_1"
)
async def test_availability_poll_state(
hass, mqtt_client_mock, mqtt_mock, setup_tasmota
):
"""Test polling after MQTT connection (re)established."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["rl"][0] = 3
config["rl"][1] = 3
poll_topic = "tasmota_49A3BC/cmnd/STATUS"
await help_test_availability_poll_state(
hass, mqtt_client_mock, mqtt_mock, Platform.COVER, config, poll_topic, "10"
)
async def test_discovery_removal_cover(hass, mqtt_mock, caplog, setup_tasmota):
"""Test removal of discovered cover."""
config1 = copy.deepcopy(DEFAULT_CONFIG)
config1["dn"] = "Test"
config1["rl"][0] = 3
config1["rl"][1] = 3
config2 = copy.deepcopy(DEFAULT_CONFIG)
config2["dn"] = "Test"
config2["rl"][0] = 0
config2["rl"][1] = 0
await help_test_discovery_removal(
hass,
mqtt_mock,
caplog,
Platform.COVER,
config1,
config2,
entity_id="test_cover_1",
name="Test cover 1",
)
async def test_discovery_update_unchanged_cover(hass, mqtt_mock, caplog, setup_tasmota):
"""Test update of discovered cover."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["dn"] = "Test"
config["rl"][0] = 3
config["rl"][1] = 3
with patch(
"homeassistant.components.tasmota.cover.TasmotaCover.discovery_update"
) as discovery_update:
await help_test_discovery_update_unchanged(
hass,
mqtt_mock,
caplog,
Platform.COVER,
config,
discovery_update,
entity_id="test_cover_1",
name="Test cover 1",
)
async def test_discovery_device_remove(hass, mqtt_mock, setup_tasmota):
"""Test device registry remove."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["dn"] = "Test"
config["rl"][0] = 3
config["rl"][1] = 3
unique_id = f"{DEFAULT_CONFIG['mac']}_cover_shutter_0"
await help_test_discovery_device_remove(
hass, mqtt_mock, Platform.COVER, unique_id, config
)
async def test_entity_id_update_subscriptions(hass, mqtt_mock, setup_tasmota):
"""Test MQTT subscriptions are managed when entity_id is updated."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["dn"] = "Test"
config["rl"][0] = 3
config["rl"][1] = 3
topics = [
get_topic_stat_result(config),
get_topic_tele_sensor(config),
get_topic_stat_status(config, 10),
get_topic_tele_will(config),
]
await help_test_entity_id_update_subscriptions(
hass, mqtt_mock, Platform.COVER, config, topics, entity_id="test_cover_1"
)
async def test_entity_id_update_discovery_update(hass, mqtt_mock, setup_tasmota):
"""Test MQTT discovery update when entity_id is updated."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["dn"] = "Test"
config["rl"][0] = 3
config["rl"][1] = 3
await help_test_entity_id_update_discovery_update(
hass, mqtt_mock, Platform.COVER, config, entity_id="test_cover_1"
)
|
{
"content_hash": "c4ee4e7db141b532c2894be2a3ced8f3",
"timestamp": "",
"source": "github",
"line_count": 762,
"max_line_length": 88,
"avg_line_length": 33.25984251968504,
"alnum_prop": 0.6317471590909091,
"repo_name": "mezz64/home-assistant",
"id": "06471e1175768574d1fb8bc41309c1113a66444a",
"size": "25344",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "tests/components/tasmota/test_cover.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52481895"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
'''
Created on 12/23/2015
@Ronak Shah
'''
from collections import defaultdict
import pandas as pd
import logging
import coloredlogs
# Gives the elements at particular indices in a list
getVar = lambda searchList, ind: [searchList[i] for i in ind]
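# e.g. getVar(['a', 'b', 'c'], [0, 2]) -> ['a', 'c']  (illustrative values)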
coloredlogs.install(level='DEBUG')
def ReadRepeatFile(filename, verbose):
if(verbose):
logging.info("iAnnotateSV::AnnotateForRepeatRegion: Reading & Storing Repeat TSV file as dictionary")
# Initialize dictionary of lists
dataDict = defaultdict(list)
with open(filename, 'r') as filecontent:
header = filecontent.readline()
for line in filecontent:
data = line.rstrip('\n').split('\t')
processedData = (data[0].replace('chr', ''))
slicedData = data[1:]
joinedData = '\t'.join(slicedData)
dataDict[processedData].append(joinedData)
return dataDict
def AnnotateRepeatRegion (verbose, count, sv, rrDict):
if(verbose):
logging.info("iAnnotateSV::AnnotateForRepeatRegion: Checking Entry %d in Repeat data", count)
# Initialize lists to store repeat annotations
list_svloc1 = []
list_svloc2 = []
# Read SV Data
sv_chr1 = str(sv.loc['chr1'])
sv_pos1 = int(sv.loc['pos1'])
sv_chr2 = str(sv.loc['chr2'])
sv_pos2 = int(sv.loc['pos2'])
# Traverse through Repeat Data Dict
list_loc1 = rrDict.get(sv_chr1, "None") # Get the values for the chromosome
if(list_loc1 != "None"): # Check if there are no keys with a particular chromosome
for loc in list_loc1: # For each location in all values check the overlap
data = loc.split('\t')
rr_pos1 = int(data[0])
rr_pos2 = int(data[1])
if (rr_pos1 <= sv_pos1 <= rr_pos2):
slicedData = data[2:]
joinedData = '-'.join(slicedData)
list_svloc1.append(joinedData)
else:
if(verbose):
logging.info("iAnnotateSV::AnnotateForRepeatRegion: Chromosome %s is not there in the repeat dictionary", sv_chr1)
list_loc2 = rrDict.get(sv_chr2, "None")
if(list_loc2 != "None"):
for loc in list_loc2:
data = loc.split('\t')
rr_pos1 = int(data[0])
rr_pos2 = int(data[1])
if (rr_pos1 <= sv_pos2 <= rr_pos2):
slicedData = data[2:]
joinedData = '-'.join(slicedData)
list_svloc2.append(joinedData)
else:
if(verbose):
logging.info("iAnnotateSV::AnnotateForRepeatRegion: Chromosome %s is not there in the repeat dictionary", sv_chr2)
return (list_svloc1, list_svloc2)
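# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The repeat
# dictionary is built inline instead of being read from a TSV so the example
# is self-contained; the column layout (start, end, annotation columns ...)
# mirrors what ReadRepeatFile() produces.
if __name__ == '__main__':
    example_rrDict = defaultdict(list)
    example_rrDict['1'].append('1000000\t2000000\tL1\tLINE')
    example_rrDict['2'].append('2400000\t2600000\tAluY\tSINE')
    example_sv = pd.Series({'chr1': '1', 'pos1': 1500000,
                            'chr2': '2', 'pos2': 2500000})
    ann1, ann2 = AnnotateRepeatRegion(True, 0, example_sv, example_rrDict)
    # Each list holds the hyphen-joined annotation columns of every repeat
    # interval that overlaps pos1 / pos2, e.g. ['L1-LINE'] and ['AluY-SINE']
    logging.info("Example repeat annotations: %s | %s", ann1, ann2)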
|
{
"content_hash": "d4c260976bf35e890f2ca9a5f4696d66",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 126,
"avg_line_length": 39.61194029850746,
"alnum_prop": 0.6096458176337604,
"repo_name": "rhshah/iAnnotateSV",
"id": "7dd0a14e3fb3f625d70f2bc7262e5c622a210323",
"size": "2654",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "iAnnotateSV/AnnotateForRepeatRegion.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "95"
},
{
"name": "Python",
"bytes": "90085"
}
],
"symlink_target": ""
}
|
import sys
import getpass
from boliau import cmdlib
from boliau.plugins.gspread import actionlib
def do_insert():
cmd = cmdlib.as_command(actionlib.Upsert(),
require_stdin=True)
cmd.add_argument('spreadsheet', help="spreadsheet name.")
cmd.add_argument('--email', help = "user email")
cmd.add_argument('--worksheet', help="worksheet name.", default='sheet1')
args = cmd.parse_argv()
args.password = getpass.getpass()
print cmd.call(args, stdin=sys.stdin)
|
{
"content_hash": "d86519a136fc9697b62d3de41ea3dcf3",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 77,
"avg_line_length": 34.46666666666667,
"alnum_prop": 0.6634429400386848,
"repo_name": "hychen/boliau",
"id": "f7351859a92efdb3f574e5bd14e14d145e99a4c7",
"size": "517",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "boliau/plugins/gspread/cmd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "112192"
},
{
"name": "Shell",
"bytes": "793"
}
],
"symlink_target": ""
}
|
from sqlalchemy import and_
from sqlalchemy import delete
from sqlalchemy import exc
from sqlalchemy import exists
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import or_
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy.dialects import mysql
from sqlalchemy.engine import default
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_deprecated
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
class _DeleteTestBase:
@classmethod
def define_tables(cls, metadata):
Table(
"mytable",
metadata,
Column("myid", Integer),
Column("name", String(30)),
Column("description", String(50)),
)
Table(
"myothertable",
metadata,
Column("otherid", Integer),
Column("othername", String(30)),
)
class DeleteTest(_DeleteTestBase, fixtures.TablesTest, AssertsCompiledSQL):
__dialect__ = "default"
def test_delete_literal_binds(self):
table1 = self.tables.mytable
stmt = table1.delete().where(table1.c.name == "jill")
self.assert_compile(
stmt,
"DELETE FROM mytable WHERE mytable.name = 'jill'",
literal_binds=True,
)
def test_delete(self):
table1 = self.tables.mytable
self.assert_compile(
delete(table1).where(table1.c.myid == 7),
"DELETE FROM mytable WHERE mytable.myid = :myid_1",
)
self.assert_compile(
table1.delete().where(table1.c.myid == 7),
"DELETE FROM mytable WHERE mytable.myid = :myid_1",
)
self.assert_compile(
table1.delete()
.where(table1.c.myid == 7)
.where(table1.c.name == "somename"),
"DELETE FROM mytable "
"WHERE mytable.myid = :myid_1 "
"AND mytable.name = :name_1",
)
def test_where_empty(self):
table1 = self.tables.mytable
with expect_deprecated():
self.assert_compile(
table1.delete().where(and_()), "DELETE FROM mytable"
)
with expect_deprecated():
self.assert_compile(
table1.delete().where(or_()), "DELETE FROM mytable"
)
def test_prefix_with(self):
table1 = self.tables.mytable
stmt = (
table1.delete()
.prefix_with("A", "B", dialect="mysql")
.prefix_with("C", "D")
)
self.assert_compile(stmt, "DELETE C D FROM mytable")
self.assert_compile(
stmt, "DELETE A B C D FROM mytable", dialect=mysql.dialect()
)
def test_alias(self):
table1 = self.tables.mytable
talias1 = table1.alias("t1")
stmt = delete(talias1).where(talias1.c.myid == 7)
self.assert_compile(
stmt, "DELETE FROM mytable AS t1 WHERE t1.myid = :myid_1"
)
def test_non_correlated_select(self):
table1, table2 = self.tables.mytable, self.tables.myothertable
# test a non-correlated WHERE clause
s = select(table2.c.othername).where(table2.c.otherid == 7)
self.assert_compile(
delete(table1).where(table1.c.name == s.scalar_subquery()),
"DELETE FROM mytable "
"WHERE mytable.name = ("
"SELECT myothertable.othername "
"FROM myothertable "
"WHERE myothertable.otherid = :otherid_1"
")",
)
def test_correlated_select(self):
table1, table2 = self.tables.mytable, self.tables.myothertable
# test one that is actually correlated...
s = select(table2.c.othername).where(table2.c.otherid == table1.c.myid)
self.assert_compile(
table1.delete().where(table1.c.name == s.scalar_subquery()),
"DELETE FROM mytable "
"WHERE mytable.name = ("
"SELECT myothertable.othername "
"FROM myothertable "
"WHERE myothertable.otherid = mytable.myid"
")",
)
class DeleteFromCompileTest(
_DeleteTestBase, fixtures.TablesTest, AssertsCompiledSQL
):
# DELETE FROM is also tested by individual dialects since there is no
    # consistent syntax. here we use the StrSQLCompiler which has a fake
# syntax.
__dialect__ = "default_enhanced"
def test_delete_extra_froms(self):
table1, table2 = self.tables.mytable, self.tables.myothertable
stmt = table1.delete().where(table1.c.myid == table2.c.otherid)
self.assert_compile(
stmt,
"DELETE FROM mytable , myothertable "
"WHERE mytable.myid = myothertable.otherid",
)
def test_correlation_to_extra(self):
table1, table2 = self.tables.mytable, self.tables.myothertable
stmt = (
table1.delete()
.where(table1.c.myid == table2.c.otherid)
.where(
~exists()
.where(table2.c.otherid == table1.c.myid)
.where(table2.c.othername == "x")
.correlate(table2)
)
)
self.assert_compile(
stmt,
"DELETE FROM mytable , myothertable WHERE mytable.myid = "
"myothertable.otherid AND NOT (EXISTS "
"(SELECT * FROM mytable WHERE myothertable.otherid = "
"mytable.myid AND myothertable.othername = :othername_1))",
)
def test_dont_correlate_to_extra(self):
table1, table2 = self.tables.mytable, self.tables.myothertable
stmt = (
table1.delete()
.where(table1.c.myid == table2.c.otherid)
.where(
~exists()
.where(table2.c.otherid == table1.c.myid)
.where(table2.c.othername == "x")
.correlate()
)
)
self.assert_compile(
stmt,
"DELETE FROM mytable , myothertable WHERE mytable.myid = "
"myothertable.otherid AND NOT (EXISTS "
"(SELECT * FROM myothertable, mytable "
"WHERE myothertable.otherid = "
"mytable.myid AND myothertable.othername = :othername_1))",
)
def test_autocorrelate_error(self):
table1, table2 = self.tables.mytable, self.tables.myothertable
stmt = (
table1.delete()
.where(table1.c.myid == table2.c.otherid)
.where(
~exists()
.where(table2.c.otherid == table1.c.myid)
.where(table2.c.othername == "x")
)
)
assert_raises_message(
exc.InvalidRequestError,
".*returned no FROM clauses due to auto-correlation.*",
stmt.compile,
dialect=default.StrCompileDialect(),
)
class DeleteFromRoundTripTest(fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"mytable",
metadata,
Column("myid", Integer),
Column("name", String(30)),
Column("description", String(50)),
)
Table(
"myothertable",
metadata,
Column("otherid", Integer),
Column("othername", String(30)),
)
Table(
"users",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(30), nullable=False),
)
Table(
"addresses",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("user_id", None, ForeignKey("users.id")),
Column("name", String(30), nullable=False),
Column("email_address", String(50), nullable=False),
)
Table(
"dingalings",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("address_id", None, ForeignKey("addresses.id")),
Column("data", String(30)),
)
Table(
"update_w_default",
metadata,
Column("id", Integer, primary_key=True),
Column("x", Integer),
Column("ycol", Integer, key="y"),
Column("data", String(30), onupdate=lambda: "hi"),
)
@classmethod
def fixtures(cls):
return dict(
users=(
("id", "name"),
(7, "jack"),
(8, "ed"),
(9, "fred"),
(10, "chuck"),
),
addresses=(
("id", "user_id", "name", "email_address"),
(1, 7, "x", "jack@bean.com"),
(2, 8, "x", "ed@wood.com"),
(3, 8, "x", "ed@bettyboop.com"),
(4, 8, "x", "ed@lala.com"),
(5, 9, "x", "fred@fred.com"),
),
dingalings=(
("id", "address_id", "data"),
(1, 2, "ding 1/2"),
(2, 5, "ding 2/5"),
),
)
@testing.requires.delete_using
def test_exec_two_table(self, connection):
users, addresses = self.tables.users, self.tables.addresses
dingalings = self.tables.dingalings
connection.execute(dingalings.delete()) # fk violation otherwise
connection.execute(
addresses.delete()
.where(users.c.id == addresses.c.user_id)
.where(users.c.name == "ed")
)
expected = [
(1, 7, "x", "jack@bean.com"),
(5, 9, "x", "fred@fred.com"),
]
self._assert_table(connection, addresses, expected)
@testing.requires.delete_using
def test_exec_three_table(self, connection):
users = self.tables.users
addresses = self.tables.addresses
dingalings = self.tables.dingalings
connection.execute(
dingalings.delete()
.where(users.c.id == addresses.c.user_id)
.where(users.c.name == "ed")
.where(addresses.c.id == dingalings.c.address_id)
)
expected = [(2, 5, "ding 2/5")]
self._assert_table(connection, dingalings, expected)
@testing.requires.delete_using
def test_exec_two_table_plus_alias(self, connection):
users, addresses = self.tables.users, self.tables.addresses
dingalings = self.tables.dingalings
connection.execute(dingalings.delete()) # fk violation otherwise
a1 = addresses.alias()
connection.execute(
addresses.delete()
.where(users.c.id == addresses.c.user_id)
.where(users.c.name == "ed")
.where(a1.c.id == addresses.c.id)
)
expected = [(1, 7, "x", "jack@bean.com"), (5, 9, "x", "fred@fred.com")]
self._assert_table(connection, addresses, expected)
@testing.requires.delete_using
def test_exec_alias_plus_table(self, connection):
users, addresses = self.tables.users, self.tables.addresses
dingalings = self.tables.dingalings
d1 = dingalings.alias()
connection.execute(
delete(d1)
.where(users.c.id == addresses.c.user_id)
.where(users.c.name == "ed")
.where(addresses.c.id == d1.c.address_id)
)
expected = [(2, 5, "ding 2/5")]
self._assert_table(connection, dingalings, expected)
def _assert_table(self, connection, table, expected):
stmt = table.select().order_by(table.c.id)
eq_(connection.execute(stmt).fetchall(), expected)
|
{
"content_hash": "7492f203672bc91778e9c2fbb07e9fe2",
"timestamp": "",
"source": "github",
"line_count": 379,
"max_line_length": 79,
"avg_line_length": 31.796833773087073,
"alnum_prop": 0.5450170110364285,
"repo_name": "sqlalchemy/sqlalchemy",
"id": "5b7e5ebbe3a9ab082521b24bae8c781f9279ade8",
"size": "12051",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "test/sql/test_delete.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "21698"
},
{
"name": "Python",
"bytes": "16838583"
}
],
"symlink_target": ""
}
|
from .._abstract.abstract import BaseSecurityHandler, BaseAGSServer
from ..security.security import AGSTokenSecurityHandler
import json, types
########################################################################
class MobileServiceLayer(BaseAGSServer):
"""
Represents a single mobile service layer
"""
_url = None
_proxy_url = None
_proxy_port = None
_securityHandler = None
_json = None
_json_dict = None
_display = None
_drawingInfo = None
_extent = None
_canModifyLayer = None
_advancedQueryCapabilities = None
_hasLabels = None
_supportsAdvancedQueries = None
_id = None
_currentVersion = None
_geometryType = None
_ownershipBasedAccessControlForFeatures = None
_type = None
_useStandardizedQueries = None
_supportedQueryFormats = None
_maxRecordCount = None
_description = None
_defaultVisibility = None
_typeIdField = None
_displayField = None
_name = None
_supportsStatistics = None
_hasAttachments = None
_fields = None
_maxScale = None
_copyrightText = None
_canScaleSymbols = None
_minScale = None
#----------------------------------------------------------------------
def __init__(self, url,
securityHandler=None,
proxy_url=None,
proxy_port=None,
initialize=False):
"""Constructor"""
self._url = url
self._securityHandler = securityHandler
if self._securityHandler is not None:
self._referer_url = self._securityHandler.referer_url
self._proxy_port = proxy_port
self._proxy_url = proxy_url
if initialize:
self.__init()
#----------------------------------------------------------------------
def __init(self):
""" inializes the properties """
params = {
"f" : "json",
}
json_dict = self._do_get(self._url, params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
self._json_dict = json_dict
self._json = json.dumps(self._json_dict)
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_')]
for k,v in json_dict.iteritems():
if k in attributes:
setattr(self, "_"+ k, v)
else:
print k, " - attribute not implemented for Mobile Service Layer."
#----------------------------------------------------------------------
def __str__(self):
"""returns object as string"""
if self._json is None:
self.__init()
return self._json
#----------------------------------------------------------------------
def __iter__(self):
"""
returns key/value pair
"""
attributes = json.loads(str(self))
for att in attributes.keys():
yield (att, getattr(self, att))
#----------------------------------------------------------------------
@property
def drawingInfo(self):
"""gets the services drawing information"""
if self._drawingInfo is None:
self.__init()
return self._drawingInfo
#----------------------------------------------------------------------
@property
def extent(self):
"""returns the service layer extent"""
if self._extent is None:
self.__init()
return self._extent
#----------------------------------------------------------------------
@property
def canModifyLayer(self):
"""returns value for can modify layer"""
if self._canModifyLayer is None:
self.__init()
return self._canModifyLayer
#----------------------------------------------------------------------
@property
def advancedQueryCapabilities(self):
"""gets the advancedQueryCapabilities value"""
if self._advancedQueryCapabilities is None:
self.__init()
return self._advancedQueryCapabilities
#----------------------------------------------------------------------
@property
def hasLabels(self):
"""returns the has labels value"""
if self._hasLabels is None:
self.__init()
return self._hasLabels
#----------------------------------------------------------------------
@property
def supportsAdvancedQueries(self):
"""returns the supportsAdvancedQueries value"""
if self._supportsAdvancedQueries is None:
self.__init()
return self._supportsAdvancedQueries
#----------------------------------------------------------------------
@property
def id(self):
"""returns the layers' id"""
if self._id is None:
self.__init()
return self._id
#----------------------------------------------------------------------
@property
def currentVersion(self):
"""gets the layers current version"""
if self._currentVersion is None:
self.__init()
return self._currentVersion
#----------------------------------------------------------------------
@property
def geometryType(self):
"""retusn the layers geometry type"""
if self._geometryType is None:
self.__init()
return self._geometryType
#----------------------------------------------------------------------
@property
def ownershipBasedAccessControlForFeatures(self):
"""returns the ownershipBasedAccessControlForFeatures value"""
if self._ownershipBasedAccessControlForFeatures is None:
self.__init()
return self._ownershipBasedAccessControlForFeatures
#----------------------------------------------------------------------
@property
def type(self):
"""gets the layer type"""
if self._type is None:
self.__init()
return self._type
#----------------------------------------------------------------------
@property
def useStandardizedQueries(self):
"""gets the useStandardizedQueries value"""
if self._useStandardizedQueries is None:
self.__init()
return self._useStandardizedQueries
#----------------------------------------------------------------------
@property
def hasAttachments(self):
"""returns if the layer has attachments enabled"""
if self._hasAttachments is None:
self.__init()
return self._hasAttachments
#----------------------------------------------------------------------
@property
def supportedQueryFormats(self):
"""returns the supportedQueryFormats value"""
if self._supportedQueryFormats is None:
self.__init()
return self._supportedQueryFormats
#----------------------------------------------------------------------
@property
def maxRecordCount(self):
"""returns the max record count"""
if self._maxRecordCount is None:
self.__init()
return self._maxRecordCount
#----------------------------------------------------------------------
@property
def description(self):
"""returns the service layer description"""
if self._description is None:
self.__init()
return self._description
#----------------------------------------------------------------------
@property
def defaultVisibility(self):
"""returns the defaultVisibility value"""
if self._defaultVisibility is None:
self.__init()
return self._defaultVisibility
#----------------------------------------------------------------------
@property
def typeIdField(self):
"""returns the type id field"""
if self._typeIdField is None:
self.__init()
return self._typeIdField
#----------------------------------------------------------------------
@property
def displayField(self):
"""returns the display field"""
if self._displayField is None:
self.__init()
        return self._displayField
#----------------------------------------------------------------------
@property
def name(self):
"""returns the layers name"""
if self._name is None:
self.__init()
return self._name
#----------------------------------------------------------------------
@property
def supportsStatistics(self):
"""returns the supports statistics value"""
if self._supportsStatistics is None:
self.__init()
return self._supportsStatistics
#----------------------------------------------------------------------
@property
def fields(self):
"""gets the fields for the layer"""
if self._fields is None:
self.__init()
return self._fields
#----------------------------------------------------------------------
@property
def copyrightText(self):
"""gets the copy right text"""
if self._copyrightText is None:
self.__init()
return self._copyrightText
#----------------------------------------------------------------------
@property
def canScaleSymbols(self):
"""returns the can scale symbols value"""
if self._canScaleSymbols is None:
self.__init()
return self._canScaleSymbols
#----------------------------------------------------------------------
@property
def minScale(self):
"""returns the minScale value"""
if self._minScale is None:
self.__init()
return self._minScale
#----------------------------------------------------------------------
@property
def maxScale(self):
"""gets the max scale for the layer"""
if self._maxScale is None:
self.__init()
return self._maxScale
########################################################################
class MobileService(BaseAGSServer):
"""
    Represents a single mobile service
"""
_url = None
_proxy_url = None
_proxy_port = None
_securityHandler = None
_json = None
_json_dict = None
_layers = None
_description = None
_initialExtent = None
_spatialReference = None
_mapName = None
_currentVersion = None
_units = None
_fullExtent = None
_serviceDescription = None
#----------------------------------------------------------------------
def __init__(self, url,
securityHandler=None,
proxy_url=None,
proxy_port=None,
initialize=False):
"""Constructor"""
self._url = url
self._securityHandler = securityHandler
if self._securityHandler is not None:
self._referer_url = self._securityHandler.referer_url
self._proxy_port = proxy_port
self._proxy_url = proxy_url
if initialize:
self.__init()
#----------------------------------------------------------------------
def __init(self):
""" inializes the properties """
params = {
"f" : "json",
}
json_dict = self._do_get(self._url, params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
self._json_dict = json_dict
self._json = json.dumps(self._json_dict)
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_')]
for k,v in json_dict.iteritems():
if k in attributes:
setattr(self, "_"+ k, v)
else:
print k, " - attribute not implemented for Mobile Service."
#----------------------------------------------------------------------
def __str__(self):
"""returns object as string"""
if self._json is None:
self.__init()
return self._json
#----------------------------------------------------------------------
def __iter__(self):
"""
returns key/value pair
"""
attributes = json.loads(str(self))
for att in attributes.keys():
yield [att, getattr(self, att)]
#----------------------------------------------------------------------
@property
def layers(self):
"""gets the service layers"""
if self._layers is None:
self.__init()
lyrs = []
for lyr in self._layers:
url = self._url + "/%s" % lyr['id']
lyr['object'] = MobileServiceLayer(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
initialize=False)
return self._layers
#----------------------------------------------------------------------
@property
def description(self):
"""gets the service description"""
if self._description is None:
self.__init()
return self._description
#----------------------------------------------------------------------
@property
def initialExtent(self):
"""gets the service initial extent"""
if self._initialExtent is None:
self.__init()
return self._initialExtent
#----------------------------------------------------------------------
@property
def spatialReference(self):
"""gets the spatial reference"""
if self._spatialReference is None:
self.__init()
return self._spatialReference
#----------------------------------------------------------------------
@property
def mapName(self):
"""gets the map name"""
if self._mapName is None:
            self.__init()
return self._mapName
#----------------------------------------------------------------------
@property
def currentVersion(self):
"""gets the current version"""
if self._currentVersion is None:
self.__init()
return self._currentVersion
#----------------------------------------------------------------------
@property
def units(self):
"""gets the units for the service"""
if self._units is None:
self.__init()
return self._units
#----------------------------------------------------------------------
@property
def fullExtent(self):
"""returns the service full extent"""
if self._fullExtent is None:
self.__init()
return self._fullExtent
#----------------------------------------------------------------------
@property
def serviceDescription(self):
"""returns the service description"""
if self._serviceDescription is None:
self.__init()
return self._serviceDescription
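#----------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module; the URL below
# is hypothetical). Every property lazily calls __init(), so the service's
# REST endpoint is only queried on first access:
#
#   service = MobileService(
#       url="http://myserver:6080/arcgis/rest/services/MyMap/MobileServer",
#       initialize=False)
#   service.mapName                   # first access triggers the JSON fetch
#   for lyr in service.layers:
#       lyr['object']                 # each entry wraps a MobileServiceLayer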
|
{
"content_hash": "3b55a8a7e0f00797c274d1f608faff38",
"timestamp": "",
"source": "github",
"line_count": 413,
"max_line_length": 85,
"avg_line_length": 37.191283292978206,
"alnum_prop": 0.4329427083333333,
"repo_name": "conklinbd/MovementAnalysis",
"id": "278a602781af467466646b82fa93e7bea4b0e113",
"size": "15360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TemplateInstall/PortalDeploy/arcrest/ags/_mobileservice.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "156"
},
{
"name": "Python",
"bytes": "1855353"
}
],
"symlink_target": ""
}
|
from servee import frontendadmin
from servee.frontendadmin.insert import ModelInsert
from sample_image_insert.models import Image
class ImageInsert(ModelInsert):
model = Image
frontendadmin.site.register_insert(ImageInsert)
|
{
"content_hash": "8bd1d0abb668878ebfb1d8e302bcd08e",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 51,
"avg_line_length": 28.625,
"alnum_prop": 0.8384279475982532,
"repo_name": "servee/servee",
"id": "2f6eb21dd333b32325f26abb8a9fb7045b605634",
"size": "229",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example_project/sample_image_insert/servee_registry.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "68505"
},
{
"name": "HTML",
"bytes": "46896"
},
{
"name": "JavaScript",
"bytes": "5602"
},
{
"name": "Python",
"bytes": "56014"
}
],
"symlink_target": ""
}
|
from typing import Optional
from functools import partial
from ConfigSpace import NotEqualsCondition
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import (
CategoricalHyperparameter,
UniformFloatHyperparameter,
)
from autosklearn.askl_typing import FEAT_TYPE_TYPE
from autosklearn.pipeline.components.base import AutoSklearnPreprocessingAlgorithm
from autosklearn.pipeline.constants import (
DENSE,
INPUT,
SIGNED_DATA,
SPARSE,
UNSIGNED_DATA,
)
class SelectClassificationRates(AutoSklearnPreprocessingAlgorithm):
def __init__(self, alpha, mode="fpr", score_func="chi2", random_state=None):
import sklearn.feature_selection
self.random_state = random_state # We don't use this
self.alpha = alpha
self.mode = mode
if score_func == "chi2":
self.score_func = sklearn.feature_selection.chi2
elif score_func == "f_classif":
self.score_func = sklearn.feature_selection.f_classif
elif score_func == "mutual_info_classif":
self.score_func = partial(
sklearn.feature_selection.mutual_info_classif,
random_state=self.random_state,
)
# mutual info classif constantly crashes without mode percentile
self.mode = "percentile"
else:
raise ValueError(
"score_func must be in ('chi2, 'f_classif', 'mutual_info_classif') "
"for classification "
"but is: %s " % (score_func)
)
def fit(self, X, y):
import scipy.sparse
import sklearn.feature_selection
self.alpha = float(self.alpha)
self.preprocessor = sklearn.feature_selection.GenericUnivariateSelect(
score_func=self.score_func, param=self.alpha, mode=self.mode
)
# Because the pipeline guarantees that each feature is positive,
# clip all values below zero to zero
if self.score_func == sklearn.feature_selection.chi2:
if scipy.sparse.issparse(X):
X.data[X.data < 0] = 0.0
else:
X[X < 0] = 0.0
self.preprocessor.fit(X, y)
return self
def transform(self, X):
import scipy.sparse
import sklearn.feature_selection
# Because the pipeline guarantees that each feature is positive,
# clip all values below zero to zero
if self.score_func == sklearn.feature_selection.chi2:
if scipy.sparse.issparse(X):
X.data[X.data < 0] = 0.0
else:
X[X < 0] = 0.0
if self.preprocessor is None:
raise NotImplementedError()
try:
Xt = self.preprocessor.transform(X)
except ValueError as e:
if (
"zero-size array to reduction operation maximum which has no "
"identity" in e.message
):
raise ValueError("%s removed all features." % self.__class__.__name__)
else:
raise e
if Xt.shape[1] == 0:
raise ValueError("%s removed all features." % self.__class__.__name__)
return Xt
@staticmethod
def get_properties(dataset_properties=None):
data_type = UNSIGNED_DATA
if dataset_properties is not None:
signed = dataset_properties.get("signed")
if signed is not None:
data_type = SIGNED_DATA if signed is True else UNSIGNED_DATA
return {
"shortname": "SR",
"name": "Univariate Feature Selection based on rates",
"handles_regression": False,
"handles_classification": True,
"handles_multiclass": True,
"handles_multilabel": False,
"handles_multioutput": False,
"is_deterministic": True,
"input": (SPARSE, DENSE, data_type),
"output": (INPUT,),
}
@staticmethod
def get_hyperparameter_search_space(
feat_type: Optional[FEAT_TYPE_TYPE] = None, dataset_properties=None
):
alpha = UniformFloatHyperparameter(
name="alpha", lower=0.01, upper=0.5, default_value=0.1
)
if dataset_properties is not None and dataset_properties.get("sparse"):
choices = ["chi2", "mutual_info_classif"]
else:
choices = ["chi2", "f_classif", "mutual_info_classif"]
score_func = CategoricalHyperparameter(
name="score_func", choices=choices, default_value="chi2"
)
mode = CategoricalHyperparameter("mode", ["fpr", "fdr", "fwe"], "fpr")
cs = ConfigurationSpace()
cs.add_hyperparameter(alpha)
cs.add_hyperparameter(score_func)
cs.add_hyperparameter(mode)
# mutual_info_classif constantly crashes if mode is not percentile
# as a WA, fix the mode for this score
cond = NotEqualsCondition(mode, score_func, "mutual_info_classif")
cs.add_condition(cond)
return cs
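# Minimal usage sketch (not part of auto-sklearn): fit the selector on a toy
# dataset with the default hyperparameters (chi2 + "fpr" mode). The iris data
# is used purely for illustration because its features are non-negative,
# which is what chi2 requires.
if __name__ == "__main__":
    from sklearn.datasets import load_iris

    X, y = load_iris(return_X_y=True)
    selector = SelectClassificationRates(alpha=0.1, mode="fpr", score_func="chi2")
    Xt = selector.fit(X, y).transform(X)
    # Xt keeps only the columns whose chi2 test passes the "fpr" threshold
    print(Xt.shape)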
|
{
"content_hash": "0c78b486e321b40dc86b567382d0993a",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 86,
"avg_line_length": 33.986666666666665,
"alnum_prop": 0.5961161239701844,
"repo_name": "automl/auto-sklearn",
"id": "3a728d753ec21d45e7105c41dbbf63cf2808e611",
"size": "5098",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "autosklearn/pipeline/components/feature_preprocessing/select_rates_classification.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "950"
},
{
"name": "Makefile",
"bytes": "3513"
},
{
"name": "Python",
"bytes": "2008151"
},
{
"name": "Shell",
"bytes": "4744"
}
],
"symlink_target": ""
}
|
import atexit
from collections import deque
from functools import partial
import logging
import os
import socket
import sys
from threading import Lock, Thread
import time
import weakref
from six.moves import range
try:
from weakref import WeakSet
except ImportError:
from cassandra.util import WeakSet # noqa
import asyncore
try:
import ssl
except ImportError:
ssl = None # NOQA
from cassandra.connection import Connection, ConnectionShutdown, NONBLOCKING, Timer, TimerManager
log = logging.getLogger(__name__)
_dispatcher_map = {}
def _cleanup(loop_weakref):
try:
loop = loop_weakref()
except ReferenceError:
return
loop._cleanup()
class _PipeWrapper(object):
def __init__(self, fd):
self.fd = fd
def fileno(self):
return self.fd
def close(self):
os.close(self.fd)
def getsockopt(self, level, optname, buflen=None):
# act like an unerrored socket for the asyncore error handling
if level == socket.SOL_SOCKET and optname == socket.SO_ERROR and not buflen:
return 0
raise NotImplementedError()
class _AsyncoreDispatcher(asyncore.dispatcher):
def __init__(self, socket):
asyncore.dispatcher.__init__(self, map=_dispatcher_map)
# inject after to avoid base class validation
self.set_socket(socket)
self._notified = False
def writable(self):
return False
def validate(self):
assert not self._notified
self.notify_loop()
assert self._notified
self.loop(0.1)
assert not self._notified
def loop(self, timeout):
asyncore.loop(timeout=timeout, use_poll=True, map=_dispatcher_map, count=1)
class _AsyncorePipeDispatcher(_AsyncoreDispatcher):
def __init__(self):
self.read_fd, self.write_fd = os.pipe()
_AsyncoreDispatcher.__init__(self, _PipeWrapper(self.read_fd))
def writable(self):
return False
def handle_read(self):
while len(os.read(self.read_fd, 4096)) == 4096:
pass
self._notified = False
def notify_loop(self):
if not self._notified:
self._notified = True
os.write(self.write_fd, b'x')
class _AsyncoreUDPDispatcher(_AsyncoreDispatcher):
"""
Experimental alternate dispatcher for avoiding busy wait in the asyncore loop. It is not used by default because
it relies on local port binding.
Port scanning is not implemented, so multiple clients on one host will collide. This address would need to be set per
instance, or this could be specialized to scan until an address is found.
To use::
from cassandra.io.asyncorereactor import _AsyncoreUDPDispatcher, AsyncoreLoop
AsyncoreLoop._loop_dispatch_class = _AsyncoreUDPDispatcher
"""
bind_address = ('localhost', 10000)
def __init__(self):
self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._socket.bind(self.bind_address)
self._socket.setblocking(0)
_AsyncoreDispatcher.__init__(self, self._socket)
def handle_read(self):
try:
d = self._socket.recvfrom(1)
while d and d[1]:
d = self._socket.recvfrom(1)
except socket.error as e:
pass
self._notified = False
def notify_loop(self):
if not self._notified:
self._notified = True
self._socket.sendto(b'', self.bind_address)
def loop(self, timeout):
asyncore.loop(timeout=timeout, use_poll=False, map=_dispatcher_map, count=1)
class _BusyWaitDispatcher(object):
max_write_latency = 0.001
"""
Timeout pushed down to asyncore select/poll. Dictates the amount of time it will sleep before coming back to check
if anything is writable.
"""
def notify_loop(self):
pass
def loop(self, timeout):
if not _dispatcher_map:
time.sleep(0.005)
count = timeout // self.max_write_latency
asyncore.loop(timeout=self.max_write_latency, use_poll=True, map=_dispatcher_map, count=count)
def validate(self):
pass
def close(self):
pass
class AsyncoreLoop(object):
timer_resolution = 0.1 # used as the max interval to be in the io loop before returning to service timeouts
_loop_dispatch_class = _AsyncorePipeDispatcher if os.name != 'nt' else _BusyWaitDispatcher
def __init__(self):
self._pid = os.getpid()
self._loop_lock = Lock()
self._started = False
self._shutdown = False
self._thread = None
self._timers = TimerManager()
try:
dispatcher = self._loop_dispatch_class()
dispatcher.validate()
log.debug("Validated loop dispatch with %s", self._loop_dispatch_class)
except Exception:
log.exception("Failed validating loop dispatch with %s. Using busy wait execution instead.", self._loop_dispatch_class)
dispatcher.close()
dispatcher = _BusyWaitDispatcher()
self._loop_dispatcher = dispatcher
atexit.register(partial(_cleanup, weakref.ref(self)))
def maybe_start(self):
should_start = False
did_acquire = False
try:
did_acquire = self._loop_lock.acquire(False)
if did_acquire and not self._started:
self._started = True
should_start = True
finally:
if did_acquire:
self._loop_lock.release()
if should_start:
self._thread = Thread(target=self._run_loop, name="cassandra_driver_event_loop")
self._thread.daemon = True
self._thread.start()
def wake_loop(self):
self._loop_dispatcher.notify_loop()
def _run_loop(self):
log.debug("Starting asyncore event loop")
with self._loop_lock:
while not self._shutdown:
try:
self._loop_dispatcher.loop(self.timer_resolution)
self._timers.service_timeouts()
except Exception:
log.debug("Asyncore event loop stopped unexepectedly", exc_info=True)
break
self._started = False
log.debug("Asyncore event loop ended")
def add_timer(self, timer):
self._timers.add_timer(timer)
def _cleanup(self):
self._shutdown = True
if not self._thread:
return
log.debug("Waiting for event loop thread to join...")
self._thread.join(timeout=1.0)
if self._thread.is_alive():
log.warning(
"Event loop thread could not be joined, so shutdown may not be clean. "
"Please call Cluster.shutdown() to avoid this.")
log.debug("Event loop thread was joined")
class AsyncoreConnection(Connection, asyncore.dispatcher):
"""
An implementation of :class:`.Connection` that uses the ``asyncore``
module in the Python standard library for its event loop.
"""
_loop = None
_writable = False
_readable = False
@classmethod
def initialize_reactor(cls):
if not cls._loop:
cls._loop = AsyncoreLoop()
else:
current_pid = os.getpid()
if cls._loop._pid != current_pid:
log.debug("Detected fork, clearing and reinitializing reactor state")
cls.handle_fork()
cls._loop = AsyncoreLoop()
@classmethod
def handle_fork(cls):
global _dispatcher_map
_dispatcher_map = {}
if cls._loop:
cls._loop._cleanup()
cls._loop = None
@classmethod
def create_timer(cls, timeout, callback):
timer = Timer(timeout, callback)
cls._loop.add_timer(timer)
return timer
def __init__(self, *args, **kwargs):
Connection.__init__(self, *args, **kwargs)
self.deque = deque()
self.deque_lock = Lock()
self._connect_socket()
asyncore.dispatcher.__init__(self, self._socket, _dispatcher_map)
self._writable = True
self._readable = True
self._send_options_message()
# start the event loop if needed
self._loop.maybe_start()
def close(self):
with self.lock:
if self.is_closed:
return
self.is_closed = True
log.debug("Closing connection (%s) to %s", id(self), self.host)
self._writable = False
self._readable = False
asyncore.dispatcher.close(self)
log.debug("Closed socket to %s", self.host)
if not self.is_defunct:
self.error_all_requests(
ConnectionShutdown("Connection to %s was closed" % self.host))
        # This happens when the connection is shut down while waiting for the ReadyMessage
if not self.connected_event.is_set():
self.last_error = ConnectionShutdown("Connection to %s was closed" % self.host)
# don't leave in-progress operations hanging
self.connected_event.set()
def handle_error(self):
self.defunct(sys.exc_info()[1])
def handle_close(self):
log.debug("Connection %s closed by server", self)
self.close()
def handle_write(self):
while True:
with self.deque_lock:
try:
next_msg = self.deque.popleft()
except IndexError:
self._writable = False
return
try:
sent = self.send(next_msg)
self._readable = True
except socket.error as err:
if (err.args[0] in NONBLOCKING):
with self.deque_lock:
self.deque.appendleft(next_msg)
else:
self.defunct(err)
return
else:
if sent < len(next_msg):
with self.deque_lock:
self.deque.appendleft(next_msg[sent:])
if sent == 0:
return
def handle_read(self):
try:
while True:
buf = self.recv(self.in_buffer_size)
self._iobuf.write(buf)
if len(buf) < self.in_buffer_size:
break
except socket.error as err:
if ssl and isinstance(err, ssl.SSLError):
if err.args[0] not in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE):
self.defunct(err)
return
elif err.args[0] not in NONBLOCKING:
self.defunct(err)
return
if self._iobuf.tell():
self.process_io_buffer()
if not self._requests and not self.is_control_connection:
self._readable = False
def push(self, data):
sabs = self.out_buffer_size
if len(data) > sabs:
chunks = []
for i in range(0, len(data), sabs):
chunks.append(data[i:i + sabs])
else:
chunks = [data]
with self.deque_lock:
self.deque.extend(chunks)
self._writable = True
self._loop.wake_loop()
def writable(self):
return self._writable
def readable(self):
return self._readable or (self.is_control_connection and not (self.is_defunct or self.is_closed))
|
{
"content_hash": "473be404c39d03bc99f352f87eb4c2cb",
"timestamp": "",
"source": "github",
"line_count": 392,
"max_line_length": 131,
"avg_line_length": 29.45408163265306,
"alnum_prop": 0.5788151740862636,
"repo_name": "Ahmad31/Web_Flask_Cassandra",
"id": "9fd0d15a4ec72273933e155d66c14cf4adcd34be",
"size": "12125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask/lib/python2.7/site-packages/cassandra/io/asyncorereactor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "34860"
},
{
"name": "CSS",
"bytes": "6270"
},
{
"name": "HTML",
"bytes": "86875"
},
{
"name": "JavaScript",
"bytes": "7232"
},
{
"name": "Jupyter Notebook",
"bytes": "181"
},
{
"name": "Python",
"bytes": "12265503"
},
{
"name": "Shell",
"bytes": "3248"
}
],
"symlink_target": ""
}
|
import luigi
import logging
import numpy as np
from concurrent import futures
import os
import vigra
from pipelineParameter import PipelineParameter
from multicutProblemTasks import MulticutProblem
from blockwiseMulticutTasks import BlockwiseMulticutSolver
from nifty_helper import nifty_lmc_objective, run_nifty_lmc, nifty_lmc_fm_factory, nifty_lmc_kl_factory
from customTargets import HDF5DataTarget
from tools import config_logger, run_decorator, replace_from_dict
# init the workflow logger
workflow_logger = logging.getLogger(__name__)
config_logger(workflow_logger)
# import the proper nifty version
try:
import nifty
import nifty.graph.rag as nrag
except ImportError:
try:
import nifty_with_cplex as nifty
import nifty_with_cplex.graph.rag as nrag
except ImportError:
import nifty_with_gurobi as nifty
import nifty_with_gurobi.graph.rag as nrag
# TODO criteria for the candidates
class CandidateObjects(luigi.Task):
SegmentationPath = luigi.Parameter()
def requires(self):
pass
    # Find large candidate objects in the segmentation and
# store their bounding box and mask
@run_decorator
def run(self):
pass
def output(self):
pass
class SkeletonsFromCandidates(luigi.Task):
SegmentationPath = luigi.Parameter()
def requires(self):
pass
# Skeletonize the candidate objects
@run_decorator
def run(self):
pass
def output(self):
pass
# features for skeletons
# TODO figure out what we want here and if we actually treat this as features or directly as weights for the lmc
# potential features:
# -> nblast
# -> skeleton graph properties
# -> ???
class SkeletonFeatures(luigi.Task):
# the skeletons, TODO wrap different skeletons via tasks (external skeletons, skeletons from candidates)
Skeletons = luigi.TaskParameter()
def requires(self):
pass
@run_decorator
def run(self):
pass
def output(self):
pass
# CandidateObjects, should be saved in the following format:
# "objects": Array with the ids of objects that should be resolved
# "lifted_uvs": Array with uv-ids corresponding to the lifted edges for each object
# "lifted_costs": Array wih the costs for the lifted edges for each object
class GenerateCandidateObjects(luigi.Task):
#
SegmentationPath = luigi.Parameter()
def requires(self):
pass
@run_decorator
def run(self):
pass
def output(self):
pass
class ResolveCandidates(luigi.Task):
segmentationPath = luigi.Parameter()
fragmentationPath = luigi.Parameter()
fragmentClassifierPath = luigi.Parameter()
weight = luigi.Parameter(default=1.)
numberOfLevels = luigi.Parameter(default=1.)
def requires(self):
mc_problem = MulticutProblem(self.fragmentationPath, self.fragmentClassifierPath)
return {
"mc_problem": mc_problem,
"candidate_objects": GenerateCandidateObjects(self.segmentationPath),
"mc_nodes": BlockwiseMulticutSolver(self.segmentationPath, mc_problem, self.numberOfLevels)
}
@run_decorator
def run(self):
inp = self.input()
local_costs = inp["mc_problem"].read("costs")
local_graph = inp["mc_problem"].read("graph")
mc_nodes = inp["mc_nodes"].read()
candidates = inp["candidate_objects"].read("objects")
# TODO lifted uvs need to be reshaped properly!
lifted_uvs_to_candidates = inp["candidate_objects"].read("lifted_uv_ids")
lifted_costs_to_candidates = inp["candidate_objects"].read("lifted_costs")
def resolve_candidate(i, object_id):
# find the nodes in the fragmentation that belong to this object
fragment_nodes = np.where(mc_nodes == object_id)[0]
lifted_uv_ids = lifted_uvs_to_candidates[i]
lifted_costs = lifted_costs_to_candidates[i]
# get the local edge-ids
inner_edges, outer_edges, subgraph = local_graph.extractSubgraphFromNodes(fragment_nodes)
costs = local_costs[inner_edges]
# this is how the global nodes are mapped to local nodes
# in 'extractSubgraphFromNodes'
# TODO would be nice to also get this from the nifty function directly
global_to_local_nodes = {node: i for i, node in enumerate(fragment_nodes)}
uv_ids = subgraph.uvIds()
lifted_uv_ids = replace_from_dict(lifted_uv_ids, global_to_local_nodes)
# properly weight the local and lifted edges
costs /= len(uv_ids)
lifted_costs /= len(lifted_uv_ids)
lifted_costs *= self.weight
# resolve the object with a lifted multicut
lmc_objective = nifty_lmc_objective(
uv_ids,
lifted_uv_ids,
costs,
lifted_costs
)
return fragment_nodes, run_nifty_lmc(
lmc_objective,
nifty_lmc_fm_factory(
lmc_objective,
nifty_lmc_kl_factory(
lmc_objective,
warmstart=True
),
warmstart=True
)
)
with futures.ThreadPoolExecutor(max_workers=PipelineParameter().nThreads) as tp:
tasks = [tp.submit(resolve_candidate, i, object_id) for i, object_id in enumerate(candidates)]
results = [t.result() for t in tasks]
offset = mc_nodes.max() + 1
resolved_node_result = mc_nodes.copy()
for fragment_nodes, lmc_res in results:
lmc_res += offset
offset = lmc_res.max() + 1
resolved_node_result[fragment_nodes] = lmc_res
resolved_node_result = vigra.analysis.relabelConsecutive(
resolved_node_result,
start_label=0
)
self.output().write(resolved_node_result)
def output(self):
save_path = os.path.join(
PipelineParameter().cache,
"ResolvedSegmentation.h5"
)
return HDF5DataTarget(save_path)
|
{
"content_hash": "9aaf7e0e70f7b08ed0f04b49a1e195cf",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 112,
"avg_line_length": 30.387254901960784,
"alnum_prop": 0.6363929666075173,
"repo_name": "constantinpape/mc_luigi",
"id": "24a3e46473949e8fc79b997578d5971df8c80ffe",
"size": "6260",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mc_luigi/skeletonTasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "316528"
},
{
"name": "Shell",
"bytes": "1030"
}
],
"symlink_target": ""
}
|
"""
WSGI config for pyment project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
from django.core.wsgi import get_wsgi_application
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pyment.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
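# A minimal sketch of such a wrapper (illustrative only, not part of pyment):
# def environ_defaults_middleware(app):
#     def wrapped(environ, start_response):
#         # adjust the WSGI environ before handing the request to Django
#         environ.setdefault("HTTP_X_FORWARDED_PROTO", "http")
#         return app(environ, start_response)
#     return wrapped
# application = environ_defaults_middleware(application)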
|
{
"content_hash": "033bb065a65d11553f56a232ee8887a1",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 79,
"avg_line_length": 39.13793103448276,
"alnum_prop": 0.7991189427312775,
"repo_name": "mathuin/pyment",
"id": "528ab72773d57477ab5d366311608100f1f5c9af",
"size": "1135",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "django/pyment/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7253"
},
{
"name": "CSS",
"bytes": "3691"
},
{
"name": "Dockerfile",
"bytes": "325"
},
{
"name": "HTML",
"bytes": "17986"
},
{
"name": "JavaScript",
"bytes": "1995"
},
{
"name": "Makefile",
"bytes": "7418"
},
{
"name": "Python",
"bytes": "239563"
},
{
"name": "Shell",
"bytes": "4610"
}
],
"symlink_target": ""
}
|
from .candlefactory import CandleFactory, granularity_to_time
from .streamrecord import *
|
{
"content_hash": "0ea3a2b3b00caefc11d73328501667c8",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 61,
"avg_line_length": 45,
"alnum_prop": 0.8333333333333334,
"repo_name": "hootnot/oanda-trading-environment",
"id": "11bbb9c57aede0f1ba3a39f7028eca40cf96f71f",
"size": "91",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oanda_trading_environment/stream/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47010"
}
],
"symlink_target": ""
}
|
from car import Car
from belief import BeliefDriveItEnv
from DeepQNetwork import *
from argparse import ArgumentParser
from explorer import ExpEpsilonAnnealingExplorer
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('-g', '--gamma', default=0.99, type=float, help='Reward discount factor')
parser.add_argument('-n', '--noop', default=0, type=int, help='Number of steps where the agent takes the default action')
parser.add_argument('-e', '--epoch', default=100, type=int, help='Number of epochs to run')
parser.add_argument('-s', '--steps', default=10000, type=int, help='Number of steps per epoch')
parser.add_argument('-p', '--plot', action='store_true', default=False, help='Flag for enabling Tensorboard')
args = parser.parse_args()
max_steps = args.epoch * args.steps
env = BeliefDriveItEnv(time_limit=180)
agent = DeepQAgent(env.observation_space.shape, env.action_space.n, \
gamma=args.gamma, explorer=ExpEpsilonAnnealingExplorer(1, 0.1, max_steps), monitor=args.plot)
current_step = 0
action = 0
current_state = env.reset()
while current_step < max_steps:
if current_step >= args.noop:
action = agent.act(np.asarray(current_state, dtype='float32'))
new_state, reward, done, _ = env.step(action)
# Clipping reward for training stability
reward = np.clip(reward, -1, 1)
agent.observe(current_state, action, reward, new_state, done)
agent.train()
current_state = new_state
if done:
current_state = env.reset()
current_step += 1
agent.save("plop.mod")
|
{
"content_hash": "3f9ab20ac5d2c0a89a51800051115e2b",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 125,
"avg_line_length": 37.46666666666667,
"alnum_prop": 0.6577698695136418,
"repo_name": "jcsharp/DriveIt",
"id": "5e8bc942bd0d86f3a7ea7da8548df342285a67a6",
"size": "1726",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DQN/train.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "85"
},
{
"name": "Jupyter Notebook",
"bytes": "7074652"
},
{
"name": "Python",
"bytes": "115513"
}
],
"symlink_target": ""
}
|
import sys
sys.path.append('/srv/newsblur')
import os
import datetime
import requests
import settings
import socket
def main():
t = os.popen('stat -c%Y /var/lib/redis/dump.rdb')
timestamp = t.read().split('\n')[0]
modified = datetime.datetime.fromtimestamp(int(timestamp))
ten_min_ago = datetime.datetime.now() - datetime.timedelta(minutes=10)
hostname = socket.gethostname()
modified_minutes = datetime.datetime.now() - modified
log_tail = os.popen('tail -n 100 /var/log/redis.log').read()
if modified < ten_min_ago:
requests.post(
"https://api.mailgun.net/v2/%s/messages" % settings.MAILGUN_SERVER_NAME,
auth=("api", settings.MAILGUN_ACCESS_KEY),
data={"from": "NewsBlur Redis Monitor: %s <admin@%s.newsblur.com>" % (hostname, hostname),
"to": [settings.ADMINS[0][1]],
"subject": "%s hasn't bgsave'd redis in %s!" % (hostname, modified_minutes),
"text": "Last modified %s: %s ago\n\n----\n\n%s" % (hostname, modified_minutes, log_tail)})
else:
print " ---> Redis bgsave fine: %s / %s ago" % (hostname, modified_minutes)
if __name__ == '__main__':
main()
|
{
"content_hash": "373ac3706ed1b8f6de47cf9fb7650236",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 113,
"avg_line_length": 40.064516129032256,
"alnum_prop": 0.5909822866344605,
"repo_name": "youprofit/NewsBlur",
"id": "bf2b376f8c45191dfc4b99b076c52bdd09c9cbe5",
"size": "1265",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "utils/monitor_redis_bgsave.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "678536"
},
{
"name": "CoffeeScript",
"bytes": "6451"
},
{
"name": "HTML",
"bytes": "269852"
},
{
"name": "Java",
"bytes": "710373"
},
{
"name": "JavaScript",
"bytes": "1577082"
},
{
"name": "Nginx",
"bytes": "897"
},
{
"name": "Objective-C",
"bytes": "2667108"
},
{
"name": "Perl",
"bytes": "55598"
},
{
"name": "Python",
"bytes": "2407231"
},
{
"name": "R",
"bytes": "527"
},
{
"name": "Ruby",
"bytes": "870"
},
{
"name": "Shell",
"bytes": "40018"
}
],
"symlink_target": ""
}
|
import hou
import IECore
import IECoreHoudini
import unittest
import os
import shutil
class TestOpHolder( IECoreHoudini.TestCase ):
def testOpHolder(self):
obj = hou.node("/obj")
geo = obj.createNode("geo", run_init_scripts=False)
op = geo.createNode( "ieOpHolder" )
self.assert_( op )
fn = IECoreHoudini.FnOpHolder( op )
self.assert_( fn )
return (op,fn)
# tests a basic op, the function set and that it cooks as expected
def testSimpleOp(self):
(op, fn) = self.testOpHolder()
cl = IECore.ClassLoader.defaultOpLoader().load("cobReader", 1)()
fn.setParameterised( cl )
self.assertNotEqual( fn.getParameterised(), None )
self.assertEqual( fn.getParameterised(), cl )
op.parm("parm_filename").set( self.__torusTestFile )
op.cook() # cook using Houdini's cook mechanism, NOT operate()
self.assertEqual( fn.getParameterised()["filename"].getValue(), IECore.StringData( self.__torusTestFile ) )
result = fn.getParameterised().resultParameter().getValue()
self.assertEqual( result, IECore.Reader.create( self.__torusTestFile ).read() )
# tests the alternative 'all in one' opHolder creator
def testAlternateCreator(self):
n = IECoreHoudini.FnOpHolder.create( "noise_deformer", "noiseDeformer", 1 )
self.assert_( n )
fn = IECoreHoudini.FnOpHolder( n )
self.assert_( fn )
op = fn.getParameterised()
self.assert_( op )
self.assertEqual( op.typeName(), "noiseDeformer" )
# tests creation within contexts (simulating from UIs)
def testContextCreator( self ) :
# test generic creation
n = IECoreHoudini.FnOpHolder.create( "vectorMaker", "vectors/V3fVectorCreator" )
self.assertEqual( n.path(), "/obj/vectorMaker/vectorMaker" )
# test contextArgs outside UI mode fallback to generic behaviour
contextArgs = { "toolname" : "ieOpHolder" }
n2 = IECoreHoudini.FnOpHolder.create( "vectorMaker", "vectors/V3fVectorCreator", contextArgs=contextArgs )
self.assertEqual( n2.path(), "/obj/vectorMaker1/vectorMaker" )
# test parent arg
geo = hou.node( "/obj" ).createNode( "geo", run_init_scripts=False )
n3 = IECoreHoudini.FnOpHolder.create( "vectorMaker", "vectors/V3fVectorCreator", parent=geo, contextArgs=contextArgs )
self.assertEqual( n3.path(), "/obj/geo1/vectorMaker" )
# test automatic conversion
contextArgs["shiftclick"] = True
n4 = IECoreHoudini.FnOpHolder.create( "noise", "noiseDeformer", parent=geo, contextArgs=contextArgs )
self.assertEqual( n4.path(), "/obj/geo1/noise" )
self.assertEqual( len(n4.outputConnectors()[0]), 1 )
self.assertEqual( n4.outputConnectors()[0][0].outputNode().type().name(), "ieCortexConverter" )
# test automatic conversion and output connections
mountain = geo.createNode( "mountain" )
contextArgs["outputnodename"] = mountain.path()
n5 = IECoreHoudini.FnOpHolder.create( "noise", "noiseDeformer", parent=geo, contextArgs=contextArgs )
self.assertEqual( n5.path(), "/obj/geo1/noise1" )
self.assertEqual( len(n5.outputConnectors()[0]), 1 )
converter = n5.outputConnectors()[0][0].outputNode()
self.assertEqual( converter.type().name(), "ieCortexConverter" )
self.assertEqual( len(converter.outputConnectors()[0]), 1 )
outputNode = converter.outputConnectors()[0][0].outputNode()
self.assertEqual( outputNode.type().name(), "mountain" )
self.assertEqual( outputNode, mountain )
# test that a C++ op can be assigned using the function set
def testCppOp(self):
(op,fn) = self.testOpHolder()
mesh_normals = IECore.MeshNormalsOp()
self.assert_( mesh_normals )
fn.setParameterised(mesh_normals)
self.assertEqual( fn.getParameterised().typeName(), "MeshNormalsOp" )
# test that we can wire opholders together
def testWireTogether(self):
obj = hou.node("/obj")
geo = obj.createNode("geo", run_init_scripts=False)
v1_op = geo.createNode( "ieOpHolder", node_name="vector1" )
fn = IECoreHoudini.FnOpHolder( v1_op )
fn.setOp( "vectors/V3fVectorCreator", 1 )
v1_op.parm("parm_size").set(3)
v1_op.parmTuple("parm_value").set( (1,2,3) )
v2_op = geo.createNode( "ieOpHolder", node_name="vector2" )
fn = IECoreHoudini.FnOpHolder( v2_op )
fn.setOp( "vectors/V3fVectorCreator", 1 )
v2_op.parm("parm_size").set(3)
v2_op.parmTuple("parm_value").set( (4,5,6) )
add_op = geo.createNode( "ieOpHolder", node_name="add_vectors" )
fn = IECoreHoudini.FnOpHolder( add_op )
fn.setOp( "vectors/V3fVectorAdder", 1 )
print_op = geo.createNode( "ieOpHolder", node_name="print_values" )
fn = IECoreHoudini.FnOpHolder( print_op )
fn.setOp( "objectDebug", 1 )
print_op.parm("parm_quiet").set(True)
# connect our ops together
add_op.setInput( 0, v1_op )
add_op.setInput( 1, v2_op )
print_op.setInput( 0, add_op )
# cook and check our output
print_op.cook()
fn = IECoreHoudini.FnOpHolder(print_op)
result = fn.getParameterised().resultParameter().getValue()
self.assertEqual( result, IECore.V3fVectorData( [IECore.V3f(5,7,9),IECore.V3f(5,7,9),IECore.V3f(5,7,9)] ) )
# test that a hip with opHolders wired together can be saved and reloaded & still evaluate
def testSaveLoad(self):
hou.hipFile.clear(suppress_save_prompt=True)
save_file = "test/opHolder_testData/opSave_test.hip"
self.testWireTogether()
# save scene
hou.hipFile.save(save_file)
# new scene
hou.hipFile.clear(suppress_save_prompt=True)
# open scene
hou.hipFile.load(save_file)
# check some parameters are ok
self.assertEqual( hou.node("/obj/geo1/vector1").parm("parm_size").eval(), 3 )
self.assertEqual( hou.node("/obj/geo1/vector1").parmTuple("parm_value").eval(), (1,2,3) )
self.assertEqual( hou.node("/obj/geo1/vector2").parm("parm_size").eval(), 3 )
self.assertEqual( hou.node("/obj/geo1/vector2").parmTuple("parm_value").eval(), (4,5,6) )
# check the result of our last opHolder
n = hou.node("/obj/geo1/print_values")
n.cook()
fn = IECoreHoudini.FnOpHolder(n)
result = fn.getParameterised().resultParameter().getValue()
self.assertEqual( result, IECore.V3fVectorData( [IECore.V3f(5,7,9),IECore.V3f(5,7,9),IECore.V3f(5,7,9)] ) )
# tests changing op and inputs
def testChangingOp( self ) :
n = IECoreHoudini.FnOpHolder.create( "test_node", "vectors/V3fVectorCreator", 1 )
fn = IECoreHoudini.FnOpHolder( n )
op = fn.getParameterised()
self.assertEqual( len(n.inputConnectors()), 0 )
fn.setOp( "objectDebug", 1 )
self.assertEqual( len(n.inputConnectors()), 1 )
torus = n.createInputNode( 0, "torus" )
self.assertEqual( torus, n.inputConnections()[0].inputNode() )
self.assertEqual( 0, n.inputConnections()[0].inputIndex() )
fn.setOp( "vectors/V3fVectorAdder", 1 )
self.assertEqual( len(n.inputConnectors()), 2 )
self.assertEqual( torus, n.inputConnections()[0].inputNode() )
self.assertEqual( 0, n.inputConnections()[0].inputIndex() )
box = n.createInputNode( 1, "box" )
self.assertEqual( box, n.inputConnections()[1].inputNode() )
self.assertEqual( 1, n.inputConnections()[1].inputIndex() )
n.setInput( 0, None )
self.assertEqual( len(n.inputConnectors()), 2 )
self.assertEqual( len(n.inputConnections()), 1 )
self.assertEqual( box, n.inputConnections()[0].inputNode() )
self.assertEqual( 1, n.inputConnections()[0].inputIndex() )
fn.setOp( "objectDebug", 1 )
self.assertEqual( len(n.inputConnectors()), 1 )
self.assertEqual( box, n.inputConnections()[0].inputNode() )
self.assertEqual( 0, n.inputConnections()[0].inputIndex() )
fn.setOp( "vectors/V3fVectorCreator", 1 )
self.assertEqual( len(n.inputConnectors()), 0 )
self.assert_( not n.inputConnectors() )
# tests creation of a lot of opHolders
def testLotsQuickly(self):
n = []
for i in range(1000):
n.append( IECoreHoudini.FnOpHolder.create( "noise_deformer", "noiseDeformer", 1 ) )
for _n in n:
_n.destroy()
# test using the noiseDeformer op
def testModifyMesh(self):
(op, fn) = self.testOpHolder()
cl = IECore.ClassLoader.defaultOpLoader().load("cobReader", 1)()
fn.setParameterised( cl )
op.parm("parm_filename").set( self.__torusNormalsTestFile )
deformer = op.createOutputNode( "ieOpHolder" )
self.assert_( deformer )
cl = IECore.ClassLoader.defaultOpLoader().load("noiseDeformer", 1)()
self.assertEqual( cl.typeName(), "noiseDeformer" )
fn = IECoreHoudini.FnOpHolder( deformer )
fn.setParameterised( cl )
deformer.parm("parm_magnitude").set( 2.5 )
deformer.parmTuple("parm_frequency").set( (1,2,3) )
deformer.cook()
torus = IECore.Reader.create( self.__torusNormalsTestFile ).read()
result = fn.getParameterised().resultParameter().getValue()
self.assertEqual( len(result["P"].data), len(torus["P"].data) )
self.assertEqual( len(result["N"].data), len(torus["N"].data) )
self.assertNotEqual( result["P"], torus["P"] )
self.assertNotEqual( result["N"], torus["N"] )
return ( op, deformer )
# test the bbox on our Sop geometry is set correctly
def testOutputBBox(self):
(op,fn) = self.testOpHolder()
cl = IECore.ClassLoader.defaultOpLoader().load("cobReader", 1)()
fn.setParameterised( cl )
op.parm("parm_filename").set( self.__torusNormalsTestFile )
op.cook()
geo = op.geometry()
self.assert_( geo )
bbox = geo.boundingBox()
self.failUnless( bbox.isAlmostEqual(hou.BoundingBox(-1.5, -0.475528, -1.42658, 1.5, 0.475528, 1.42658)) )
deformer = op.createOutputNode( "ieOpHolder" )
cl = IECore.ClassLoader.defaultOpLoader().load("noiseDeformer", 1)()
fn = IECoreHoudini.FnOpHolder( deformer )
fn.setParameterised( cl )
self.assertEqual( len(deformer.inputConnectors()), 1 )
deformer.parm("parm_magnitude").set(2)
deformer.cook()
geo2 = deformer.geometry()
self.assert_( geo2 )
bbox2 = geo2.boundingBox()
self.assert_( not bbox2.isAlmostEqual(hou.BoundingBox(-1.5, -0.475528, -1.42658, 1.5, 0.475528, 1.42658)) )
self.failUnless( bbox2.isAlmostEqual(hou.BoundingBox(-1.8938, -1.08025, -1.75561, 1.64279, 1.37116, 1.97013)) )
return ( geo, deformer )
# test an opHolder with 2 primitive inputs
def testMultipleInputs(self):
(geo, deformer) = self.testModifyMesh()
swap = geo.createOutputNode( "ieOpHolder", node_name="swapP" )
cl = IECore.ClassLoader.defaultOpLoader().load("swapAttribute", 1)()
fn = IECoreHoudini.FnOpHolder( swap )
fn.setParameterised( cl )
swap.setInput( 1, deformer )
swap.cook()
src = IECoreHoudini.FnOpHolder( geo ).getParameterised().resultParameter().getValue()
deformer = IECoreHoudini.FnOpHolder( deformer ).getParameterised().resultParameter().getValue()
result = cl.resultParameter().getValue()
self.failUnless( 'P' in result )
self.assertNotEqual( result['P'].data, src['P'].data)
self.assertEqual( result['P'].data, deformer['P'].data)
self.assertEqual( result['N'].data, src['N'].data)
self.assertNotEqual( result['N'].data, deformer['N'].data)
# tests compound parameter support
def testCompoundParameters(self):
(op,fn)=self.testOpHolder()
cl = IECore.ClassLoader.defaultOpLoader().load("parameters/compoundParameters", 1)()
fn.setParameterised( cl )
# test we have the parameters & folders
num_folders = [ type(p.parmTemplate()).__name__ for p in op.spareParms()].count("FolderSetParmTemplate")
self.assertEqual( num_folders, 4 )
p = op.parm( "parm_compound_1_jy" )
self.assert_( p )
self.assertEqual( p.containingFolders(), ('Parameters', 'My Compound 1') )
p = op.parm( "parm_compound_2_kx" )
self.assert_( p )
self.assertEqual( p.containingFolders(), ('Parameters', 'My Compound 2') )
p = op.parm( "parm_compound_3_compound_4_some_int" )
self.assert_( p )
self.assertEqual( p.containingFolders(), ('Parameters', 'My Compound 3', 'My Compound 4') )
# test that houdini values get set on cortex parameters correctly
p = op.parmTuple( "parm_compound_3_compound_4_some_int" )
p.set( [345] )
self.assertEqual( cl.parameters()["compound_3"]["compound_4"]["some_int"].getValue().value, 123 )
op.cook()
self.assertEqual( cl.parameters()["compound_3"]["compound_4"]["some_int"].getValue().value, 345 )
p = op.parmTuple( "parm_compound_2_j" )
p.set( [123.456, 456.789, 0.0] )
self.assert_( ( cl.parameters()["compound_2"]["j"].getValue().value - IECore.V3d( 8,16,32 ) ).length() < 0.001 )
op.cook()
self.assert_( ( cl.parameters()["compound_2"]["j"].getValue().value - IECore.V3d( 123.456, 456.789, 0 ) ).length() < 0.001 )
# test that caching parameters works
op.parm( "__classReloadButton" ).pressButton()
op.cook()
self.assertEqual( cl.parameters()["compound_3"]["compound_4"]["some_int"].getValue().value, 345 )
self.assert_( ( cl.parameters()["compound_2"]["j"].getValue().value - IECore.V3d( 123.456, 456.789, 0 ) ).length() < 0.001 )
def testObjectParameterConversion(self):
(op,fn)=self.testOpHolder()
cl = IECore.ClassLoader.defaultOpLoader().load("objectDebug", 1)()
fn.setParameterised( cl )
op.parm("parm_quiet").set( True )
torus = op.createInputNode(0, "torus" )
op.cook()
result = cl.resultParameter().getValue()
self.assertEqual( op.errors(), "" )
self.assertEqual( result.typeId(), IECore.TypeId.MeshPrimitive )
torus.parm("type").set(1)
op.cook()
result = cl.resultParameter().getValue()
self.assertEqual( op.errors(), "" )
self.assertEqual( result.typeId(), IECore.TypeId.PointsPrimitive )
op2 = op.createInputNode(0, "ieOpHolder")
fn2 = IECoreHoudini.FnOpHolder( op2 )
cl = IECore.ClassLoader.defaultOpLoader().load("cobReader", 1)()
fn2.setParameterised(cl)
op2.parm("parm_filename").set( self.__torusTestFile )
op.cook()
result2 = fn.getParameterised().resultParameter().getValue()
self.assertEqual( op.errors(), "" )
self.assertEqual( result2.typeId(), IECore.TypeId.MeshPrimitive )
self.assertEqual( result2["P"].data, result["P"].data )
def testObjectParameterWithMultipleTypesConversion( self ) :
( op, fn ) = self.testOpHolder()
cl = IECore.ClassLoader.defaultOpLoader().load( "multiTypeObject", 1 )()
fn.setParameterised( cl )
torus = op.createInputNode( 0, "torus" )
op.cook()
result = cl.resultParameter().getValue()
self.assert_( not op.errors() )
self.assertEqual( result.typeId(), IECore.TypeId.MeshPrimitive )
torus.parm( "type" ).set( 1 )
op.cook()
result2 = cl.resultParameter().getValue()
self.assert_( not op.errors() )
self.assertEqual( result2.typeId(), IECore.TypeId.PointsPrimitive )
op2 = op.createInputNode( 0, "ieOpHolder" )
fn2 = IECoreHoudini.FnOpHolder( op2 )
cl = IECore.ClassLoader.defaultOpLoader().load( "cobReader", 1 )()
fn2.setParameterised( cl )
op2.parm( "parm_filename" ).set( self.__torusTestFile )
op.cook()
result3 = fn.getParameterised().resultParameter().getValue()
self.assert_( not op.errors() )
self.assertEqual( result3.typeId(), IECore.TypeId.MeshPrimitive )
self.assertEqual( result3["P"].data, result["P"].data )
cl = IECore.ClassLoader.defaultOpLoader().load( "vectors/V3fVectorAdder", 1 )()
fn2.setParameterised( cl )
fn2.getParameterised().parameters()['vector1'].setValue( result["P"].data )
fn2.getParameterised().parameters()['vector2'].setValue( result["P"].data )
op.cook()
result4 = fn.getParameterised().resultParameter().getValue()
self.assert_( not op.errors() )
self.assertEqual( result4.typeId(), IECore.TypeId.PointsPrimitive )
self.assertEqual( result4["P"].data, result["P"].data + result["P"].data )
def testPrimitiveParameterConversion(self):
(op,fn)=self.testOpHolder()
cl = IECore.ClassLoader.defaultOpLoader().load("parameters/primitives/primParam", 1)()
fn.setParameterised( cl )
torus = op.createInputNode(0, "torus" )
op.cook()
result = cl.resultParameter().getValue()
self.assertEqual( op.errors(), "" )
self.assertEqual( result.typeId(), IECore.TypeId.MeshPrimitive )
torus.parm("type").set(1)
op.cook()
result = cl.resultParameter().getValue()
self.assertEqual( op.errors(), "" )
self.assertEqual( result.typeId(), IECore.TypeId.PointsPrimitive )
op2 = op.createInputNode(0, "ieOpHolder")
fn = IECoreHoudini.FnOpHolder( op2 )
cl = IECore.ClassLoader.defaultOpLoader().load("cobReader", 1)()
fn.setParameterised(cl)
op2.parm("parm_filename").set( self.__torusTestFile )
op.cook()
result2 = fn.getParameterised().resultParameter().getValue()
self.assertEqual( op.errors(), "" )
self.assertEqual( result2.typeId(), IECore.TypeId.MeshPrimitive )
self.assertEqual( result2["P"].data, result["P"].data )
def testPointsParameterConversion(self):
(op,fn)=self.testOpHolder()
cl = IECore.ClassLoader.defaultOpLoader().load("parameters/primitives/pointParam", 1)()
fn.setParameterised( cl )
cob = op.createInputNode(0, "ieOpHolder" )
cl = IECore.ClassLoader.defaultOpLoader().load("cobReader", 1)()
fn2 = IECoreHoudini.FnOpHolder( cob )
fn2.setParameterised( cl )
cob.parm("parm_filename").set( self.__torusTestFile )
self.assertRaises( hou.OperationFailed, op.cook )
self.assertNotEqual( op.errors(), "" )
cob = op.createInputNode(0, "torus" )
op.cook() # should pass because torus will be converted to points
self.assertEqual( fn.getParameterised()['input'].getValue().typeId(), IECore.TypeId.PointsPrimitive )
self.assertEqual( fn.getParameterised().resultParameter().getValue().typeId(), IECore.TypeId.PointsPrimitive )
def testPolygonsParameterConversion(self):
(op,fn)=self.testOpHolder()
cl = IECore.ClassLoader.defaultOpLoader().load("parameters/primitives/polyParam", 1)()
fn.setParameterised( cl )
cob = op.createInputNode(0, "ieOpHolder" )
cl = IECore.ClassLoader.defaultOpLoader().load("cobReader", 1)()
fn2 = IECoreHoudini.FnOpHolder( cob )
fn2.setParameterised( cl )
cob.parm("parm_filename").set( self.__torusTestFile )
op.cook() # should pass because we have a mesh primitive
torus = op.createInputNode(0, "torus" )
op.cook() # should pass because torus will be converted to mesh
self.assertEqual( fn.getParameterised()['input'].getValue().typeId(), IECore.TypeId.MeshPrimitive )
self.assertEqual( fn.getParameterised().resultParameter().getValue().typeId(), IECore.TypeId.MeshPrimitive )
op2 = torus.createOutputNode( "ieOpHolder" )
cl = IECore.ClassLoader.defaultOpLoader().load("parameters/primitives/pointParam", 1)()
fn = IECoreHoudini.FnOpHolder( op2 )
fn.setParameterised( cl )
op2.cook()
self.assertEqual( fn.getParameterised().resultParameter().getValue().typeId(), IECore.TypeId.PointsPrimitive )
op.setInput( 0, op2 )
self.assertRaises( hou.OperationFailed, op.cook )
self.assertNotEqual( op.errors(), "" )
def testGroupParameterConversion( self ) :
( holder, fn ) = self.testOpHolder()
fn.setOp( "parameters/groupParam", 1 )
merge = holder.createInputNode( 0, "merge" )
attrib1 = merge.createInputNode( 0, "attribcreate" )
attrib1.parm( "name1" ).set( "name" )
attrib1.parm( "class1" ).set( 1 ) # Prim
attrib1.parm( "type1" ).set( 3 ) # String
attrib1.parm( "string1" ).set( "torusGroup" )
group1 = attrib1.createInputNode( 0, "group" )
group1.parm( "crname" ).set( "torusGroup" )
torus = group1.createInputNode( 0, "torus" )
attrib2 = merge.createInputNode( 1, "attribcreate" )
attrib2.parm( "name1" ).set( "name" )
attrib2.parm( "class1" ).set( 1 ) # Prim
attrib2.parm( "type1" ).set( 3 ) # String
attrib2.parm( "string1" ).set( "boxGroup" )
group2 = attrib2.createInputNode( 0, "group" )
group2.parm( "crname" ).set( "boxGroup" )
box = group2.createInputNode( 0, "box" )
holder.parm( "parm_input_groupingMode" ).set( IECoreHoudini.FromHoudiniGroupConverter.GroupingMode.PrimitiveGroup )
holder.cook()
result = fn.getOp().resultParameter().getValue()
self.assertEqual( fn.getOp()['input'].getValue().typeId(), IECore.TypeId.Group )
self.assertEqual( result.typeId(), IECore.TypeId.MeshPrimitive )
self.assertEqual( result.blindData()["name"].value, "torusGroup" )
self.assertEqual( result.variableSize( IECore.PrimitiveVariable.Interpolation.Uniform ), 100 )
group1.bypass( True )
group2.bypass( True )
attrib1.bypass( True )
attrib2.bypass( True )
holder.cook()
result = fn.getOp().resultParameter().getValue()
self.assertEqual( fn.getOp()['input'].getValue().typeId(), IECore.TypeId.Group )
self.assertEqual( result.typeId(), IECore.TypeId.MeshPrimitive )
self.assertEqual( result.blindData(), IECore.CompoundData() )
self.assertEqual( result.variableSize( IECore.PrimitiveVariable.Interpolation.Uniform ), 106 )
## \todo: keep the names and convert in PrimitiveGroup mode. see todo in FromHoudiniGroupConverter.cpp
attrib1.bypass( False )
attrib2.bypass( False )
holder.parm( "parm_input_groupingMode" ).set( IECoreHoudini.FromHoudiniGroupConverter.GroupingMode.NameAttribute )
holder.cook()
result = fn.getOp().resultParameter().getValue()
self.assertEqual( fn.getOp()['input'].getValue().typeId(), IECore.TypeId.Group )
self.assertEqual( result.typeId(), IECore.TypeId.MeshPrimitive )
self.assertEqual( result.blindData()["name"].value, "boxGroup" )
self.assertEqual( result.variableSize( IECore.PrimitiveVariable.Interpolation.Uniform ), 6 )
def testInputConnectionsSaveLoad( self ) :
hou.hipFile.clear( suppress_save_prompt=True )
( holder, fn ) = self.testOpHolder()
fn.setOp( "parameters/groupParam", 2 )
holderPath = holder.path()
torusPath = holder.createInputNode( 0, "torus" ).path()
boxPath = holder.createInputNode( 1, "box" ).path()
self.assertEqual( len(holder.inputs()), 2 )
self.assertEqual( holder.inputs()[0].path(), torusPath )
self.assertEqual( holder.inputs()[1].path(), boxPath )
hip = "test/opHolder_testData/opSave_test.hip"
hou.hipFile.save( hip )
hou.hipFile.clear( suppress_save_prompt=True )
hou.hipFile.load( hip )
holder = hou.node( holderPath )
self.assertEqual( len(holder.inputs()), 2 )
self.assertEqual( holder.inputs()[0].path(), torusPath )
self.assertEqual( holder.inputs()[1].path(), boxPath )
def testInvalidValidation(self):
(op,fn)=self.testOpHolder()
cl = IECore.ClassLoader.defaultOpLoader().load("cobReader", 1)()
fn.setParameterised( cl )
op.parm("parm_filename").set( self.__torusTestFile )
op2 = op.createOutputNode( "ieOpHolder" )
cl = IECore.ClassLoader.defaultOpLoader().load("parameters/primitives/pointParam", 1)()
fn = IECoreHoudini.FnOpHolder(op2)
fn.setParameterised(cl)
self.assertRaises( hou.OperationFailed, op2.cook )
self.assertNotEqual( op2.errors(), "" )
def testInvalidOp(self):
(op,fn)=self.testOpHolder()
cl = IECore.ClassLoader.defaultOpLoader().load("noiseDeformer", 1)()
fn.setParameterised( cl )
self.assertRaises( hou.OperationFailed, op.cook )
self.assertNotEqual( op.errors(), "" )
def testMatchString(self):
(op,fn)=self.testOpHolder()
op.parm( "__classMatchString" ).set( "*" )
op.parm( "__className" ).set( "cobReader" )
op.parm( "__className" ).pressButton()
cl = fn.getParameterised()
self.assertEqual( cl.typeName(), "cobReader" )
op.parm( "__classMatchString" ).set( "object*" )
results = fn.classNames()
self.assertEqual( len(fn.classNames()), 1 )
op.parm( "__className" ).set( "cobReader" ) # this still works, should it be invalid?
op.parm( "__className" ).pressButton()
cl = fn.getParameterised()
self.assertEqual( cl.typeName(), "cobReader" )
op.parm( "__classMatchString" ).set("*")
self.failUnless( len(fn.classNames()) > 1 )
def testCategories( self ) :
( op, fn ) = self.testOpHolder()
op.parm( "__classMatchString" ).set( "*" )
self.assertEqual( op.parm( "__classCategory" ).eval(), "" )
op.parm( "__className" ).set( "cobReader" )
self.assertEqual( op.parm( "__classCategory" ).eval(), "" )
op.parm( "__className" ).pressButton()
self.assertEqual( fn.getParameterised().typeName(), "cobReader" )
self.assertEqual( fn.getParameterised().path, "cobReader" )
op.parm( "__className" ).set( "vectors/V3fVectorCreator" )
op.parm( "__className" ).pressButton()
self.assertEqual( op.parm( "__classCategory" ).eval(), "vectors" )
self.assertEqual( fn.getParameterised().typeName(), "V3fVectorCreator" )
self.assertEqual( fn.getParameterised().path, "vectors/V3fVectorCreator" )
op.parm( "__className" ).set( "" )
op.parm( "__className" ).pressButton()
self.assertEqual( op.parm( "__classCategory" ).eval(), "vectors" )
op.parm( "__classCategory" ).set( "" )
op.parm( "__classCategory" ).pressButton()
self.assertRaises( hou.OperationFailed, op.cook )
self.assertEqual( op.parm( "__className" ).eval(), "" )
op.parm( "__className" ).set( "parameters/compoundParameters" )
op.parm( "__className" ).pressButton()
self.assertEqual( op.parm( "__classCategory" ).eval(), "parameters" )
self.assertEqual( fn.getParameterised().typeName(), "compoundParameters" )
self.assertEqual( fn.getParameterised().path, "parameters/compoundParameters" )
op.parm( "__className" ).set( "parameters/primitives/pointParam" )
op.parm( "__className" ).pressButton()
self.assertEqual( op.parm( "__classCategory" ).eval(), "parameters/primitives" )
self.assertEqual( fn.getParameterised().typeName(), "pointParam" )
self.assertEqual( fn.getParameterised().path, "parameters/primitives/pointParam" )
op.parm( "__classCategory" ).set( "" )
op.parm( "__classCategory" ).pressButton()
self.failUnless( len(fn.classNames()) > 4 )
op.parm( "__classMatchString" ).set( "parameters/*" )
self.assertEqual( len(fn.classNames()), 5 )
def testSetOpValues( self ) :
( holder, fn ) = self.testOpHolder()
op = IECore.ClassLoader.defaultOpLoader().load( "noiseDeformer", 1 )()
fn.setOp( op )
self.assertEqual( tuple(op.parameters()['frequency'].defaultValue.value), holder.parmTuple( "parm_frequency" ).parmTemplate().defaultValue() )
self.assertEqual( tuple(op.parameters()['frequency'].defaultValue.value), holder.parmTuple( "parm_frequency" ).eval() )
self.assertEqual( tuple(op.parameters()['frequency'].getTypedValue()), holder.parmTuple( "parm_frequency" ).eval() )
( holder2, fn2 ) = self.testOpHolder()
op.parameters()['frequency'].setTypedValue( IECore.V3f( 0.2, 0.4, 0.6 ) )
fn2.setOp( op )
self.assertEqual( tuple(op.parameters()['frequency'].defaultValue.value), holder2.parmTuple( "parm_frequency" ).parmTemplate().defaultValue() )
self.assertNotEqual( tuple(op.parameters()['frequency'].defaultValue.value), holder2.parmTuple( "parm_frequency" ).eval() )
self.assertEqual( tuple(op.parameters()['frequency'].getTypedValue()), holder2.parmTuple( "parm_frequency" ).eval() )
def testParameterDescriptions( self ) :
( holder, fn ) = self.testOpHolder()
fn.setOp( "parameters/compoundParameters" )
parameters = fn.getOp().parameters()
self.assertEqual( parameters['blah'].description, holder.parm( "parm_blah" ).parmTemplate().help() )
self.assertEqual( parameters['compound_1']['j'].description, holder.parmTuple( "parm_compound_1_j" ).parmTemplate().help() )
self.assertEqual( parameters['compound_1']['k'].description, holder.parmTuple( "parm_compound_1_k" ).parmTemplate().help() )
self.assertEqual( parameters['compound_3']['compound_4']['some_int'].description, holder.parm( "parm_compound_3_compound_4_some_int" ).parmTemplate().help() )
self.assertEqual( parameters['compound_5']['bool_1'].description, holder.parm( "parm_compound_5_bool_1" ).parmTemplate().help() )
def testNumericPresetMenus( self ) :
# at present, Int/FloatParameters only support presetsOnly presets, due to the limitations of hou.MenuParmTemplate
( holder, fn ) = self.testOpHolder()
fn.setOp( "parameters/groupParam", 2 )
parm = holder.parm( "parm_switch" )
self.failUnless( isinstance( parm, hou.Parm ) )
template = parm.parmTemplate()
self.failUnless( isinstance( template, hou.MenuParmTemplate ) )
# the int values are stored as strings in this crazy Houdini world
self.assertEqual( template.menuItems(), ( "20", "30" ) )
self.assertEqual( template.menuLabels(), ( "A", "B" ) )
self.assertEqual( template.defaultValue(), 0 )
self.assertEqual( template.defaultValueAsString(), "20" )
self.assertEqual( parm.eval(), 0 )
self.assertEqual( parm.evalAsString(), "20" )
# but on the op the values are really the ints we require
op = fn.getOp()
self.assertEqual( op["switch"].getTypedValue(), 20 )
parm.set( 1 )
holder.cook()
self.assertEqual( op["switch"].getTypedValue(), 30 )
parm.set( 2 )
self.assertRaises( hou.OperationFailed, holder.cook )
parm.set( -1 )
self.assertRaises( hou.OperationFailed, holder.cook )
parm.set( 0 )
holder.cook()
self.failUnless( not holder.errors() )
newHolder = holder.parent().createNode( "ieOpHolder" )
newFn = IECoreHoudini.FnOpHolder( newHolder )
op["switch"].setTypedValue( 30 )
newFn.setOp( op )
newParm = newHolder.parm( "parm_switch" )
self.assertEqual( newParm.eval(), 1 )
self.assertEqual( newParm.evalAsString(), "30" )
def testMessageHandling( self ) :
( holder, fn ) = self.testOpHolder()
fn.setOp( "noiseDeformer" )
self.assertRaises( hou.OperationFailed, holder.cook )
self.failUnless( "Must have primvar 'N' in primitive!" in holder.errors() )
torus = holder.createInputNode( 0, "torus" )
self.assertRaises( hou.OperationFailed, holder.cook )
self.failUnless( "Must have primvar 'N' in primitive!" in holder.errors() )
holder2 = holder.createInputNode( 0, "ieOpHolder" )
fn2 = IECoreHoudini.FnOpHolder( holder2 )
fn2.setOp( "meshNormalsOp" )
holder2.setInput( 0, torus )
holder.cook()
self.assertEqual( holder.errors(), "" )
self.assertEqual( holder2.errors(), "" )
fn2.setOp( "objectDebug", 2 )
self.assertEqual( holder2.errors(), "" )
self.assertEqual( holder2.warnings(), "" )
holder2.parm( "parm_messageLevel" ).set( int(IECore.MessageHandler.Level.Warning) )
holder2.cook()
self.assertEqual( holder2.errors(), "" )
self.assertNotEqual( holder2.warnings(), "" )
holder2.parm( "parm_messageLevel" ).set( int(IECore.MessageHandler.Level.Error) )
self.assertRaises( hou.OperationFailed, holder2.cook )
self.assertNotEqual( holder2.errors(), "" )
self.assertEqual( holder2.warnings(), "" )
def testAnimatedValues( self ) :
noise = IECoreHoudini.FnOpHolder.create( "test", "noiseDeformer", 1 )
fn = IECoreHoudini.FnOpHolder( noise )
noise.parm( "parm_magnitude" ).setExpression( "$FF" )
hou.setFrame( 1 )
self.assertEqual( noise.evalParm( "parm_magnitude" ), 1 )
self.assertEqual( fn.getOp().parameters()["magnitude"].getTypedValue(), 1 )
hou.setFrame( 12.25 )
self.assertEqual( noise.evalParm( "parm_magnitude" ), 12.25 )
# values haven't been flushed yet
self.assertAlmostEqual( fn.getOp().parameters()["magnitude"].getTypedValue(), 1 )
# so we flush them
fn.setParameterisedValues()
self.assertAlmostEqual( fn.getOp().parameters()["magnitude"].getTypedValue(), 12.25 )
def setUp( self ) :
IECoreHoudini.TestCase.setUp( self )
self.__torusTestFile = "test/IECoreHoudini/data/torus.cob"
self.__torusNormalsTestFile = "test/IECoreHoudini/data/torus_with_normals.cob"
if not os.path.exists( "test/opHolder_testData" ):
os.mkdir( "test/opHolder_testData" )
def tearDown( self ) :
if os.path.exists( "test/opHolder_testData" ):
shutil.rmtree( "test/opHolder_testData" )
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "bfd901a7ad749037fea33aa8b550dd5d",
"timestamp": "",
"source": "github",
"line_count": 706,
"max_line_length": 160,
"avg_line_length": 44.259206798866856,
"alnum_prop": 0.6996831695842801,
"repo_name": "Alwnikrotikz/cortex-vfx",
"id": "ca85c3984d9b757eda8ae43a5b8af17cb8dc16a5",
"size": "33156",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "test/IECoreHoudini/OpHolder.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "65905"
},
{
"name": "C++",
"bytes": "10534690"
},
{
"name": "CMake",
"bytes": "14161"
},
{
"name": "GLSL",
"bytes": "31102"
},
{
"name": "Mathematica",
"bytes": "255937"
},
{
"name": "Objective-C",
"bytes": "1859"
},
{
"name": "Python",
"bytes": "4463622"
},
{
"name": "Slash",
"bytes": "7896"
},
{
"name": "Tcl",
"bytes": "1796"
}
],
"symlink_target": ""
}
|
"""A module providing information about the necessity of brackets"""
from sympy import S
from sympy.core.function import _coeff_isneg
# Default precedence values for some basic types
PRECEDENCE = {
"Lambda":1,
"Relational":20,
"Or":20,
"And":30,
"Add":40,
"Mul":50,
"Pow":60,
"Not":100,
"Atom":1000
}
# A dictionary assigning precedence values to certain classes. These values are
# treated like they were inherited, so not every single class has to be named
# here.
PRECEDENCE_VALUES = {
"Or" : PRECEDENCE["Or"],
"And" : PRECEDENCE["And"],
"Add" : PRECEDENCE["Add"],
"Pow" : PRECEDENCE["Pow"],
"Relational" : PRECEDENCE["Relational"],
"Sub" : PRECEDENCE["Add"],
"Not": PRECEDENCE["Not"],
"factorial": PRECEDENCE["Pow"],
"factorial2": PRECEDENCE["Pow"],
"NegativeInfinity": PRECEDENCE["Add"],
}
# Sometimes it's not enough to assign a fixed precedence value to a
# class. Then a function can be inserted in this dictionary that takes
# an instance of this class as argument and returns the appropriate
# precedence value.
# Precedence functions
def precedence_Mul(item):
if _coeff_isneg(item):
return PRECEDENCE["Add"]
return PRECEDENCE["Mul"]
def precedence_Rational(item):
if item.p < 0:
return PRECEDENCE["Add"]
return PRECEDENCE["Mul"]
def precedence_Integer(item):
if item.p < 0:
return PRECEDENCE["Add"]
return PRECEDENCE["Atom"]
def precedence_Float(item):
if item < 0:
return PRECEDENCE["Add"]
return PRECEDENCE["Atom"]
PRECEDENCE_FUNCTIONS = {
"Integer" : precedence_Integer,
"Mul" : precedence_Mul,
"Rational" : precedence_Rational,
"Float": precedence_Float,
}
def precedence(item):
"""
Returns the precedence of a given object.
"""
if hasattr(item, "precedence"):
return item.precedence
try:
mro = item.__class__.__mro__
except AttributeError:
return PRECEDENCE["Atom"]
for i in mro:
n = i.__name__
if n in PRECEDENCE_FUNCTIONS:
return PRECEDENCE_FUNCTIONS[n](item)
elif n in PRECEDENCE_VALUES:
return PRECEDENCE_VALUES[n]
return PRECEDENCE["Atom"]
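# A minimal usage sketch (not part of the original module; it assumes a
# standard sympy environment where ``from sympy.abc import x`` works). A
# negative leading coefficient demotes a Mul to Add precedence, which is how
# printers know to bracket it:
# >>> from sympy.abc import x
# >>> precedence(2*x)   # Mul
# 50
# >>> precedence(-2*x)  # negative coefficient, treated like an Add
# 40
# >>> precedence(x)     # a plain Symbol falls through to the Atom default
# 1000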
|
{
"content_hash": "1d1236c3c80fe4adb66bb91ba40c3406",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 79,
"avg_line_length": 26.258823529411764,
"alnum_prop": 0.6420250896057348,
"repo_name": "flacjacket/sympy",
"id": "214136ed262b6bfde3df51d61087b9784f07e391",
"size": "2232",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sympy/printing/precedence.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "10293116"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
}
|
import contextlib
import os
import re
import socket
import subprocess
import time
from collections import OrderedDict
from tempfile import NamedTemporaryFile, TemporaryDirectory
from typing import Any, Dict, List, Optional, Union
import pandas
import unicodecsv as csv
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.hooks.dbapi import DbApiHook
from airflow.security import utils
from airflow.utils.helpers import as_flattened_list
from airflow.utils.operator_helpers import AIRFLOW_VAR_NAME_FORMAT_MAPPING
HIVE_QUEUE_PRIORITIES = ['VERY_HIGH', 'HIGH', 'NORMAL', 'LOW', 'VERY_LOW']
def get_context_from_env_var() -> Dict[Any, Any]:
"""
Extract context from env variable, e.g. dag_id, task_id and execution_date,
so that they can be used inside BashOperator and PythonOperator.
:return: The context of interest.
"""
return {
format_map['default']: os.environ.get(format_map['env_var_format'], '')
for format_map in AIRFLOW_VAR_NAME_FORMAT_MAPPING.values()
}
class HiveCliHook(BaseHook):
"""Simple wrapper around the hive CLI.
It also supports ``beeline``,
a lighter CLI that runs over JDBC and is replacing the heavier
traditional CLI. To enable ``beeline``, set the use_beeline param in the
extra field of your connection as in ``{ "use_beeline": true }``
Note that you can also set default hive CLI parameters using the
``hive_cli_params`` to be used in your connection as in
``{"hive_cli_params": "-hiveconf mapred.job.tracker=some.jobtracker:444"}``
Parameters passed here can be overridden by run_cli's hive_conf param
The extra connection parameter ``auth`` gets passed as in the ``jdbc``
connection string as is.
:param hive_cli_conn_id: Reference to the
:ref:`Hive CLI connection id <howto/connection:hive_cli>`.
:type hive_cli_conn_id: str
:param mapred_queue: queue used by the Hadoop Scheduler (Capacity or Fair)
:type mapred_queue: str
:param mapred_queue_priority: priority within the job queue.
Possible settings include: VERY_HIGH, HIGH, NORMAL, LOW, VERY_LOW
:type mapred_queue_priority: str
:param mapred_job_name: This name will appear in the jobtracker.
This can make monitoring easier.
:type mapred_job_name: str
"""
conn_name_attr = 'hive_cli_conn_id'
default_conn_name = 'hive_cli_default'
conn_type = 'hive_cli'
hook_name = 'Hive Client Wrapper'
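# Illustrative only (not from the original source): the connection "extra"
# JSON that __init__ and _get_proxy_user below read might look like the
# following. The keys match the extra_dejson lookups in this class; the
# values are made up.
# {
#     "use_beeline": true,
#     "hive_cli_params": "-hiveconf mapred.job.tracker=some.jobtracker:444",
#     "auth": "noSasl",
#     "proxy_user": "login"
# }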
def __init__(
self,
hive_cli_conn_id: str = default_conn_name,
run_as: Optional[str] = None,
mapred_queue: Optional[str] = None,
mapred_queue_priority: Optional[str] = None,
mapred_job_name: Optional[str] = None,
) -> None:
super().__init__()
conn = self.get_connection(hive_cli_conn_id)
self.hive_cli_params: str = conn.extra_dejson.get('hive_cli_params', '')
self.use_beeline: bool = conn.extra_dejson.get('use_beeline', False)
self.auth = conn.extra_dejson.get('auth', 'noSasl')
self.conn = conn
self.run_as = run_as
self.sub_process: Any = None
if mapred_queue_priority:
mapred_queue_priority = mapred_queue_priority.upper()
if mapred_queue_priority not in HIVE_QUEUE_PRIORITIES:
raise AirflowException(
f"Invalid Mapred Queue Priority. Valid values are: {', '.join(HIVE_QUEUE_PRIORITIES)}"
)
self.mapred_queue = mapred_queue or conf.get('hive', 'default_hive_mapred_queue')
self.mapred_queue_priority = mapred_queue_priority
self.mapred_job_name = mapred_job_name
def _get_proxy_user(self) -> str:
"""This function set the proper proxy_user value in case the user overwrite the default."""
conn = self.conn
proxy_user_value: str = conn.extra_dejson.get('proxy_user', "")
if proxy_user_value == "login" and conn.login:
return f"hive.server2.proxy.user={conn.login}"
if proxy_user_value == "owner" and self.run_as:
return f"hive.server2.proxy.user={self.run_as}"
if proxy_user_value != "": # There is a custom proxy user
return f"hive.server2.proxy.user={proxy_user_value}"
return proxy_user_value # The default proxy user (undefined)
def _prepare_cli_cmd(self) -> List[Any]:
"""This function creates the command list from available information"""
conn = self.conn
hive_bin = 'hive'
cmd_extra = []
if self.use_beeline:
hive_bin = 'beeline'
jdbc_url = f"jdbc:hive2://{conn.host}:{conn.port}/{conn.schema}"
if conf.get('core', 'security') == 'kerberos':
template = conn.extra_dejson.get('principal', "hive/_HOST@EXAMPLE.COM")
if "_HOST" in template:
template = utils.replace_hostname_pattern(utils.get_components(template))
proxy_user = self._get_proxy_user()
jdbc_url += f";principal={template};{proxy_user}"
elif self.auth:
jdbc_url += ";auth=" + self.auth
jdbc_url = f'"{jdbc_url}"'
cmd_extra += ['-u', jdbc_url]
if conn.login:
cmd_extra += ['-n', conn.login]
if conn.password:
cmd_extra += ['-p', conn.password]
hive_params_list = self.hive_cli_params.split()
return [hive_bin] + cmd_extra + hive_params_list
@staticmethod
def _prepare_hiveconf(d: Dict[Any, Any]) -> List[Any]:
"""
This function prepares a list of hiveconf params
from a dictionary of key value pairs.
:param d:
:type d: dict
>>> hh = HiveCliHook()
>>> hive_conf = {"hive.exec.dynamic.partition": "true",
... "hive.exec.dynamic.partition.mode": "nonstrict"}
>>> hh._prepare_hiveconf(hive_conf)
["-hiveconf", "hive.exec.dynamic.partition=true",\
"-hiveconf", "hive.exec.dynamic.partition.mode=nonstrict"]
"""
if not d:
return []
return as_flattened_list(zip(["-hiveconf"] * len(d), [f"{k}={v}" for k, v in d.items()]))
def run_cli(
self,
hql: Union[str, str],
schema: Optional[str] = None,
verbose: bool = True,
hive_conf: Optional[Dict[Any, Any]] = None,
) -> Any:
"""
Run an hql statement using the hive cli. If hive_conf is specified
it should be a dict and the entries will be set as key/value pairs
in HiveConf
:param hive_conf: if specified these key value pairs will be passed
to hive as ``-hiveconf "key"="value"``. Note that they will be
passed after the ``hive_cli_params`` and thus will override
whatever values are specified in the database.
:type hive_conf: dict
>>> hh = HiveCliHook()
>>> result = hh.run_cli("USE airflow;")
>>> ("OK" in result)
True
"""
conn = self.conn
schema = schema or conn.schema
if schema:
hql = f"USE {schema};\n{hql}"
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
hql += '\n'
f.write(hql.encode('UTF-8'))
f.flush()
hive_cmd = self._prepare_cli_cmd()
env_context = get_context_from_env_var()
# Only extend the hive_conf if it is defined.
if hive_conf:
env_context.update(hive_conf)
hive_conf_params = self._prepare_hiveconf(env_context)
if self.mapred_queue:
hive_conf_params.extend(
[
'-hiveconf',
f'mapreduce.job.queuename={self.mapred_queue}',
'-hiveconf',
f'mapred.job.queue.name={self.mapred_queue}',
'-hiveconf',
f'tez.queue.name={self.mapred_queue}',
]
)
if self.mapred_queue_priority:
hive_conf_params.extend(
['-hiveconf', f'mapreduce.job.priority={self.mapred_queue_priority}']
)
if self.mapred_job_name:
hive_conf_params.extend(['-hiveconf', f'mapred.job.name={self.mapred_job_name}'])
hive_cmd.extend(hive_conf_params)
hive_cmd.extend(['-f', f.name])
if verbose:
self.log.info("%s", " ".join(hive_cmd))
sub_process: Any = subprocess.Popen(
hive_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=tmp_dir, close_fds=True
)
self.sub_process = sub_process
stdout = ''
while True:
line = sub_process.stdout.readline()
if not line:
break
stdout += line.decode('UTF-8')
if verbose:
self.log.info(line.decode('UTF-8').strip())
sub_process.wait()
if sub_process.returncode:
raise AirflowException(stdout)
return stdout
def test_hql(self, hql: Union[str, str]) -> None:
"""Test an hql statement using the hive cli and EXPLAIN"""
create, insert, other = [], [], []
for query in hql.split(';'): # naive
query_original = query
query = query.lower().strip()
if query.startswith('create table'):
create.append(query_original)
elif query.startswith(('set ', 'add jar ', 'create temporary function')):
other.append(query_original)
elif query.startswith('insert'):
insert.append(query_original)
other_ = ';'.join(other)
for query_set in [create, insert]:
for query in query_set:
query_preview = ' '.join(query.split())[:50]
self.log.info("Testing HQL [%s (...)]", query_preview)
if query_set == insert:
query = other_ + '; explain ' + query
else:
query = 'explain ' + query
try:
self.run_cli(query, verbose=False)
except AirflowException as e:
message = e.args[0].split('\n')[-2]
self.log.info(message)
error_loc = re.search(r'(\d+):(\d+)', message)
if error_loc and error_loc.group(1).isdigit():
lst = int(error_loc.group(1))
begin = max(lst - 2, 0)
end = min(lst + 3, len(query.split('\n')))
context = '\n'.join(query.split('\n')[begin:end])
self.log.info("Context :\n %s", context)
else:
self.log.info("SUCCESS")
def load_df(
self,
df: pandas.DataFrame,
table: str,
field_dict: Optional[Dict[Any, Any]] = None,
delimiter: str = ',',
encoding: str = 'utf8',
pandas_kwargs: Any = None,
**kwargs: Any,
) -> None:
"""
Loads a pandas DataFrame into hive.
Hive data types will be inferred if not passed but column names will
not be sanitized.
:param df: DataFrame to load into a Hive table
:type df: pandas.DataFrame
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param field_dict: mapping from column name to hive data type.
Note that it must be OrderedDict so as to keep columns' order.
:type field_dict: collections.OrderedDict
:param delimiter: field delimiter in the file
:type delimiter: str
:param encoding: str encoding to use when writing DataFrame to file
:type encoding: str
:param pandas_kwargs: passed to DataFrame.to_csv
:type pandas_kwargs: dict
:param kwargs: passed to self.load_file
"""
def _infer_field_types_from_df(df: pandas.DataFrame) -> Dict[Any, Any]:
dtype_kind_hive_type = {
'b': 'BOOLEAN', # boolean
'i': 'BIGINT', # signed integer
'u': 'BIGINT', # unsigned integer
'f': 'DOUBLE', # floating-point
'c': 'STRING', # complex floating-point
'M': 'TIMESTAMP', # datetime
'O': 'STRING', # object
'S': 'STRING', # (byte-)string
'U': 'STRING', # Unicode
'V': 'STRING', # void
}
order_type = OrderedDict()
for col, dtype in df.dtypes.iteritems():
order_type[col] = dtype_kind_hive_type[dtype.kind]
return order_type
if pandas_kwargs is None:
pandas_kwargs = {}
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir, mode="w") as f:
if field_dict is None:
field_dict = _infer_field_types_from_df(df)
df.to_csv(
path_or_buf=f,
sep=delimiter,
header=False,
index=False,
encoding=encoding,
date_format="%Y-%m-%d %H:%M:%S",
**pandas_kwargs,
)
f.flush()
return self.load_file(
filepath=f.name, table=table, delimiter=delimiter, field_dict=field_dict, **kwargs
)
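# A minimal usage sketch (hypothetical table and data, not from the source):
# >>> hh = HiveCliHook()
# >>> df = pandas.DataFrame({"state": ["CA", "NY"], "num": [10, 20]})
# >>> hh.load_df(df, table="staging.example_table", delimiter=",")
# With no field_dict, _infer_field_types_from_df above would map the columns
# to STRING and BIGINT respectively.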
def load_file(
self,
filepath: str,
table: str,
delimiter: str = ",",
field_dict: Optional[Dict[Any, Any]] = None,
create: bool = True,
overwrite: bool = True,
partition: Optional[Dict[str, Any]] = None,
recreate: bool = False,
tblproperties: Optional[Dict[str, Any]] = None,
) -> None:
"""
Loads a local file into Hive
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
large amount of data is loaded and/or if the table gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param filepath: local filepath of the file to load
:type filepath: str
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param delimiter: field delimiter in the file
:type delimiter: str
:param field_dict: A dictionary of the fields name in the file
as keys and their Hive types as values.
Note that it must be OrderedDict so as to keep columns' order.
:type field_dict: collections.OrderedDict
:param create: whether to create the table if it doesn't exist
:type create: bool
:param overwrite: whether to overwrite the data in table or partition
:type overwrite: bool
:param partition: target partition as a dict of partition columns
and values
:type partition: dict
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param tblproperties: TBLPROPERTIES of the hive table being created
:type tblproperties: dict
"""
hql = ''
if recreate:
hql += f"DROP TABLE IF EXISTS {table};\n"
if create or recreate:
if field_dict is None:
raise ValueError("Must provide a field dict when creating a table")
fields = ",\n ".join(f"`{k.strip('`')}` {v}" for k, v in field_dict.items())
hql += f"CREATE TABLE IF NOT EXISTS {table} (\n{fields})\n"
if partition:
pfields = ",\n ".join(p + " STRING" for p in partition)
hql += f"PARTITIONED BY ({pfields})\n"
hql += "ROW FORMAT DELIMITED\n"
hql += f"FIELDS TERMINATED BY '{delimiter}'\n"
hql += "STORED AS textfile\n"
if tblproperties is not None:
tprops = ", ".join(f"'{k}'='{v}'" for k, v in tblproperties.items())
hql += f"TBLPROPERTIES({tprops})\n"
hql += ";"
self.log.info(hql)
self.run_cli(hql)
hql = f"LOAD DATA LOCAL INPATH '{filepath}' "
if overwrite:
hql += "OVERWRITE "
hql += f"INTO TABLE {table} "
if partition:
pvals = ", ".join(f"{k}='{v}'" for k, v in partition.items())
hql += f"PARTITION ({pvals})"
# As a workaround for HIVE-10541, add a newline character
# at the end of hql (AIRFLOW-2412).
hql += ';\n'
self.log.info(hql)
self.run_cli(hql)
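# A minimal usage sketch (path, table and partition values are illustrative):
# >>> hh = HiveCliHook()
# >>> hh.load_file(
# ...     filepath="/tmp/babynames.csv",
# ...     table="airflow.static_babynames_partitioned",
# ...     field_dict=OrderedDict([("state", "STRING"), ("num", "BIGINT")]),
# ...     partition={"ds": "2015-01-01"},
# ...     recreate=True,
# ... )
# This would emit DROP/CREATE TABLE ... PARTITIONED BY (ds STRING) followed by
# LOAD DATA LOCAL INPATH ... OVERWRITE INTO TABLE ... PARTITION (ds='2015-01-01').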
def kill(self) -> None:
"""Kill Hive cli command"""
if hasattr(self, 'sub_process'):
if self.sub_process.poll() is None:
print("Killing the Hive job")
self.sub_process.terminate()
time.sleep(60)
self.sub_process.kill()
class HiveMetastoreHook(BaseHook):
"""
Wrapper to interact with the Hive Metastore
:param metastore_conn_id: reference to the
:ref: `metastore thrift service connection id <howto/connection:hive_metastore>`.
:type metastore_conn_id: str
"""
# java short max val
MAX_PART_COUNT = 32767
conn_name_attr = 'metastore_conn_id'
default_conn_name = 'metastore_default'
conn_type = 'hive_metastore'
hook_name = 'Hive Metastore Thrift'
def __init__(self, metastore_conn_id: str = default_conn_name) -> None:
super().__init__()
self.conn_id = metastore_conn_id
self.metastore = self.get_metastore_client()
def __getstate__(self) -> Dict[str, Any]:
# This is for pickling to work despite the thrift hive client not
# being picklable
state = dict(self.__dict__)
del state['metastore']
return state
def __setstate__(self, d: Dict[str, Any]) -> None:
self.__dict__.update(d)
self.__dict__['metastore'] = self.get_metastore_client()
def get_metastore_client(self) -> Any:
"""Returns a Hive thrift client."""
import hmsclient
from thrift.protocol import TBinaryProtocol
from thrift.transport import TSocket, TTransport
conn = self._find_valid_server()
if not conn:
raise AirflowException("Failed to locate the valid server.")
auth_mechanism = conn.extra_dejson.get('authMechanism', 'NOSASL')
if conf.get('core', 'security') == 'kerberos':
auth_mechanism = conn.extra_dejson.get('authMechanism', 'GSSAPI')
kerberos_service_name = conn.extra_dejson.get('kerberos_service_name', 'hive')
conn_socket = TSocket.TSocket(conn.host, conn.port)
if conf.get('core', 'security') == 'kerberos' and auth_mechanism == 'GSSAPI':
try:
import saslwrapper as sasl
except ImportError:
import sasl
def sasl_factory() -> sasl.Client:
sasl_client = sasl.Client()
sasl_client.setAttr("host", conn.host)
sasl_client.setAttr("service", kerberos_service_name)
sasl_client.init()
return sasl_client
from thrift_sasl import TSaslClientTransport
transport = TSaslClientTransport(sasl_factory, "GSSAPI", conn_socket)
else:
transport = TTransport.TBufferedTransport(conn_socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
return hmsclient.HMSClient(iprot=protocol)
def _find_valid_server(self) -> Any:
conn = self.get_connection(self.conn_id)
host_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.log.info("Trying to connect to %s:%s", conn.host, conn.port)
if host_socket.connect_ex((conn.host, conn.port)) == 0:
self.log.info("Connected to %s:%s", conn.host, conn.port)
host_socket.close()
return conn
else:
self.log.error("Could not connect to %s:%s", conn.host, conn.port)
return None
def get_conn(self) -> Any:
return self.metastore
def check_for_partition(self, schema: str, table: str, partition: str) -> bool:
"""
Checks whether a partition exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: str
:param table: Name of hive table @partition belongs to
:type table: str
:param partition: Expression that matches the partitions to check for
(e.g. `a = 'b' AND c = 'd'`)
:type partition: str
:rtype: bool
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_partition('airflow', t, "ds='2015-01-01'")
True
"""
with self.metastore as client:
partitions = client.get_partitions_by_filter(schema, table, partition, 1)
return bool(partitions)
def check_for_named_partition(self, schema: str, table: str, partition_name: str) -> Any:
"""
Checks whether a partition with a given name exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: str
:param table: Name of hive table @partition belongs to
:type table: str
:param partition_name: Name of the partitions to check for (e.g. `a=b/c=d`)
:type partition_name: str
:rtype: bool
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_named_partition('airflow', t, "ds=2015-01-01")
True
>>> hh.check_for_named_partition('airflow', t, "ds=xxx")
False
"""
with self.metastore as client:
return client.check_for_named_partition(schema, table, partition_name)
def get_table(self, table_name: str, db: str = 'default') -> Any:
"""Get a metastore table object
>>> hh = HiveMetastoreHook()
>>> t = hh.get_table(db='airflow', table_name='static_babynames')
>>> t.tableName
'static_babynames'
>>> [col.name for col in t.sd.cols]
['state', 'year', 'name', 'gender', 'num']
"""
if db == 'default' and '.' in table_name:
db, table_name = table_name.split('.')[:2]
with self.metastore as client:
return client.get_table(dbname=db, tbl_name=table_name)
def get_tables(self, db: str, pattern: str = '*') -> Any:
"""Get a metastore table object"""
with self.metastore as client:
tables = client.get_tables(db_name=db, pattern=pattern)
return client.get_table_objects_by_name(db, tables)
def get_databases(self, pattern: str = '*') -> Any:
"""Get a metastore table object"""
with self.metastore as client:
return client.get_databases(pattern)
def get_partitions(
self, schema: str, table_name: str, partition_filter: Optional[str] = None
) -> List[Any]:
"""
Returns a list of all partitions in a table. Works only
for tables with less than 32767 (java short max val).
For subpartitioned table, the number might easily exceed this.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> parts = hh.get_partitions(schema='airflow', table_name=t)
>>> len(parts)
1
>>> parts
[{'ds': '2015-01-01'}]
"""
with self.metastore as client:
table = client.get_table(dbname=schema, tbl_name=table_name)
if len(table.partitionKeys) == 0:
raise AirflowException("The table isn't partitioned")
else:
if partition_filter:
parts = client.get_partitions_by_filter(
db_name=schema,
tbl_name=table_name,
filter=partition_filter,
max_parts=HiveMetastoreHook.MAX_PART_COUNT,
)
else:
parts = client.get_partitions(
db_name=schema, tbl_name=table_name, max_parts=HiveMetastoreHook.MAX_PART_COUNT
)
pnames = [p.name for p in table.partitionKeys]
return [dict(zip(pnames, p.values)) for p in parts]
@staticmethod
def _get_max_partition_from_part_specs(
part_specs: List[Any], partition_key: Optional[str], filter_map: Optional[Dict[str, Any]]
) -> Any:
"""
Helper method to get max partition of partitions with partition_key
from part specs. The key:value pairs in filter_map will be used to
filter out partitions.
:param part_specs: list of partition specs.
:type part_specs: list
:param partition_key: partition key name.
:type partition_key: str
:param filter_map: partition_key:partition_value map used for partition filtering,
e.g. {'key1': 'value1', 'key2': 'value2'}.
Only partitions matching all partition_key:partition_value
pairs will be considered as candidates of max partition.
:type filter_map: map
:return: Max partition or None if part_specs is empty.
:rtype: str
"""
if not part_specs:
return None
# Assuming all specs have the same keys.
if partition_key not in part_specs[0].keys():
raise AirflowException(f"Provided partition_key {partition_key} is not in part_specs.")
is_subset = None
if filter_map:
is_subset = set(filter_map.keys()).issubset(set(part_specs[0].keys()))
if filter_map and not is_subset:
raise AirflowException(
f"Keys in provided filter_map {', '.join(filter_map.keys())} "
f"are not subset of part_spec keys: {', '.join(part_specs[0].keys())}"
)
candidates = [
p_dict[partition_key]
for p_dict in part_specs
if filter_map is None or all(item in p_dict.items() for item in filter_map.items())
]
if not candidates:
return None
else:
return max(candidates)
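# Worked example (hypothetical values): with
#   part_specs = [{'ds': '2015-01-01', 'hr': '00'}, {'ds': '2015-01-02', 'hr': '01'}]
#   partition_key = 'ds' and filter_map = {'hr': '00'}
# only the first spec passes the filter, so the method returns '2015-01-01';
# with filter_map=None both specs are candidates and '2015-01-02' is returned.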
def max_partition(
self,
schema: str,
table_name: str,
field: Optional[str] = None,
filter_map: Optional[Dict[Any, Any]] = None,
) -> Any:
"""
Returns the maximum value for all partitions with given field in a table.
If only one partition key exists in the table, the key will be used as field.
filter_map should be a partition_key:partition_value map and will be used to
filter out partitions.
:param schema: schema name.
:type schema: str
:param table_name: table name.
:type table_name: str
:param field: partition key to get max partition from.
:type field: str
:param filter_map: partition_key:partition_value map used for partition filtering.
:type filter_map: map
>>> hh = HiveMetastoreHook()
>>> filter_map = {'ds': '2015-01-01'}
>>> t = 'static_babynames_partitioned'
>>> hh.max_partition(schema='airflow',\
... table_name=t, field='ds', filter_map=filter_map)
'2015-01-01'
"""
with self.metastore as client:
table = client.get_table(dbname=schema, tbl_name=table_name)
key_name_set = {key.name for key in table.partitionKeys}
if len(table.partitionKeys) == 1:
field = table.partitionKeys[0].name
elif not field:
raise AirflowException("Please specify the field you want the max value for.")
elif field not in key_name_set:
raise AirflowException("Provided field is not a partition key.")
if filter_map and not set(filter_map.keys()).issubset(key_name_set):
raise AirflowException("Provided filter_map contains keys that are not partition key.")
part_names = client.get_partition_names(
schema, table_name, max_parts=HiveMetastoreHook.MAX_PART_COUNT
)
part_specs = [client.partition_name_to_spec(part_name) for part_name in part_names]
return HiveMetastoreHook._get_max_partition_from_part_specs(part_specs, field, filter_map)
def table_exists(self, table_name: str, db: str = 'default') -> bool:
"""
Check if table exists
>>> hh = HiveMetastoreHook()
>>> hh.table_exists(db='airflow', table_name='static_babynames')
True
>>> hh.table_exists(db='airflow', table_name='does_not_exist')
False
"""
try:
self.get_table(table_name, db)
return True
except Exception:
return False
def drop_partitions(self, table_name, part_vals, delete_data=False, db='default'):
"""
Drop partitions from the given table matching the part_vals input
:param table_name: table name.
:type table_name: str
:param part_vals: list of partition specs.
:type part_vals: list
:param delete_data: Setting to control whether the underlying data has to be deleted
in addition to dropping partitions.
:type delete_data: bool
:param db: Name of hive schema (database) @table belongs to
:type db: str
>>> hh = HiveMetastoreHook()
>>> hh.drop_partitions(db='airflow', table_name='static_babynames',
... part_vals="['2020-05-01']")
True
"""
if self.table_exists(table_name, db):
with self.metastore as client:
self.log.info(
"Dropping partition of table %s.%s matching the spec: %s", db, table_name, part_vals
)
return client.drop_partition(db, table_name, part_vals, delete_data)
else:
self.log.info("Table %s.%s does not exist!", db, table_name)
return False
class HiveServer2Hook(DbApiHook):
"""
Wrapper around the pyhive library
Notes:
* the default authMechanism is PLAIN, to override it you
can specify it in the ``extra`` of your connection in the UI
* the default for run_set_variable_statements is true, if you
are using impala you may need to set it to false in the
``extra`` of your connection in the UI
:param hiveserver2_conn_id: Reference to the
:ref: `Hive Server2 thrift service connection id <howto/connection:hiveserver2>`.
:type hiveserver2_conn_id: str
:param schema: Hive database name.
:type schema: Optional[str]
"""
conn_name_attr = 'hiveserver2_conn_id'
default_conn_name = 'hiveserver2_default'
conn_type = 'hiveserver2'
hook_name = 'Hive Server 2 Thrift'
supports_autocommit = False
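# Illustrative only: a connection "extra" JSON for the notes above might look
# like this (keys match the extra_dejson lookups in get_conn and _get_results;
# the values are made up):
# {
#     "authMechanism": "LDAP",
#     "run_set_variable_statements": false
# }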
def get_conn(self, schema: Optional[str] = None) -> Any:
"""Returns a Hive connection object."""
username: Optional[str] = None
password: Optional[str] = None
db = self.get_connection(self.hiveserver2_conn_id) # type: ignore
auth_mechanism = db.extra_dejson.get('authMechanism', 'NONE')
if auth_mechanism == 'NONE' and db.login is None:
# we need to give a username
username = 'airflow'
kerberos_service_name = None
if conf.get('core', 'security') == 'kerberos':
auth_mechanism = db.extra_dejson.get('authMechanism', 'KERBEROS')
kerberos_service_name = db.extra_dejson.get('kerberos_service_name', 'hive')
# pyhive uses GSSAPI instead of KERBEROS as an auth_mechanism identifier
if auth_mechanism == 'GSSAPI':
self.log.warning(
"Detected deprecated 'GSSAPI' for authMechanism for %s. Please use 'KERBEROS' instead",
self.hiveserver2_conn_id, # type: ignore
)
auth_mechanism = 'KERBEROS'
# Password should be set if and only if in LDAP or CUSTOM mode
if auth_mechanism in ('LDAP', 'CUSTOM'):
password = db.password
from pyhive.hive import connect
return connect(
host=db.host,
port=db.port,
auth=auth_mechanism,
kerberos_service_name=kerberos_service_name,
username=db.login or username,
password=password,
database=schema or db.schema or 'default',
)
def _get_results(
self,
hql: Union[str, str, List[str]],
schema: str = 'default',
fetch_size: Optional[int] = None,
hive_conf: Optional[Dict[Any, Any]] = None,
) -> Any:
from pyhive.exc import ProgrammingError
if isinstance(hql, str):
hql = [hql]
previous_description = None
with contextlib.closing(self.get_conn(schema)) as conn, contextlib.closing(conn.cursor()) as cur:
cur.arraysize = fetch_size or 1000
# not all query services (e.g. impala AIRFLOW-4434) support the set command
db = self.get_connection(self.hiveserver2_conn_id) # type: ignore
if db.extra_dejson.get('run_set_variable_statements', True):
env_context = get_context_from_env_var()
if hive_conf:
env_context.update(hive_conf)
for k, v in env_context.items():
cur.execute(f"set {k}={v}")
for statement in hql:
cur.execute(statement)
# we only get results of statements that return something
lowered_statement = statement.lower().strip()
if (
lowered_statement.startswith('select')
or lowered_statement.startswith('with')
or lowered_statement.startswith('show')
or (lowered_statement.startswith('set') and '=' not in lowered_statement)
):
description = cur.description
if previous_description and previous_description != description:
message = f'''The statements are producing different descriptions:
Current: {repr(description)}
Previous: {repr(previous_description)}'''
raise ValueError(message)
elif not previous_description:
previous_description = description
yield description
try:
# DB API 2 raises when no results are returned
# we're silencing here as some statements in the list
# may be `SET` or DDL
yield from cur
except ProgrammingError:
self.log.debug("get_results returned no records")
def get_results(
self,
hql: Union[str, str],
schema: str = 'default',
fetch_size: Optional[int] = None,
hive_conf: Optional[Dict[Any, Any]] = None,
) -> Dict[str, Any]:
"""
Get results of the provided hql in target schema.
:param hql: hql to be executed.
:type hql: str or list
        :param schema: target schema, defaults to 'default'.
        :type schema: str
        :param fetch_size: max size of result to fetch.
        :type fetch_size: int
        :param hive_conf: hive_conf to execute along with the hql.
:type hive_conf: dict
:return: results of hql execution, dict with data (list of results) and header
:rtype: dict
"""
results_iter = self._get_results(hql, schema, fetch_size=fetch_size, hive_conf=hive_conf)
header = next(results_iter)
results = {'data': list(results_iter), 'header': header}
return results
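    # Illustrative usage sketch (not part of the hook): assumes a configured
    # 'hiveserver2_default' connection and a reachable HiveServer2 instance.
    #   hook = HiveServer2Hook()
    #   result = hook.get_results("SELECT 1")
    #   rows, header = result['data'], result['header']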
def to_csv(
self,
        hql: Union[str, List[str]],
csv_filepath: str,
schema: str = 'default',
delimiter: str = ',',
lineterminator: str = '\r\n',
output_header: bool = True,
fetch_size: int = 1000,
hive_conf: Optional[Dict[Any, Any]] = None,
) -> None:
"""
Execute hql in target schema and write results to a csv file.
:param hql: hql to be executed.
:type hql: str or list
:param csv_filepath: filepath of csv to write results into.
:type csv_filepath: str
        :param schema: target schema, defaults to 'default'.
        :type schema: str
        :param delimiter: delimiter of the csv file, defaults to ','.
        :type delimiter: str
        :param lineterminator: lineterminator of the csv file.
        :type lineterminator: str
        :param output_header: whether to write the column names as a header row, defaults to True.
        :type output_header: bool
        :param fetch_size: number of result rows fetched and written per batch, defaults to 1000.
        :type fetch_size: int
        :param hive_conf: hive_conf to execute along with the hql.
:type hive_conf: dict
"""
results_iter = self._get_results(hql, schema, fetch_size=fetch_size, hive_conf=hive_conf)
header = next(results_iter)
message = None
i = 0
with open(csv_filepath, 'wb') as file:
writer = csv.writer(file, delimiter=delimiter, lineterminator=lineterminator, encoding='utf-8')
try:
if output_header:
self.log.debug('Cursor description is %s', header)
writer.writerow([c[0] for c in header])
for i, row in enumerate(results_iter, 1):
writer.writerow(row)
if i % fetch_size == 0:
self.log.info("Written %s rows so far.", i)
except ValueError as exception:
message = str(exception)
if message:
# need to clean up the file first
os.remove(csv_filepath)
raise ValueError(message)
self.log.info("Done. Loaded a total of %s rows.", i)
def get_records(
        self, hql: Union[str, List[str]], schema: str = 'default', hive_conf: Optional[Dict[Any, Any]] = None
) -> Any:
"""
Get a set of records from a Hive query.
:param hql: hql to be executed.
:type hql: str or list
        :param schema: target schema, defaults to 'default'.
        :type schema: str
        :param hive_conf: hive_conf to execute along with the hql.
:type hive_conf: dict
:return: result of hive execution
:rtype: list
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> len(hh.get_records(sql))
100
"""
return self.get_results(hql, schema=schema, hive_conf=hive_conf)['data']
def get_pandas_df( # type: ignore
self,
        hql: Union[str, List[str]],
schema: str = 'default',
hive_conf: Optional[Dict[Any, Any]] = None,
**kwargs,
) -> pandas.DataFrame:
"""
Get a pandas dataframe from a Hive query
:param hql: hql to be executed.
:type hql: str or list
        :param schema: target schema, defaults to 'default'.
        :type schema: str
        :param hive_conf: hive_conf to execute along with the hql.
:type hive_conf: dict
:param kwargs: (optional) passed into pandas.DataFrame constructor
:type kwargs: dict
:return: result of hive execution
:rtype: DataFrame
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> df = hh.get_pandas_df(sql)
>>> len(df.index)
100
"""
res = self.get_results(hql, schema=schema, hive_conf=hive_conf)
df = pandas.DataFrame(res['data'], columns=[c[0] for c in res['header']], **kwargs)
return df
|
{
"content_hash": "3baffedd8a836c0e570b57bb67614ebd",
"timestamp": "",
"source": "github",
"line_count": 1047,
"max_line_length": 107,
"avg_line_length": 39.080229226361034,
"alnum_prop": 0.56306669599433,
"repo_name": "apache/incubator-airflow",
"id": "ce2e4d9eaa998ec1c46d3bd281658ae0a9be4843",
"size": "41704",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "airflow/providers/apache/hive/hooks/hive.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "69070"
},
{
"name": "Dockerfile",
"bytes": "2001"
},
{
"name": "HTML",
"bytes": "283783"
},
{
"name": "JavaScript",
"bytes": "1387552"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5482822"
},
{
"name": "Shell",
"bytes": "40957"
}
],
"symlink_target": ""
}
|
__author__ = "Rick Sherman, Nitin Kumar"
__credits__ = "Jeremy Schulman"
import unittest
from nose.plugins.attrib import attr
from mock import MagicMock, patch, mock_open
import os
from lxml import etree
from ncclient.manager import Manager, make_device_handler
from ncclient.transport import SSHSession
import ncclient.transport.errors as NcErrors
from jnpr.junos.facts.swver import version_info
from jnpr.junos import Device
from jnpr.junos.exception import RpcError
from jnpr.junos import exception as EzErrors
facts = {'domain': None, 'hostname': 'firefly', 'ifd_style': 'CLASSIC',
'version_info': version_info('12.1X46-D15.3'),
'2RE': False, 'serialnumber': 'aaf5fe5f9b88', 'fqdn': 'firefly',
'virtual': True, 'switch_style': 'NONE', 'version': '12.1X46-D15.3',
'HOME': '/cf/var/home/rick', 'srx_cluster': False,
'model': 'FIREFLY-PERIMETER',
'RE0': {'status': 'Testing',
'last_reboot_reason': 'Router rebooted after a '
'normal shutdown.',
'model': 'FIREFLY-PERIMETER RE',
'up_time': '6 hours, 29 minutes, 30 seconds'},
'vc_capable': False, 'personality': 'SRX_BRANCH'}
@attr('unit')
class Test_MyTemplateLoader(unittest.TestCase):
def setUp(self):
from jnpr.junos.device import _MyTemplateLoader
self.template_loader = _MyTemplateLoader()
@patch('__builtin__.filter')
def test_temp_load_get_source_filter_false(self, filter_mock):
filter_mock.return_value = False
try:
self.template_loader.get_source(None, None)
except Exception as ex:
import jinja2
self.assertEqual(type(ex), jinja2.exceptions.TemplateNotFound)
@patch('jnpr.junos.device.os.path')
def test_temp_load_get_source_filter_true(self, os_path_mock):
        # can't use @patch here as the with statement will handle the patch's exit
m = mock_open()
with patch('__builtin__.file', m, create=True):
self.template_loader.get_source(None, None)
@attr('unit')
class TestDevice(unittest.TestCase):
@patch('ncclient.manager.connect')
def setUp(self, mock_connect):
mock_connect.side_effect = self._mock_manager
self.dev = Device(host='1.1.1.1', user='rick', password='password123',
gather_facts=False)
self.dev.open()
@patch('ncclient.operations.session.CloseSession.request')
def tearDown(self, mock_session):
self.dev.close()
@patch('jnpr.junos.device.netconf_ssh')
def test_device_ConnectAuthError(self, mock_manager):
mock_manager.connect.side_effect = NcErrors.AuthenticationError
with self.assertRaises(EzErrors.ConnectAuthError):
self.dev.open()
@patch('jnpr.junos.device.netconf_ssh')
def test_device_ConnectRefusedError(self, mock_manager):
mock_manager.connect.side_effect = NcErrors.SSHError
with self.assertRaises(EzErrors.ConnectRefusedError):
self.dev.open()
@patch('jnpr.junos.device.netconf_ssh')
@patch('jnpr.junos.device.datetime')
def test_device_ConnectTimeoutError(self, mock_datetime, mock_manager):
NcErrors.SSHError.message = 'cannot open'
mock_manager.connect.side_effect = NcErrors.SSHError
from datetime import timedelta, datetime
currenttime = datetime.now()
mock_datetime.datetime.now.side_effect = [currenttime,
currenttime + timedelta(minutes=4)]
with self.assertRaises(EzErrors.ConnectTimeoutError):
self.dev.open()
@patch('jnpr.junos.device.netconf_ssh')
@patch('jnpr.junos.device.datetime')
def test_device_diff_err_message(self, mock_datetime, mock_manager):
NcErrors.SSHError.message = 'why are you trying :)'
mock_manager.connect.side_effect = NcErrors.SSHError
from datetime import timedelta, datetime
currenttime = datetime.now()
mock_datetime.datetime.now.side_effect = [currenttime,
currenttime + timedelta(minutes=4)]
with self.assertRaises(EzErrors.ConnectError):
self.dev.open()
@patch('jnpr.junos.device.netconf_ssh')
def test_device_ConnectUnknownHostError(self, mock_manager):
import socket
mock_manager.connect.side_effect = socket.gaierror
with self.assertRaises(EzErrors.ConnectUnknownHostError):
self.dev.open()
@patch('jnpr.junos.device.netconf_ssh')
def test_device_other_error(self, mock_manager):
mock_manager.connect.side_effect = TypeError
with self.assertRaises(EzErrors.ConnectError):
self.dev.open()
def test_device_property_logfile_isinstance(self):
mock = MagicMock()
with patch('__builtin__.open', mock):
with patch('__builtin__.file', MagicMock):
handle = open('filename', 'r')
self.dev.logfile = handle
self.assertEqual(self.dev.logfile, handle)
def test_device_host_mand_param(self):
self.assertRaises(ValueError, Device, user='rick',
password='password123',
gather_facts=False)
def test_device_property_logfile_close(self):
self.dev._logfile = MagicMock()
self.dev._logfile.close.return_value = 0
self.dev.logfile = None
self.assertFalse(self.dev._logfile)
def test_device_property_logfile_exception(self):
try:
self.dev.logfile = True
except Exception as ex:
self.assertEqual(type(ex), ValueError)
def test_device_repr(self):
localdev = Device(host='1.1.1.1', user='rick', password='password123',
gather_facts=False)
self.assertEqual(repr(localdev), 'Device(1.1.1.1)')
@patch('jnpr.junos.device.os')
@patch('__builtin__.open')
@patch('paramiko.config.SSHConfig.lookup')
def test_device__sshconf_lkup(self, os_mock, open_mock, mock_paramiko):
os_mock.path.exists.return_value = True
self.dev._sshconf_lkup()
mock_paramiko.assert_called_any()
@patch('os.getenv')
def test_device__sshconf_lkup_path_not_exists(self, mock_env):
mock_env.return_value = '/home/test'
self.assertIsNone(self.dev._sshconf_lkup())
@patch('os.getenv')
def test_device__sshconf_lkup_home_not_defined(self, mock_env):
mock_env.return_value = None
self.assertIsNone(self.dev._sshconf_lkup())
mock_env.assert_called_with('HOME')
@patch('ncclient.manager.connect')
@patch('jnpr.junos.Device.execute')
def test_device_open(self, mock_connect, mock_execute):
with patch('jnpr.junos.utils.fs.FS.cat') as mock_cat:
mock_cat.return_value = """
domain jls.net
"""
mock_connect.side_effect = self._mock_manager
mock_execute.side_effect = self._mock_manager
self.dev2 = Device(host='2.2.2.2', user='rick', password='password123')
self.dev2.open()
self.assertEqual(self.dev2.connected, True)
@patch('jnpr.junos.Device.execute')
def test_device_facts(self, mock_execute):
with patch('jnpr.junos.utils.fs.FS.cat') as mock_cat:
mock_execute.side_effect = self._mock_manager
mock_cat.return_value = """
domain jls.net
"""
self.dev.facts_refresh()
assert self.dev.facts['version'] == facts['version']
def test_device_hostname(self):
self.assertEqual(self.dev.hostname, '1.1.1.1')
def test_device_user(self):
self.assertEqual(self.dev.user, 'rick')
def test_device_get_password(self):
self.assertIsNone(self.dev.password)
def test_device_set_password(self):
self.dev.password = 'secret'
self.assertEqual(self.dev._password, 'secret')
def test_device_get_timeout(self):
self.assertEqual(self.dev.timeout, 30)
def test_device_set_timeout(self):
self.dev.timeout = 10
self.assertEqual(self.dev.timeout, 10)
def test_device_manages(self):
self.assertEqual(self.dev.manages, [],
'By default manages will be empty list')
def test_device_set_facts_exception(self):
try:
self.dev.facts = 'test'
except RuntimeError as ex:
self.assertEqual(RuntimeError, type(ex))
@patch('jnpr.junos.Device.execute')
def test_device_cli(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.assertEqual(self.dev.cli('show cli directory').tag, 'cli')
@patch('jnpr.junos.Device.execute')
def test_device_cli_conf_info(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.assertIn('ge-0/0/0', self.dev.cli('show configuration'))
@patch('jnpr.junos.Device.execute')
def test_device_cli_output(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.assertIn('Alarm', self.dev.cli('show system alarms'))
@patch('jnpr.junos.Device.execute')
def test_device_cli_rpc(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.assertEqual(self.dev.cli('show system uptime | display xml rpc')\
.tag, 'get-system-uptime-information')
def test_device_cli_exception(self):
self.dev.rpc.cli = MagicMock(side_effect=AttributeError)
val = self.dev.cli('show version')
self.assertEqual(val, 'invalid command: show version')
def test_device_execute(self):
self.dev._conn.rpc = MagicMock(side_effect=self._mock_manager)
self.assertEqual(self.dev.execute('<get-system-core-dumps/>').tag,
'directory-list')
def test_device_execute_topy(self):
self.dev._conn.rpc = MagicMock(side_effect=self._mock_manager)
self.assertEqual(self.dev.execute('<get-system-core-dumps/>',
to_py=self._do_nothing), 'Nothing')
def test_device_execute_exception(self):
class MyException(Exception):
rpc_err = """
<rpc-error xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" xmlns:junos="http://xml.juniper.net/junos/12.1X46/junos" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
<error-severity>error</error-severity>
<error-info>
<bad-element>get-bgp-summary-information</bad-element>
</error-info>
<error-message>permission denied</error-message>
</rpc-error>
"""
xml = etree.XML(rpc_err)
self.dev._conn.rpc = MagicMock(side_effect=MyException)
self.assertRaises(RpcError, self.dev.execute,
'<get-software-information/>')
def test_device_execute_rpc_error(self):
self.dev._conn.rpc = MagicMock(side_effect=self._mock_manager)
self.assertRaises(RpcError, self.dev.rpc.get_rpc_error)
def test_device_execute_index_error(self):
self.dev._conn.rpc = MagicMock(side_effect=self._mock_manager)
self.assertTrue(self.dev.rpc.get_index_error())
def test_device_execute_ValueError(self):
self.assertRaises(ValueError, self.dev.execute, None)
def test_device_rpcmeta(self):
self.assertEqual(self.dev.rpc.get_software_information.func_doc,
'get-software-information')
def test_device_probe_timeout_zero(self):
with patch('jnpr.junos.device.socket'):
self.assertFalse(self.dev.probe(0))
def test_device_probe_timeout_gt_zero(self):
with patch('jnpr.junos.device.socket'):
self.assertTrue(self.dev.probe(1),
'probe fn is not working for'
' timeout greater than zero')
def test_device_probe_timeout_exception(self):
with patch('jnpr.junos.device.socket') as mock_socket:
with patch('jnpr.junos.device.time.sleep') as mock_time:
mock_socket.socket.return_value.close.side_effect \
= RuntimeError
mock_time.return_value = None
self.assertFalse(self.dev.probe(.01))
def test_device_bind_varg(self):
self.dev.bind()
mock = MagicMock()
mock.__name__ = 'magic_mock'
self.dev.bind(mock)
self.assertEqual(self.dev.magic_mock.__name__, 'magic_mock')
def test_device_bind_kvarg(self):
self.dev.bind()
mock = MagicMock()
mock.return_value = 'Test'
self.dev.bind(kw=mock)
self.assertEqual(self.dev.kw, 'Test')
def test_device_bind_varg_exception(self):
with self.assertRaises(ValueError):
self.dev.bind()
mock = MagicMock()
mock.__name__ = 'magic mock'
#for *args
self.dev.bind(mock)
self.dev.bind(mock)
def test_device_bind_kvarg_exception(self):
with self.assertRaises(ValueError):
self.dev.bind()
mock = MagicMock()
mock.__name__ = 'magic mock'
#for **kwargs
self.dev.bind(kw=mock)
self.dev.bind(kw=mock)
def test_device_template(self):
# Try to load the template relative to module base
try:
template = self.dev.Template('tests/unit/templates/config-example')
except:
# Try to load the template relative to test base
try:
template = self.dev.Template('templates/config-example')
except:
raise
self.assertEqual(template.render({'host_name': '1',
'domain_name': '2'}),
'system {\n host-name 1;\n domain-name 2;\n}')
def test_device_close(self):
def close_conn():
self.dev.connected = False
self.dev.close = MagicMock(name='close')
self.dev.close.side_effect = close_conn
self.dev.close()
self.assertEqual(self.dev.connected, False)
def _read_file(self, fname):
from ncclient.xml_ import NCElement
fpath = os.path.join(os.path.dirname(__file__),
'rpc-reply', fname)
foo = open(fpath).read()
if (fname == 'get-rpc-error.xml' or
fname == 'get-index-error.xml' or
fname == 'get-system-core-dumps.xml'):
rpc_reply = NCElement(foo, self.dev._conn._device_handler
.transform_reply())
elif (fname == 'show-configuration.xml' or
fname == 'show-system-alarms.xml'):
rpc_reply = NCElement(foo, self.dev._conn._device_handler
.transform_reply())._NCElement__doc
else:
rpc_reply = NCElement(foo, self.dev._conn._device_handler
.transform_reply())._NCElement__doc[0]
return rpc_reply
def _mock_manager(self, *args, **kwargs):
if kwargs:
device_params = kwargs['device_params']
device_handler = make_device_handler(device_params)
session = SSHSession(device_handler)
return Manager(session, device_handler)
elif args:
if args[0].tag == 'command':
if args[0].text == 'show cli directory':
return self._read_file('show-cli-directory.xml')
elif args[0].text == 'show configuration':
return self._read_file('show-configuration.xml')
elif args[0].text == 'show system alarms':
return self._read_file('show-system-alarms.xml')
elif args[0].text == 'show system uptime | display xml rpc':
return self._read_file('show-system-uptime-rpc.xml')
else:
raise RpcError
else:
return self._read_file(args[0].tag + '.xml')
def _do_nothing(self, *args, **kwargs):
return 'Nothing'
|
{
"content_hash": "bc36f1d196621d88b6e0d7634be98935",
"timestamp": "",
"source": "github",
"line_count": 412,
"max_line_length": 167,
"avg_line_length": 40.19174757281554,
"alnum_prop": 0.5877166495561326,
"repo_name": "dgjnpr/py-junos-eznc",
"id": "7837474bab16c41fd19daa7c4028c616035164c4",
"size": "16559",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/test_device.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Pascal",
"bytes": "13"
},
{
"name": "Puppet",
"bytes": "2658"
},
{
"name": "Python",
"bytes": "376420"
},
{
"name": "Ruby",
"bytes": "4840"
},
{
"name": "Shell",
"bytes": "597"
}
],
"symlink_target": ""
}
|
"""test the IPython Kernel"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import io
import os.path
import sys
import nose.tools as nt
from IPython.testing import decorators as dec, tools as tt
from IPython.utils import py3compat
from IPython.utils.path import locate_profile
from IPython.utils.tempdir import TemporaryDirectory
from .utils import (new_kernel, kernel, TIMEOUT, assemble_output, execute,
flush_channels, wait_for_idle)
def _check_mp_mode(kc, expected=False, stream="stdout"):
execute(kc=kc, code="import sys")
flush_channels(kc)
msg_id, content = execute(kc=kc, code="print (sys.%s._check_mp_mode())" % stream)
stdout, stderr = assemble_output(kc.iopub_channel)
nt.assert_equal(eval(stdout.strip()), expected)
# printing tests
def test_simple_print():
"""simple print statement in kernel"""
with kernel() as kc:
iopub = kc.iopub_channel
msg_id, content = execute(kc=kc, code="print ('hi')")
stdout, stderr = assemble_output(iopub)
nt.assert_equal(stdout, 'hi\n')
nt.assert_equal(stderr, '')
_check_mp_mode(kc, expected=False)
def test_sys_path():
"""test that sys.path doesn't get messed up by default"""
with kernel() as kc:
msg_id, content = execute(kc=kc, code="import sys; print (repr(sys.path[0]))")
stdout, stderr = assemble_output(kc.iopub_channel)
nt.assert_equal(stdout, "''\n")
def test_sys_path_profile_dir():
"""test that sys.path doesn't get messed up when `--profile-dir` is specified"""
with new_kernel(['--profile-dir', locate_profile('default')]) as kc:
msg_id, content = execute(kc=kc, code="import sys; print (repr(sys.path[0]))")
stdout, stderr = assemble_output(kc.iopub_channel)
nt.assert_equal(stdout, "''\n")
@dec.knownfailureif(sys.platform == 'win32', "subprocess prints fail on Windows")
def test_subprocess_print():
"""printing from forked mp.Process"""
with new_kernel() as kc:
iopub = kc.iopub_channel
_check_mp_mode(kc, expected=False)
flush_channels(kc)
np = 5
code = '\n'.join([
"from __future__ import print_function",
"import multiprocessing as mp",
"pool = [mp.Process(target=print, args=('hello', i,)) for i in range(%i)]" % np,
"for p in pool: p.start()",
"for p in pool: p.join()"
])
expected = '\n'.join([
"hello %s" % i for i in range(np)
]) + '\n'
msg_id, content = execute(kc=kc, code=code)
stdout, stderr = assemble_output(iopub)
nt.assert_equal(stdout.count("hello"), np, stdout)
for n in range(np):
nt.assert_equal(stdout.count(str(n)), 1, stdout)
nt.assert_equal(stderr, '')
_check_mp_mode(kc, expected=False)
_check_mp_mode(kc, expected=False, stream="stderr")
def test_subprocess_noprint():
"""mp.Process without print doesn't trigger iostream mp_mode"""
with kernel() as kc:
iopub = kc.iopub_channel
np = 5
code = '\n'.join([
"import multiprocessing as mp",
"pool = [mp.Process(target=range, args=(i,)) for i in range(%i)]" % np,
"for p in pool: p.start()",
"for p in pool: p.join()"
])
msg_id, content = execute(kc=kc, code=code)
stdout, stderr = assemble_output(iopub)
nt.assert_equal(stdout, '')
nt.assert_equal(stderr, '')
_check_mp_mode(kc, expected=False)
_check_mp_mode(kc, expected=False, stream="stderr")
@dec.knownfailureif(sys.platform == 'win32', "subprocess prints fail on Windows")
def test_subprocess_error():
"""error in mp.Process doesn't crash"""
with new_kernel() as kc:
iopub = kc.iopub_channel
code = '\n'.join([
"import multiprocessing as mp",
"p = mp.Process(target=int, args=('hi',))",
"p.start()",
"p.join()",
])
msg_id, content = execute(kc=kc, code=code)
stdout, stderr = assemble_output(iopub)
nt.assert_equal(stdout, '')
nt.assert_true("ValueError" in stderr, stderr)
_check_mp_mode(kc, expected=False)
_check_mp_mode(kc, expected=False, stream="stderr")
# raw_input tests
def test_raw_input():
"""test [raw_]input"""
with kernel() as kc:
iopub = kc.iopub_channel
input_f = "input" if py3compat.PY3 else "raw_input"
theprompt = "prompt> "
code = 'print({input_f}("{theprompt}"))'.format(**locals())
msg_id = kc.execute(code, allow_stdin=True)
msg = kc.get_stdin_msg(block=True, timeout=TIMEOUT)
nt.assert_equal(msg['header']['msg_type'], u'input_request')
content = msg['content']
nt.assert_equal(content['prompt'], theprompt)
text = "some text"
kc.input(text)
reply = kc.get_shell_msg(block=True, timeout=TIMEOUT)
nt.assert_equal(reply['content']['status'], 'ok')
stdout, stderr = assemble_output(iopub)
nt.assert_equal(stdout, text + "\n")
@dec.skipif(py3compat.PY3)
def test_eval_input():
"""test input() on Python 2"""
with kernel() as kc:
iopub = kc.iopub_channel
input_f = "input" if py3compat.PY3 else "raw_input"
theprompt = "prompt> "
code = 'print(input("{theprompt}"))'.format(**locals())
msg_id = kc.execute(code, allow_stdin=True)
msg = kc.get_stdin_msg(block=True, timeout=TIMEOUT)
nt.assert_equal(msg['header']['msg_type'], u'input_request')
content = msg['content']
nt.assert_equal(content['prompt'], theprompt)
kc.input("1+1")
reply = kc.get_shell_msg(block=True, timeout=TIMEOUT)
nt.assert_equal(reply['content']['status'], 'ok')
stdout, stderr = assemble_output(iopub)
nt.assert_equal(stdout, "2\n")
def test_save_history():
# Saving history from the kernel with %hist -f was failing because of
# unicode problems on Python 2.
with kernel() as kc, TemporaryDirectory() as td:
file = os.path.join(td, 'hist.out')
execute(u'a=1', kc=kc)
wait_for_idle(kc)
execute(u'b=u"abcþ"', kc=kc)
wait_for_idle(kc)
_, reply = execute("%hist -f " + file, kc=kc)
nt.assert_equal(reply['status'], 'ok')
with io.open(file, encoding='utf-8') as f:
content = f.read()
nt.assert_in(u'a=1', content)
nt.assert_in(u'b=u"abcþ"', content)
def test_help_output():
"""ipython kernel --help-all works"""
tt.help_all_output_test('kernel')
def test_is_complete():
with kernel() as kc:
# There are more test cases for this in core - here we just check
# that the kernel exposes the interface correctly.
kc.is_complete('2+2')
reply = kc.get_shell_msg(block=True, timeout=TIMEOUT)
assert reply['content']['status'] == 'complete'
# SyntaxError should mean it's complete
kc.is_complete('raise = 2')
reply = kc.get_shell_msg(block=True, timeout=TIMEOUT)
assert reply['content']['status'] == 'invalid'
kc.is_complete('a = [1,\n2,')
reply = kc.get_shell_msg(block=True, timeout=TIMEOUT)
assert reply['content']['status'] == 'incomplete'
assert reply['content']['indent'] == ''
def test_complete():
with kernel() as kc:
execute(u'a = 1', kc=kc)
wait_for_idle(kc)
cell = 'import IPython\nb = a.'
kc.complete(cell)
reply = kc.get_shell_msg(block=True, timeout=TIMEOUT)
c = reply['content']
nt.assert_equal(c['status'], 'ok')
nt.assert_equal(c['cursor_start'], cell.find('a.'))
nt.assert_equal(c['cursor_end'], cell.find('a.') + 2)
matches = c['matches']
nt.assert_greater(len(matches), 0)
for match in matches:
nt.assert_equal(match[:2], 'a.')
|
{
"content_hash": "2947b79308dd4c09d9e021ae0725b98c",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 92,
"avg_line_length": 35.74008810572687,
"alnum_prop": 0.585480093676815,
"repo_name": "martydill/url_shortener",
"id": "2849da552732e2b957a99a27ef9532f8b77178a0",
"size": "8131",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "code/venv/lib/python2.7/site-packages/IPython/kernel/tests/test_kernel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "8422"
},
{
"name": "HTML",
"bytes": "38632"
},
{
"name": "JavaScript",
"bytes": "1374725"
},
{
"name": "Python",
"bytes": "11917924"
},
{
"name": "Shell",
"bytes": "3769"
},
{
"name": "Smarty",
"bytes": "21425"
}
],
"symlink_target": ""
}
|
from django.conf.urls.defaults import *
# Shop
urlpatterns = patterns('lfs.utils.views',
(r'^/test$', "test"),
(r'^/upload-test$', "upload_test"),
(r'^/import-easyshop$', "import_easyshop"),
(r'^/update-products-from-es$', "update_products_from_es"),
(r'^/update-product-images-from-es$', "update_images_from_es"),
(r'^/update-accessories-from-es$', "update_accessories_from_es"),
)
|
{
"content_hash": "f8d8708905782e64d21775be53cb06e0",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 69,
"avg_line_length": 37.09090909090909,
"alnum_prop": 0.6348039215686274,
"repo_name": "baffolobill/django-lfs",
"id": "8ccdb1dea30611fb50d06fa51445d7701e09fed1",
"size": "408",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "lfs/utils/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "96502"
},
{
"name": "HTML",
"bytes": "615650"
},
{
"name": "JavaScript",
"bytes": "591493"
},
{
"name": "Python",
"bytes": "1385034"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class MetaValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(self, plotly_name="meta", parent_name="candlestick", **kwargs):
super(MetaValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
{
"content_hash": "b62a5184bd7ea396a0d529136a6910db",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 80,
"avg_line_length": 37.23076923076923,
"alnum_prop": 0.5929752066115702,
"repo_name": "plotly/python-api",
"id": "5489a00f76e2ba8426f4a1fee03ee5ba6797789e",
"size": "484",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/candlestick/_meta.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
from django.core.urlresolvers import resolve, reverse
from django.db import models
from django.utils.translation import ugettext_lazy as _
from mezzanine.conf import settings
from mezzanine.core.models import Displayable, Orderable, RichText
from mezzanine.pages.fields import MenusField
from mezzanine.pages.managers import PageManager
from mezzanine.utils.urls import path_to_slug, slugify
class BasePage(Orderable, Displayable):
"""
Exists solely to store ``PageManager`` as the main manager.
If it's defined on ``Page``, a concrete model, then each
``Page`` subclass loses the custom manager.
"""
objects = PageManager()
class Meta:
abstract = True
class Page(BasePage):
"""
A page in the page tree. This is the base class that custom content types
need to subclass.
"""
parent = models.ForeignKey("Page", blank=True, null=True,
related_name="children")
in_menus = MenusField(_("Show in menus"), blank=True, null=True)
titles = models.CharField(editable=False, max_length=1000, null=True)
content_model = models.CharField(editable=False, max_length=50, null=True)
login_required = models.BooleanField(_("Login required"),
help_text=_("If checked, only logged in users can view this page"))
class Meta:
verbose_name = _("Page")
verbose_name_plural = _("Pages")
ordering = ("titles",)
order_with_respect_to = "parent"
def __unicode__(self):
return self.titles
def get_absolute_url(self):
"""
URL for a page - for ``Link`` page types, simply return its
slug since these don't have an actual URL pattern. Also handle
the special case of the homepage being a page object.
"""
slug = self.slug
if self.content_model == "link":
# Ensure the URL is absolute.
if not slug.lower().startswith("http"):
slug = "/" + self.slug.lstrip("/")
return slug
if slug == "/":
return reverse("home")
else:
return reverse("page", kwargs={"slug": slug})
def save(self, *args, **kwargs):
"""
Create the titles field using the titles up the parent chain
and set the initial value for ordering.
"""
if self.id is None:
self.content_model = self._meta.object_name.lower()
titles = [self.title]
parent = self.parent
while parent is not None:
titles.insert(0, parent.title)
parent = parent.parent
self.titles = " / ".join(titles)
super(Page, self).save(*args, **kwargs)
def description_from_content(self):
"""
Override ``Displayable.description_from_content`` to load the
content type subclass for when ``save`` is called directly on a
``Page`` instance, so that all fields defined on the subclass
are available for generating the description.
"""
if self.__class__ == Page:
content_model = self.get_content_model()
if content_model:
return content_model.description_from_content()
return super(Page, self).description_from_content()
def get_ascendants(self, for_user=None):
"""
Returns the ascendants for the page. Ascendants are cached in
the ``_ascendants`` attribute, which is populated when the page
is loaded via ``Page.objects.with_ascendants_for_slug``.
"""
if not self.parent_id:
# No parents at all, bail out.
return []
if not hasattr(self, "_ascendants"):
            # _ascendants has not been assigned by either page.get_ascendants
            # or Page.objects.with_ascendants_for_slug, so
# run it to see if we can retrieve all parents in a single
# query, which will occur if the slugs for each of the pages
# have not been customised.
if self.slug:
kwargs = {"for_user": for_user}
pages = Page.objects.with_ascendants_for_slug(self.slug,
**kwargs)
self._ascendants = pages[0]._ascendants
else:
self._ascendants = []
if not self._ascendants:
# Page has a parent but with_ascendants_for_slug failed to
# find them due to custom slugs, so retrieve the parents
# recursively.
child = self
while child.parent_id is not None:
self._ascendants.append(child.parent)
child = child.parent
return self._ascendants
@classmethod
def get_content_models(cls):
"""
Return all Page subclasses.
"""
is_content_model = lambda m: m is not Page and issubclass(m, Page)
return filter(is_content_model, models.get_models())
def get_content_model(self):
"""
        Provides a generic method of retrieving the instance of the custom
content type's model for this page.
"""
return getattr(self, self.content_model, None)
def get_slug(self):
"""
Recursively build the slug from the chain of parents.
"""
slug = slugify(self.title)
if self.parent is not None:
return "%s/%s" % (self.parent.slug, slug)
return slug
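    # Illustrative sketch: for a page titled "Contact" whose parent already has
    # the slug "about", get_slug() returns "about/contact" (assuming the default
    # slugify behaviour).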
def set_slug(self, new_slug):
"""
Changes this page's slug, and all other pages whose slugs
start with this page's slug.
"""
for page in Page.objects.filter(slug__startswith=self.slug):
if not page.overridden():
page.slug = new_slug + page.slug[len(self.slug):]
page.save()
self.slug = new_slug
def set_parent(self, new_parent):
"""
Change the parent of this page, changing this page's slug to match
the new parent if necessary.
"""
self_slug = self.slug
old_parent_slug = self.parent.slug if self.parent else ""
new_parent_slug = new_parent.slug if new_parent else ""
# Make sure setting the new parent won't cause a cycle.
parent = new_parent
while parent is not None:
if parent.pk == self.pk:
raise AttributeError("You can't set a page or its child as"
" a parent.")
parent = parent.parent
self.parent = new_parent
self.save()
if self_slug:
if not old_parent_slug:
self.set_slug("/".join((new_parent_slug, self.slug)))
elif self.slug.startswith(old_parent_slug):
new_slug = self.slug.replace(old_parent_slug,
new_parent_slug, 1)
self.set_slug(new_slug.strip("/"))
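    # Illustrative sketch: re-parenting a page whose slug is "about/team" under
    # a parent with the slug "company" rewrites its slug (and, via set_slug,
    # the slugs of its descendants) to "company/team".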
def overridden(self):
"""
Returns ``True`` if the page's slug has an explicitly defined
urlpattern and is therefore considered to be overridden.
"""
from mezzanine.pages.views import page
page_url = reverse("page", kwargs={"slug": self.slug})
resolved_view = resolve(page_url)[0]
return resolved_view != page
def can_add(self, request):
"""
Dynamic ``add`` permission for content types to override.
"""
return self.slug != "/"
def can_change(self, request):
"""
Dynamic ``change`` permission for content types to override.
"""
return True
def can_delete(self, request):
"""
Dynamic ``delete`` permission for content types to override.
"""
return True
def set_helpers(self, context):
"""
Called from the ``page_menu`` template tag and assigns a
handful of properties based on the current page, that are used
within the various types of menus.
"""
current_page = context["_current_page"]
current_page_id = getattr(current_page, "id", None)
current_parent_id = getattr(current_page, "parent_id", None)
# Am I a child of the current page?
self.is_current_child = self.parent_id == current_page_id
self.is_child = self.is_current_child # Backward compatibility
# Is my parent the same as the current page's?
self.is_current_sibling = self.parent_id == current_parent_id
# Am I the current page?
try:
request = context["request"]
except KeyError:
# No request context, most likely when tests are run.
self.is_current = False
else:
self.is_current = self.slug == path_to_slug(request.path_info)
# Is the current page me or any page up the parent chain?
def is_c_or_a(page_id):
parent_id = context["_parent_page_ids"].get(page_id)
return self.id == page_id or (parent_id and is_c_or_a(parent_id))
self.is_current_or_ascendant = lambda: bool(is_c_or_a(current_page_id))
# Am I a primary page?
self.is_primary = self.parent_id is None
# What's an ID I can use in HTML?
self.html_id = self.slug.replace("/", "-")
# Default branch level - gets assigned in the page_menu tag.
self.branch_level = 0
def in_menu_template(self, template_name):
if self.in_menus is not None:
for i, l, t in settings.PAGE_MENU_TEMPLATES:
if not unicode(i) in self.in_menus and t == template_name:
return False
return True
class RichTextPage(Page, RichText):
"""
Implements the default type of page with a single Rich Text
content field.
"""
class Meta:
verbose_name = _("Rich text page")
verbose_name_plural = _("Rich text pages")
class Link(Page):
"""
A general content type for creating external links in the page
menu.
"""
class Meta:
verbose_name = _("Link")
verbose_name_plural = _("Links")
|
{
"content_hash": "2f933d85d3a4cc1df4ee173dda3c1f08",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 79,
"avg_line_length": 36.2589928057554,
"alnum_prop": 0.5822420634920635,
"repo_name": "wrwrwr/mezzanine",
"id": "dee0d1e81f777a7f29efae4cc63438a336f78a2f",
"size": "10080",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mezzanine/pages/models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "75436"
},
{
"name": "JavaScript",
"bytes": "212127"
},
{
"name": "Python",
"bytes": "978985"
}
],
"symlink_target": ""
}
|
"""
svn vcs support.
"""
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
import tarfile
import dateutil.parser # For parsing date strings
import xml.dom.minidom # For parsing logfiles
from vcstools.vcs_base import VcsClientBase, VcsError
from vcstools.common import sanitized, normalized_rel_path, \
run_shell_command, ensure_dir_notexists
def _get_svn_version():
"""Looks up svn version by calling svn --version.
:raises: VcsError if svn is not installed"""
try:
        # SVN commands produce differently formatted output for the French locale
value, output, _ = run_shell_command('svn --version',
shell=True,
us_env=True)
if value == 0 and output is not None and len(output.splitlines()) > 0:
version = output.splitlines()[0]
else:
raise VcsError("svn --version returned "
+ "%s maybe svn is not installed" % value)
except VcsError as exc:
raise VcsError("Could not determine whether svn is installed: "
+ str(exc))
return version
class SvnClient(VcsClientBase):
def __init__(self, path):
"""
:raises: VcsError if python-svn not detected
"""
VcsClientBase.__init__(self, 'svn', path)
# test for svn here, we need it for status
_get_svn_version()
@staticmethod
def get_environment_metadata():
metadict = {}
try:
metadict["version"] = _get_svn_version()
except:
metadict["version"] = "no svn installed"
return metadict
def get_url(self):
"""
:returns: SVN URL of the directory path (output of svn info command),
or None if it cannot be determined
"""
if self.detect_presence():
# 3305: parsing not robust to non-US locales
cmd = 'svn info %s' % self._path
_, output, _ = run_shell_command(cmd, shell=True)
matches = [l for l in output.splitlines() if l.startswith('URL: ')]
if matches:
return matches[0][5:]
def detect_presence(self):
return self.path_exists() and \
os.path.isdir(os.path.join(self.get_path(), '.svn'))
def checkout(self, url, version='', verbose=False,
shallow=False, timeout=None):
if url is None or url.strip() == '':
raise ValueError('Invalid empty url : "%s"' % url)
# Need to check as SVN 1.6.17 writes into directory even if not empty
if not ensure_dir_notexists(self.get_path()):
self.logger.error("Can't remove %s" % self.get_path())
return False
if version is not None and version != '':
if not version.startswith("-r"):
version = "-r%s" % version
elif version is None:
version = ''
cmd = 'svn co %s %s %s' % (sanitized(version),
sanitized(url),
self._path)
value, _, msg = run_shell_command(cmd,
shell=True,
no_filter=True)
if value != 0:
if msg:
self.logger.error('%s' % msg)
return False
return True
def update(self, version=None, verbose=False, timeout=None):
if not self.detect_presence():
sys.stderr.write("Error: cannot update non-existing directory\n")
return False
# protect against shell injection
if version is not None and version != '':
if not version.startswith("-r"):
version = "-r" + version
elif version is None:
version = ''
cmd = 'svn up %s %s --non-interactive' % (sanitized(version),
self._path)
value, _, _ = run_shell_command(cmd,
shell=True,
no_filter=True)
if value == 0:
return True
return False
def get_version(self, spec=None):
"""
:param spec: (optional) spec can be what 'svn info --help'
allows, meaning a revnumber, {date}, HEAD, BASE, PREV, or
COMMITTED.
:returns: current revision number of the repository. Or if spec
provided, the number of a revision specified by some
token.
"""
command = 'svn info '
if spec is not None:
if spec.isdigit():
# looking up svn with "-r" takes long, and if spec is
# a number, all we get from svn is the same number,
# unless we try to look at higher rev numbers (in
# which case either get the same number, or an error
# if the rev does not exist). So we first do a very
# quick svn info, and check revision numbers.
currentversion = self.get_version(spec=None)
# currentversion is like '-r12345'
if currentversion is not None and \
int(currentversion[2:]) > int(spec):
# so if we know revision exist, just return the
# number, avoid the long call to svn server
return '-r' + spec
if spec.startswith("-r"):
command += sanitized(spec)
else:
command += sanitized('-r%s' % spec)
command += " %s" % self._path
# #3305: parsing not robust to non-US locales
_, output, _ = run_shell_command(command, shell=True, us_env=True)
if output is not None:
matches = \
[l for l in output.splitlines() if l.startswith('Revision: ')]
if len(matches) == 1:
split_str = matches[0].split()
if len(split_str) == 2:
return '-r' + split_str[1]
return None
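    # Illustrative usage sketch (assumes an existing svn working copy at `path`
    # and an installed svn client):
    #   client = SvnClient(path)
    #   client.get_version()        # e.g. '-r12345' for the current revision
    #   client.get_version('HEAD')  # revision the server reports for HEAD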
def get_diff(self, basepath=None):
response = None
if basepath is None:
basepath = self._path
if self.path_exists():
rel_path = normalized_rel_path(self._path, basepath)
command = 'svn diff %s' % sanitized(rel_path)
_, response, _ = run_shell_command(command,
shell=True,
cwd=basepath)
return response
def get_log(self, relpath=None, limit=None):
response = []
if relpath is None:
relpath = ''
if self.path_exists() and os.path.exists(os.path.join(self._path, relpath)):
# Get the log
limit_cmd = (("--limit %d" % (int(limit))) if limit else "")
command = "svn log %s --xml %s" % (limit_cmd, sanitized(relpath) if len(relpath) > 0 else '')
return_code, xml_response, stderr = run_shell_command(command, shell=True, cwd=self._path)
# Parse response
dom = xml.dom.minidom.parseString(xml_response)
log_entries = dom.getElementsByTagName("logentry")
# Extract the entries
for log_entry in log_entries:
author_tag = log_entry.getElementsByTagName("author")[0]
date_tag = log_entry.getElementsByTagName("date")[0]
msg_tags = log_entry.getElementsByTagName("msg")
log_data = dict()
log_data['id'] = log_entry.getAttribute("revision")
log_data['author'] = author_tag.firstChild.nodeValue
log_data['email'] = None
log_data['date'] = dateutil.parser.parse(str(date_tag.firstChild.nodeValue))
if len(msg_tags) > 0 and msg_tags[0].firstChild:
log_data['message'] = msg_tags[0].firstChild.nodeValue
else:
log_data['message'] = ''
response.append(log_data)
return response
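    # Each entry in the list returned above is a dict shaped roughly like this
    # (illustrative values):
    #   {'id': '12345', 'author': 'alice', 'email': None,
    #    'date': <parsed datetime>, 'message': 'commit message'}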
def get_status(self, basepath=None, untracked=False):
response = None
if basepath is None:
basepath = self._path
if self.path_exists():
rel_path = normalized_rel_path(self._path, basepath)
# protect against shell injection
command = 'svn status %s' % sanitized(rel_path)
if not untracked:
command += " -q"
_, response, _ = run_shell_command(command,
shell=True,
cwd=basepath)
if response is not None and \
len(response) > 0 and \
response[-1] != '\n':
response += '\n'
return response
def export_repository(self, version, basepath):
# Run the svn export cmd
cmd = 'svn export {0} {1}'.format(os.path.join(self._path, version),
basepath)
result, _, _ = run_shell_command(cmd, shell=True)
if result:
return False
try:
# tar gzip the exported repo
targzip_file = tarfile.open(basepath + '.tar.gz', 'w:gz')
try:
targzip_file.add(basepath, '')
finally:
targzip_file.close()
finally:
# clean up
from shutil import rmtree
rmtree(basepath)
return True
SVNClient = SvnClient
|
{
"content_hash": "4f9d7f60b4a39b7f7bdb3978cd417cda",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 105,
"avg_line_length": 38.626506024096386,
"alnum_prop": 0.5140361821584529,
"repo_name": "k-okada/vcstools",
"id": "a2731a810c903c0c6b921b621daa6535668186fd",
"size": "11224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/vcstools/svn.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
class BaseAction(object):
""" The base class for an action. This should be subclassed for
every new Action created.
At a minimum, every action should have a name, a unique action_id
(e.g. my_action) and a function `execute` that accepts an EmailReport and options.
Each Action can specify a dictionary of values to be requested. Validation is
not currently performed and the options are provided as-is.
For the values to be requested, you can specify a "name" field as well as a possible
"choices" field that expects a list of strings. If you want to load the choices
    dynamically, you can specify a "choiceFunc" that is executed on JSON marshalling and
    used to fill the "choices" attribute before returning the action to the user.
If choices or a choiceFunc is specified, those will be presented as options to the user.
Otherwise, the UI will provide a generic text field for each option.
Action authors can also add an optional description (str) field.
"""
name = ""
description = ""
options = {}
def execute(self, report, options):
raise NotImplementedError
@classmethod
def to_dict(cls, **kwargs):
options = {}
for option in cls.options:
options[option] = {
"name": cls.options[option]['name'],
}
choices = cls.options[option].get('choices')
choiceFunc = cls.options[option].get('choiceFunc')
if choiceFunc:
choices = choiceFunc(**kwargs)
if choices:
options[option]['choices'] = choices
return {
"name": cls.name,
"description": cls.description,
"action_id": cls.action_id,
"options": options
}
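# --- Illustrative example (hypothetical; not part of this module) ---
# A minimal subclass showing the fields described in the docstring above:
# a name, a unique action_id, an options dict, and an execute() method.
class EchoAction(BaseAction):
    """ Example action that simply echoes the requested option back. """
    name = "Echo value"
    description = "Echo the chosen value back (illustrative only)"
    action_id = "echo_value"
    options = {
        "value": {
            "name": "Value to echo",
        },
    }
    def execute(self, report, options):
        # A real action would act on the EmailReport here.
        return options.get("value")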
|
{
"content_hash": "43c133ef7fa5b6d073f0bb68a5dafbd8",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 92,
"avg_line_length": 36.714285714285715,
"alnum_prop": 0.6336853807670928,
"repo_name": "duo-labs/isthislegit",
"id": "40a773285dbd9fd0fb3eca9e1bbe182f3a6024c0",
"size": "1799",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dashboard/actions/action.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "89903"
},
{
"name": "Dockerfile",
"bytes": "1356"
},
{
"name": "HTML",
"bytes": "39168"
},
{
"name": "JavaScript",
"bytes": "47642"
},
{
"name": "Python",
"bytes": "315671"
}
],
"symlink_target": ""
}
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'List'
db.create_table(u'simplelist_list', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('spec_name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('create_date', self.gf('django.db.models.fields.DateTimeField')()),
))
db.send_create_signal(u'simplelist', ['List'])
# Adding model 'Entry'
db.create_table(u'simplelist_entry', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('spec', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['simplelist.List'])),
('heading_text', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
('body_text', self.gf('django.db.models.fields.CharField')(max_length=2000, blank=True)),
('doc_sort_order', self.gf('django.db.models.fields.IntegerField')(default=0)),
))
db.send_create_signal(u'simplelist', ['Entry'])
def backwards(self, orm):
# Deleting model 'List'
db.delete_table(u'simplelist_list')
# Deleting model 'Entry'
db.delete_table(u'simplelist_entry')
models = {
u'simplelist.entry': {
'Meta': {'object_name': 'Entry'},
'body_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'doc_sort_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'heading_text': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'spec': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['simplelist.List']"})
},
u'simplelist.list': {
'Meta': {'object_name': 'List'},
'create_date': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'spec_name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
}
}
complete_apps = ['simplelist']
|
{
"content_hash": "fd84a07665a35e7ef8516833c749d826",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 110,
"avg_line_length": 44.75925925925926,
"alnum_prop": 0.5825403392635499,
"repo_name": "ransage/hatch",
"id": "7c7f7fd257ef3f1a7a655a5051065174557fb0c5",
"size": "2441",
"binary": false,
"copies": "1",
"ref": "refs/heads/dependabot/pip/django-2.2.24",
"path": "simplelist/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "89"
},
{
"name": "HTML",
"bytes": "2887"
},
{
"name": "Python",
"bytes": "27402"
},
{
"name": "Shell",
"bytes": "98"
}
],
"symlink_target": ""
}
|
import os
import re
from setuptools import setup, find_packages, Command
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
def read_reqs(name):
with open(os.path.join(os.path.dirname(__file__), name)) as f:
return [line for line in f.read().split('\n') if line and not line.strip().startswith('#')]
def read_version():
with open(os.path.join('lib', 'tri_named_struct', '__init__.py')) as f:
m = re.search(r'''__version__\s*=\s*['"]([^'"]*)['"]''', f.read())
if m:
return m.group(1)
raise ValueError("couldn't find version")
class Tag(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
from subprocess import call
version = read_version()
errno = call(['git', 'tag', '--annotate', version, '--message', 'Version %s' % version])
if errno == 0:
print("Added tag for version %s" % version)
raise SystemExit(errno)
class ReleaseCheck(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
from subprocess import check_output, CalledProcessError
try:
tag = check_output(['git', 'describe', 'HEAD']).strip().decode('utf8')
except CalledProcessError:
tag = ''
version = read_version()
if tag != version:
print('Missing %s tag on release' % version)
raise SystemExit(1)
current_branch = check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip().decode('utf8')
if current_branch != 'master':
print('Only release from master')
raise SystemExit(1)
print("Ok to distribute files")
# NB: _don't_ add namespace_packages to setup(), it'll break
# everything using imp.find_module
setup(
name='tri.named-struct',
version=read_version(),
description='tri.named-struct supplies a class that can be used like dictionaries (or via attribute access), but with a predefined set of possible key values',
long_description=readme + '\n\n' + history,
author='Johan Lübcke',
author_email='johan.lubcke@trioptima.com',
url='https://github.com/TriOptima/tri.named-struct',
packages=find_packages('lib'),
package_dir={'': 'lib'},
include_package_data=True,
install_requires=read_reqs('requirements.txt'),
license="BSD",
zip_safe=False,
keywords='tri.named-struct',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
],
test_suite='tests',
cmdclass={'tag': Tag,
'release_check': ReleaseCheck},
)
|
{
"content_hash": "67372b21a55179f436aeee6cb2543cb7",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 163,
"avg_line_length": 31.510204081632654,
"alnum_prop": 0.5948834196891192,
"repo_name": "TriOptima/tri.named_struct",
"id": "9ec53bf980bb3dbee93b96269559a69ce6210b5f",
"size": "3135",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1126"
},
{
"name": "Python",
"bytes": "11619"
}
],
"symlink_target": ""
}
|
import re
from setuptools import setup
try:
# Distributions have to delete *requirements.txt
with open('requirements.txt', 'r') as fp:
install_requires = [re.split(r'[<>=]', line)[0]
for line in fp if line.strip()]
except EnvironmentError:
print("No requirements.txt, not handling dependencies")
install_requires = []
with open('ironic_discoverd/__init__.py', 'rb') as fp:
exec(fp.read())
setup(
name = "ironic-discoverd",
version = __version__,
description = open('README.rst', 'r').readline().strip(),
author = "Dmitry Tantsur",
author_email = "dtantsur@redhat.com",
url = "https://pypi.python.org/pypi/ironic-discoverd",
packages = ['ironic_discoverd', 'ironic_discoverd.plugins',
'ironic_discoverd.test'],
install_requires = install_requires,
entry_points = {
'console_scripts': [
"ironic-discoverd = ironic_discoverd.main:main"
],
'ironic_discoverd.hooks': [
"scheduler = ironic_discoverd.plugins.standard:SchedulerHook",
"validate_interfaces = ironic_discoverd.plugins.standard:ValidateInterfacesHook",
"ramdisk_error = ironic_discoverd.plugins.standard:RamdiskErrorHook",
"example = ironic_discoverd.plugins.example:ExampleProcessingHook",
],
},
classifiers = [
'Development Status :: 5 - Production/Stable',
'Environment :: OpenStack',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX',
],
license = 'APL 2.0',
)
|
{
"content_hash": "80b8539da6cce8c23bf1e23843723871",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 93,
"avg_line_length": 33.91836734693877,
"alnum_prop": 0.6197352587244284,
"repo_name": "OpenDaisy/daisy-discoverd",
"id": "b91359d4e22cdd92a85d8b26f260f91f2471b3fe",
"size": "1662",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Groff",
"bytes": "747"
},
{
"name": "Python",
"bytes": "119660"
}
],
"symlink_target": ""
}
|
import binascii
import json
import logging
from abc import ABCMeta, abstractmethod
from copy import deepcopy
from datetime import date, datetime, time
from decimal import Decimal
from distutils.util import strtobool
from typing import Any, Callable, Dict, Optional, Type
_logger = logging.getLogger(__name__) # type: ignore
def _to_date(varchar_value: Optional[str]) -> Optional[date]:
if varchar_value is None:
return None
return datetime.strptime(varchar_value, "%Y-%m-%d").date()
def _to_datetime(varchar_value: Optional[str]) -> Optional[datetime]:
if varchar_value is None:
return None
return datetime.strptime(varchar_value, "%Y-%m-%d %H:%M:%S.%f")
def _to_time(varchar_value: Optional[str]) -> Optional[time]:
if varchar_value is None:
return None
return datetime.strptime(varchar_value, "%H:%M:%S.%f").time()
def _to_float(varchar_value: Optional[str]) -> Optional[float]:
if varchar_value is None:
return None
return float(varchar_value)
def _to_int(varchar_value: Optional[str]) -> Optional[int]:
if varchar_value is None:
return None
return int(varchar_value)
def _to_decimal(varchar_value: Optional[str]) -> Optional[Decimal]:
if not varchar_value:
return None
return Decimal(varchar_value)
def _to_boolean(varchar_value: Optional[str]) -> Optional[bool]:
if not varchar_value:
return None
return bool(strtobool(varchar_value))
def _to_binary(varchar_value: Optional[str]) -> Optional[bytes]:
if varchar_value is None:
return None
return binascii.a2b_hex("".join(varchar_value.split(" ")))
def _to_json(varchar_value: Optional[str]) -> Optional[Any]:
if varchar_value is None:
return None
return json.loads(varchar_value)
def _to_default(varchar_value: Optional[str]) -> Optional[str]:
return varchar_value
_DEFAULT_CONVERTERS: Dict[str, Callable[[Optional[str]], Optional[Any]]] = {
"boolean": _to_boolean,
"tinyint": _to_int,
"smallint": _to_int,
"integer": _to_int,
"bigint": _to_int,
"float": _to_float,
"real": _to_float,
"double": _to_float,
"char": _to_default,
"varchar": _to_default,
"string": _to_default,
"timestamp": _to_datetime,
"date": _to_date,
"time": _to_time,
"varbinary": _to_binary,
"array": _to_default,
"map": _to_default,
"row": _to_default,
"decimal": _to_decimal,
"json": _to_json,
}
class Converter(metaclass=ABCMeta):
def __init__(
self,
mappings: Dict[str, Callable[[Optional[str]], Optional[Any]]],
default: Callable[[Optional[str]], Optional[Any]] = _to_default,
types: Optional[Dict[str, Type[Any]]] = None,
) -> None:
if mappings:
self._mappings = mappings
else:
self._mappings = dict()
self._default = default
if types:
self._types = types
else:
self._types = dict()
@property
def mappings(self) -> Dict[str, Callable[[Optional[str]], Optional[Any]]]:
return self._mappings
@property
def types(self) -> Dict[str, Type[Any]]:
return self._types
def get(self, type_: str) -> Callable[[Optional[str]], Optional[Any]]:
return self.mappings.get(type_, self._default)
def set(self, type_: str, converter: Callable[[Optional[str]], Optional[Any]]) -> None:
self.mappings[type_] = converter
def remove(self, type_: str) -> None:
self.mappings.pop(type_, None)
def update(self, mappings: Dict[str, Callable[[Optional[str]], Optional[Any]]]) -> None:
self.mappings.update(mappings)
@abstractmethod
def convert(self, type_: str, value: Optional[str]) -> Optional[Any]:
raise NotImplementedError # pragma: no cover
class DefaultTypeConverter(Converter):
def __init__(self) -> None:
super(DefaultTypeConverter, self).__init__(
mappings=deepcopy(_DEFAULT_CONVERTERS), default=_to_default
)
def convert(self, type_: str, value: Optional[str]) -> Optional[Any]:
converter = self.get(type_)
return converter(value)
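# Hedged usage sketch (added for illustration; it only exercises the public
# API defined above and is not part of the original module).
if __name__ == "__main__":
    _converter = DefaultTypeConverter()
    # Known Athena/Presto type names dispatch to the matching _to_* helper.
    print(_converter.convert("integer", "42"))       # 42
    print(_converter.convert("boolean", "true"))     # True
    print(_converter.convert("date", "2020-01-02"))  # datetime.date(2020, 1, 2)
    # Unknown type names fall back to the default passthrough converter.
    print(_converter.convert("mystery", "raw value"))  # 'raw value'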
|
{
"content_hash": "e68100c892daf5e6f301a9f90046d2e2",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 92,
"avg_line_length": 28.664383561643834,
"alnum_prop": 0.629868578255675,
"repo_name": "laughingman7743/PyAthena",
"id": "8d88b619dde0c97b7a7d9670090702ecef1d9515",
"size": "4209",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyathena/converter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jinja",
"bytes": "5480"
},
{
"name": "Makefile",
"bytes": "269"
},
{
"name": "Python",
"bytes": "454798"
}
],
"symlink_target": ""
}
|
"""The Versioneer - like a rocketeer, but for versions.
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, and pypy
* [![Latest Version]
(https://pypip.in/version/versioneer/badge.svg?style=flat)
](https://pypi.python.org/pypi/versioneer/)
* [![Build Status]
(https://travis-ci.org/warner/python-versioneer.png?branch=master)
](https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere in your $PATH
* add a `[versioneer]` section to your setup.cfg (see below)
* run `versioneer install` in your source tree, commit the results
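A minimal `[versioneer]` section (the paths below are placeholders for your
own project layout) looks like:
    [versioneer]
    VCS = git
    style = pep440
    versionfile_source = src/myproject/_version.py
    versionfile_build = myproject/_version.py
    tag_prefix =
    parentdir_prefix = myproject-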
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
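The `.gitattributes` entry that `versioneer install` writes is a single line
pointing at your `versionfile_source` (the path shown is only an example):
    src/myproject/_version.py export-subst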
## Installation
See [INSTALL.md](./INSTALL.md) for detailed installation instructions.
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the
commit date in ISO 8601 format. This will be None if the date is not
available.
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown".
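As a rough illustration (the field values here are hypothetical), a dirty
checkout two commits past a "0.11" tag yields something like:
    {'version': '0.11+2.g1076c97.dirty',
     'full-revisionid': '1076c978a8d3cfc70f408fe5974aa6c092c949ac',
     'dirty': True,
     'error': None,
     'date': '2018-03-01T12:00:00+0000'}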
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
Other styles are available. See [details.md](details.md) in the Versioneer
source tree for descriptions.
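For a single hypothetical checkout (two commits past the "0.11" tag, with
uncommitted changes), the styles render roughly as follows:
    pep440:            0.11+2.g1076c97.dirty
    pep440-pre:        0.11.post.dev2
    pep440-post:       0.11.post2.dev0+g1076c97
    pep440-old:        0.11.post2.dev0
    git-describe:      0.11-2-g1076c97-dirty
    git-describe-long: 0.11-2-g1076c97-dirty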
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
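If every lookup method fails, the verbose output ends with a fallback result
along these lines (an illustrative worst case, not a literal transcript):
    $ python setup.py version
    Version: 0+unknown
     full-revisionid: None
     dirty: None
     date: None
     error: unable to compute version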
## Known Limitations
Some situations are known to cause problems for Versioneer. This section
details the most significant ones; more can be found on the Github
[issues page](https://github.com/warner/python-versioneer/issues).
### Subprojects
Versioneer has limited support for source trees in which `setup.py` is not in
the root directory (e.g. `setup.py` and `.git/` are *not* siblings). There are
two common reasons why `setup.py` might not be in the root:
* Source trees which contain multiple subprojects, such as
[Buildbot](https://github.com/buildbot/buildbot), which contains both
"master" and "slave" subprojects, each with their own `setup.py`,
`setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI
distributions (and upload multiple independently-installable tarballs).
* Source trees whose main purpose is to contain a C library, but which also
  provide bindings to Python (and perhaps other languages) in subdirectories.
Versioneer will look for `.git` in parent directories, and most operations
should get the right version string. However `pip` and `setuptools` have bugs
and implementation details which frequently cause `pip install .` from a
subproject directory to fail to find a correct version string (so it usually
defaults to `0+unknown`).
`pip install --editable .` should work correctly. `setup.py install` might
work too.
Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in
some later version.
[Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking
this issue. The discussion in
[PR #61](https://github.com/warner/python-versioneer/pull/61) describes the
issue from the Versioneer side in more detail.
[pip PR#3176](https://github.com/pypa/pip/pull/3176) and
[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve
pip to let Versioneer work correctly.
Versioneer-0.16 and earlier only looked for a `.git` directory next to the
`setup.cfg`, so subprojects were completely unsupported with those releases.
### Editable installs with setuptools <= 18.5
`setup.py develop` and `pip install --editable .` allow you to install a
project into a virtualenv once, then continue editing the source code (and
test) without re-installing after every change.
"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a
convenient way to specify executable scripts that should be installed along
with the python package.
These both work as expected when using modern setuptools. When using
setuptools-18.5 or earlier, however, certain operations will cause
`pkg_resources.DistributionNotFound` errors when running the entrypoint
script, which must be resolved by re-installing the package. This happens
when the install happens with one version, then the egg_info data is
regenerated while a different version is checked out. Many setup.py commands
cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into
a different virtualenv), so this can be surprising.
[Bug #83](https://github.com/warner/python-versioneer/issues/83) describes
this one, but upgrading to a newer version of setuptools should probably
resolve it.
### Unicode version strings
While Versioneer works (and is continually tested) with both Python 2 and
Python 3, it is not entirely consistent with bytes-vs-unicode distinctions.
Newer releases probably generate unicode version strings on py2. It's not
clear that this is wrong, but it may be surprising for applications when they
write these strings to a network connection or include them in bytes-oriented
APIs like cryptographic checksums.
[Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates
this question.
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
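In shell terms the upgrade is roughly (paths and the commit message are
illustrative only):
    pip install -U versioneer
    versioneer install
    git add versioneer.py setup.cfg SRC/_version.py
    git commit -m "upgrade Versioneer"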
## Future Directions
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## License
To make Versioneer easier to embed, all its code is dedicated to the public
domain. The `_version.py` that it creates is also in the public domain.
Specifically, both are released under the Creative Commons "Public Domain
Dedication" license (CC0-1.0), as described in
https://creativecommons.org/publicdomain/zero/1.0/ .
"""
import errno
import json
import os
import re
import subprocess
import sys
try:
import configparser
except ImportError:
import ConfigParser as configparser
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_root():
"""Get the project root directory.
We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py .
"""
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = (
"Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND')."
)
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we can't use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print(
"Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py)
)
except NameError:
pass
return root
def get_config_from_root(root):
"""Read the project setup.cfg file to determine Versioneer config."""
# This might raise EnvironmentError (if setup.cfg is missing), or
# configparser.NoSectionError (if it lacks a [versioneer] section), or
# configparser.NoOptionError (if it lacks "VCS="). See the docstring at
# the top of versioneer.py for instructions on writing your setup.cfg .
setup_cfg = os.path.join(root, "setup.cfg")
parser = configparser.SafeConfigParser()
with open(setup_cfg, "r") as f:
parser.readfp(f)
VCS = parser.get("versioneer", "VCS") # mandatory
def get(parser, name):
if parser.has_option("versioneer", name):
return parser.get("versioneer", name)
return None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, "style") or ""
cfg.versionfile_source = get(parser, "versionfile_source")
cfg.versionfile_build = get(parser, "versionfile_build")
cfg.tag_prefix = get(parser, "tag_prefix")
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ""
cfg.parentdir_prefix = get(parser, "parentdir_prefix")
cfg.verbose = get(parser, "verbose")
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(
[c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
LONG_VERSION_PY[
"git"
] = r'''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except OSError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r"\d", r)}
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s*" % tag_prefix,
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
0
].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
"""Git-specific installation logic for Versioneer.
For Git, this means creating/changing .gitattributes to mark _version.py
for export-subst keyword substitution.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except OSError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix) :],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print(
"Tried directories %s but none started with prefix %s"
% (str(rootdirs), parentdir_prefix)
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.18) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
"""Try to determine the version from _version.py if present."""
try:
with open(filename) as f:
contents = f.read()
except OSError:
raise NotThisMethod("unable to read _version.py")
mo = re.search(
r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S
)
if not mo:
mo = re.search(
r"version_json = '''\r\n(.*)''' # END VERSION_JSON", contents, re.M | re.S
)
if not mo:
raise NotThisMethod("no version_json in _version.py")
return json.loads(mo.group(1))
def write_to_version_file(filename, versions):
"""Write the given version number to the given _version.py file."""
os.unlink(filename)
contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": "))
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
class VersioneerBadRootError(Exception):
"""The project root directory is unknown or missing key files."""
def get_versions(verbose=False):
"""Get the project version from whatever source is available.
Returns dict with two keys: 'version' and 'full'.
"""
if "versioneer" in sys.modules:
# see the discussion in cmdclass.py:get_cmdclass()
del sys.modules["versioneer"]
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert (
cfg.versionfile_source is not None
), "please set versioneer.versionfile_source"
assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
versionfile_abs = os.path.join(root, cfg.versionfile_source)
# extract version from first of: _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = handlers.get("get_keywords")
from_keywords_f = handlers.get("keywords")
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print("got version from expanded keyword %s" % ver)
return ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print("got version from file %s %s" % (versionfile_abs, ver))
return ver
except NotThisMethod:
pass
from_vcs_f = handlers.get("pieces_from_vcs")
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print("got version from VCS %s" % ver)
return ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print("got version from parentdir %s" % ver)
return ver
except NotThisMethod:
pass
if verbose:
print("unable to compute version")
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
def get_version():
"""Get the short version string for this project."""
return get_versions()["version"]
def get_cmdclass():
"""Get the custom setuptools/distutils subclasses used by Versioneer."""
if "versioneer" in sys.modules:
del sys.modules["versioneer"]
# this fixes the "python setup.py develop" case (also 'install' and
# 'easy_install .'), in which subdependencies of the main project are
# built (using setup.py bdist_egg) in the same python process. Assume
# a main project A and a dependency B, which use different versions
# of Versioneer. A's setup.py imports A's Versioneer, leaving it in
# sys.modules by the time B's setup.py is executed, causing B to run
# with the wrong versioneer. Setuptools wraps the sub-dep builds in a
    # sandbox that restores sys.modules to its pre-build state, so the
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
# Also see https://github.com/warner/python-versioneer/issues/52
cmds = {}
# we add "version" to both distutils and setuptools
from distutils.core import Command
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
vers = get_versions(verbose=True)
print("Version: %s" % vers["version"])
print(" full-revisionid: %s" % vers.get("full-revisionid"))
print(" dirty: %s" % vers.get("dirty"))
print(" date: %s" % vers.get("date"))
if vers["error"]:
print(" error: %s" % vers["error"])
cmds["version"] = cmd_version
# we override "build_py" in both distutils and setuptools
#
# most invocation pathways end up running build_py:
# distutils/build -> build_py
# distutils/install -> distutils/build ->..
# setuptools/bdist_wheel -> distutils/install ->..
# setuptools/bdist_egg -> distutils/install_lib -> build_py
# setuptools/install -> bdist_egg ->..
# setuptools/develop -> ?
# pip install:
# copies source tree to a tempdir before running egg_info/etc
# if .git isn't copied too, 'git describe' will fail
# then does setup.py bdist_wheel, or sometimes setup.py install
# setup.py egg_info -> ?
# we override different "build_py" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.build_py import build_py as _build_py
else:
from distutils.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_py.run(self)
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_py"] = cmd_build_py
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
# nczeczulin reports that py2exe won't like the pep440-style string
# as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
# setup(console=[{
# "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
# "product_version": versioneer.get_version(),
# ...
class cmd_build_exe(_build_exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
}
)
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
if "py2exe" in sys.modules: # py2exe enabled?
try:
from py2exe.distutils_buildexe import py2exe as _py2exe # py3
except ImportError:
from py2exe.build_exe import py2exe as _py2exe # py2
class cmd_py2exe(_py2exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_py2exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
}
)
cmds["py2exe"] = cmd_py2exe
# we override different "sdist" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from distutils.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self):
versions = get_versions()
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old
# version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
root = get_root()
cfg = get_config_from_root(root)
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory
# (remembering that it may be a hardlink) and replace it with an
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(
target_versionfile, self._versioneer_generated_versions
)
cmds["sdist"] = cmd_sdist
return cmds
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
"""Main VCS-independent setup function for installing Versioneer."""
root = get_root()
try:
cfg = get_config_from_root(root)
except (OSError, configparser.NoSectionError, configparser.NoOptionError) as e:
if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg", file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
return 1
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
}
)
ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except OSError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(root, "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except OSError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if cfg.versionfile_source not in simple_includes:
print(
" appending versionfile_source ('%s') to MANIFEST.in"
% cfg.versionfile_source
)
with open(manifest_in, "a") as f:
f.write("include %s\n" % cfg.versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-subst keyword
# substitution.
do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
return 0
def scan_setup_py():
"""Validate the contents of setup.py against Versioneer's expectations."""
found = set()
setters = False
errors = 0
with open("setup.py", "r") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
if "versioneer.get_cmdclass()" in line:
found.add("cmdclass")
if "versioneer.get_version()" in line:
found.add("get_version")
if "versioneer.VCS" in line:
setters = True
if "versioneer.versionfile_source" in line:
setters = True
if len(found) != 3:
print("")
print("Your setup.py appears to be missing some important items")
print("(but I might be wrong). Please make sure it has something")
print("roughly like the following:")
print("")
print(" import versioneer")
print(" setup( version=versioneer.get_version(),")
print(" cmdclass=versioneer.get_cmdclass(), ...)")
print("")
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print("now lives in setup.cfg, and should be removed from setup.py")
print("")
errors += 1
return errors
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd == "setup":
errors = do_setup()
errors += scan_setup_py()
if errors:
sys.exit(1)
|
{
"content_hash": "ed24763f3f0b264b26c0b20aa675e2da",
"timestamp": "",
"source": "github",
"line_count": 1879,
"max_line_length": 88,
"avg_line_length": 36.502927088877065,
"alnum_prop": 0.6122410299027541,
"repo_name": "jhamman/xray",
"id": "7b55d5d06cb0ab563ad94eee8fa39192843831f3",
"size": "68622",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "versioneer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "2986"
},
{
"name": "Python",
"bytes": "833114"
}
],
"symlink_target": ""
}
|
"""Tests for qhbmlib.inference.vqt_loss"""
import functools
import cirq
import sympy
import tensorflow as tf
import tensorflow_quantum as tfq
from qhbmlib import inference
from qhbmlib import models
from tests import test_util
class VQTTest(tf.test.TestCase):
"""Tests for VQT."""
def setUp(self):
"""Initializes test objects."""
super().setUp()
self.num_qubits_list = [1, 2]
self.tf_random_seed = 5
self.tf_random_seed_alt = 6
self.tfp_seed = tf.constant([7, 8], tf.int32)
self.tfp_seed_alt = tf.constant([9, 10], tf.int32)
self.num_samples = int(1e7)
self.close_rtol = 3e-2 # tolerance depends on samples
self.zero_atol = 1e-3
self.not_zero_atol = 2e-3
@test_util.eager_mode_toggle
def test_self_vqt(self):
"""Confirms known value of the VQT loss of a model against itself."""
for num_qubits in self.num_qubits_list:
qubits = cirq.GridQubit.rect(1, num_qubits)
num_layers = 5
data_h, data_infer = test_util.get_random_hamiltonian_and_inference(
qubits,
num_layers,
f"data_objects_{num_qubits}",
self.num_samples,
ebm_seed=self.tfp_seed)
model_h, model_infer = test_util.get_random_hamiltonian_and_inference(
qubits,
num_layers,
f"hamiltonian_objects_{num_qubits}",
self.num_samples,
ebm_seed=self.tfp_seed)
# Set data equal to the model
data_h.set_weights(model_h.get_weights())
beta = 1.0 # Data and model are only the same if beta == 1
vqt = tf.function(inference.vqt)
# Trained loss is minus log partition of the data.
expected_loss = -1.0 * data_infer.e_inference.log_partition()
# Since this is the optimum, derivatives should all be zero.
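# (Sketch of why: the VQT objective has the free-energy form beta * <H> - S,
#  and when the model equals the data's thermal state at this beta it evaluates
#  to -log Z, where every parameter derivative vanishes.)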
expected_loss_derivative = [
tf.zeros_like(v) for v in model_h.trainable_variables
]
with tf.GradientTape() as tape:
actual_loss = vqt(model_infer, data_h, beta)
actual_loss_derivative = tape.gradient(actual_loss,
model_h.trainable_variables)
self.assertAllClose(actual_loss, expected_loss, self.close_rtol)
self.assertAllClose(
actual_loss_derivative, expected_loss_derivative, atol=self.zero_atol)
@test_util.eager_mode_toggle
def test_hamiltonian_vqt(self):
"""Tests derivatives of VQT with respect to both model and data."""
vqt_wrapper = tf.function(inference.vqt)
for num_qubits in self.num_qubits_list:
qubits = cirq.GridQubit.rect(1, num_qubits)
num_layers = 1
data_h, _ = test_util.get_random_hamiltonian_and_inference(
qubits,
num_layers,
f"data_objects_{num_qubits}",
self.num_samples,
initializer_seed=self.tf_random_seed,
ebm_seed=self.tfp_seed)
model_h, model_infer = test_util.get_random_hamiltonian_and_inference(
qubits,
num_layers,
f"hamiltonian_objects_{num_qubits}",
self.num_samples,
initializer_seed=self.tf_random_seed_alt,
ebm_seed=self.tfp_seed_alt)
beta = tf.random.uniform([], 0.1, 10, tf.float32, self.tf_random_seed)
with tf.GradientTape() as tape:
actual_loss = vqt_wrapper(model_infer, data_h, beta)
actual_gradient_model, actual_gradient_data = tape.gradient(
actual_loss,
(model_h.trainable_variables, data_h.trainable_variables))
expected_gradient_model, expected_gradient_data = test_util.approximate_gradient(
functools.partial(vqt_wrapper, model_infer, data_h, beta),
(model_h.trainable_variables, data_h.trainable_variables))
# Changing model parameters is working if finite difference derivatives
# are non-zero. Also confirms that model_h and data_h are different.
tf.nest.map_structure(
lambda x: self.assertAllGreater(tf.abs(x), self.not_zero_atol),
expected_gradient_model)
tf.nest.map_structure(
lambda x: self.assertAllGreater(tf.abs(x), self.not_zero_atol),
expected_gradient_data)
self.assertAllClose(
actual_gradient_model, expected_gradient_model, rtol=self.close_rtol)
self.assertAllClose(
actual_gradient_data, expected_gradient_data, rtol=self.close_rtol)
@test_util.eager_mode_toggle
def test_loss_value_x_rot(self):
"""Confirms correct values for a single qubit X rotation with H=Y.
# TODO(#159): remove colab link
See the colab notebook at the following link for derivations:
https://colab.research.google.com/drive/14987JCMju_8AVvvVoojwe6hA7Nlw-Dhe?usp=sharing
Since each qubit is independent, the loss is the sum over the individual
qubit losses, and the gradients are the per-qubit gradients.
"""
vqt = tf.function(inference.vqt)
for num_qubits in self.num_qubits_list:
# model definition
ebm_init = tf.keras.initializers.RandomUniform(
minval=-2.0, maxval=2.0, seed=self.tf_random_seed)
actual_energy = models.BernoulliEnergy(list(range(num_qubits)), ebm_init)
e_infer = inference.BernoulliEnergyInference(
actual_energy, self.num_samples, initial_seed=self.tfp_seed)
qubits = cirq.GridQubit.rect(1, num_qubits)
r_symbols = [sympy.Symbol(f"phi_{n}") for n in range(num_qubits)]
r_circuit = cirq.Circuit(
cirq.rx(r_s)(q) for r_s, q in zip(r_symbols, qubits))
qnn_init = tf.keras.initializers.RandomUniform(
minval=-1, maxval=1, seed=self.tf_random_seed)
actual_circuit = models.DirectQuantumCircuit(r_circuit, qnn_init)
q_infer = inference.AnalyticQuantumInference(actual_circuit)
qhbm_infer = inference.QHBM(e_infer, q_infer)
# TODO(#171): code around here seems like boilerplate.
model_h = qhbm_infer.modular_hamiltonian
# Generate remaining VQT arguments
test_h = tfq.convert_to_tensor(
[cirq.PauliSum.from_pauli_strings(cirq.Y(q) for q in qubits)])
test_beta = tf.random.uniform([], 0.01, 100.0, tf.float32,
self.tf_random_seed)
# Compute losses
# Bernoulli has only one tf.Variable
test_thetas = model_h.energy.trainable_variables[0]
# QNN has only one tf.Variable
test_phis = model_h.circuit.trainable_variables[0]
actual_expectation = qhbm_infer.expectation(test_h)[0]
expected_expectation = tf.reduce_sum(
tf.math.tanh(test_thetas) * tf.math.sin(test_phis))
self.assertAllClose(
actual_expectation, expected_expectation, rtol=self.close_rtol)
actual_entropy = qhbm_infer.e_inference.entropy()
expected_entropy = tf.reduce_sum(
-test_thetas * tf.math.tanh(test_thetas) +
tf.math.log(2 * tf.math.cosh(test_thetas)))
self.assertAllClose(
actual_entropy, expected_entropy, rtol=self.close_rtol)
with tf.GradientTape() as tape:
actual_loss = vqt(qhbm_infer, test_h, test_beta)
expected_loss = test_beta * expected_expectation - expected_entropy
self.assertAllClose(actual_loss, expected_loss, rtol=self.close_rtol)
actual_thetas_grads, actual_phis_grads = tape.gradient(
actual_loss, (test_thetas, test_phis))
expected_thetas_grads = (1 - tf.math.tanh(test_thetas)**2) * (
test_beta * tf.math.sin(test_phis) + test_thetas)
expected_phis_grads = test_beta * tf.math.tanh(test_thetas) * tf.math.cos(
test_phis)
self.assertAllClose(
actual_thetas_grads, expected_thetas_grads, rtol=self.close_rtol)
self.assertAllClose(
actual_phis_grads, expected_phis_grads, rtol=self.close_rtol)
if __name__ == "__main__":
print("Running vqt_loss_test.py ...")
tf.test.main()
|
{
"content_hash": "36f93ad4faf78ecda93dd3c039b27720",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 89,
"avg_line_length": 39.88775510204081,
"alnum_prop": 0.6506779227423893,
"repo_name": "google/qhbm-library",
"id": "3b95ae6c24ac8420022ec3acc2d1af8b518d7c34",
"size": "8510",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/inference/vqt_loss_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "332789"
},
{
"name": "Shell",
"bytes": "2991"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class OpacityValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="opacity", parent_name="bar.unselected.marker", **kwargs
):
super(OpacityValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
max=kwargs.pop("max", 1),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "style"),
**kwargs
)
|
{
"content_hash": "bdfd9226908b7283675237ae59025525",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 82,
"avg_line_length": 34.1875,
"alnum_prop": 0.5740402193784278,
"repo_name": "plotly/python-api",
"id": "5fe2476f48e212ae0989837a5b379b4feab29054",
"size": "547",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/bar/unselected/marker/_opacity.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
"""Module for the DocRef cog."""
import asyncio
import os
import pathlib
import re
import tempfile
import urllib.parse
from typing import Dict, Iterator, List, Match, Optional, Tuple, cast
import aiohttp
import discord
import sphinx.util.inventory as sphinx_inv
from redbot.core import Config, checks, commands, data_manager
from redbot.core.utils import chat_formatting as chatutils
from .errors import (
AlreadyUpToDate,
Forbidden,
HTTPError,
InternalError,
InvNotAvailable,
NoMoreRefs,
NotFound,
)
from .types import (
FilterFunc,
InvData,
InvMetaData,
MatchesDict,
NodeRef,
RawInvData,
RawInvMetaData,
RefDict,
RefSpec,
)
UNIQUE_ID = 0x178AC710
class DocRef(commands.Cog):
"""Search for references on documentation webpages.
I need to be able to embed links for this cog to be useful!
"""
def __init__(self):
super().__init__()
self.conf: Config = Config.get_conf(
self, identifier=UNIQUE_ID, force_registration=True
)
self.conf.register_global(sites={}, inv_metadata={})
self.conf.register_guild(sites={})
self.invs_data: Dict[str, InvData] = {}
self.invs_dir: pathlib.Path = data_manager.cog_data_path(self) / "invs"
self.invs_dir.mkdir(parents=True, exist_ok=True)
self.session: aiohttp.ClientSession = aiohttp.ClientSession()
@commands.command(aliases=["ref", "rtd", "rtfm"])
async def docref(self, ctx: commands.Context, sitename: str, *, node_ref: NodeRef):
"""Search for a reference in documentation webpages.
This will display a list of hyperlinks to possible matches for the
provided node reference.
`<sitename>` is the name for the documentation webpage. This is set
when the webpage is added with `[p]addsite`.
`<node_ref>` is a reference to a sphinx node in reST syntax; however,
most of the syntactic operators can be omitted for a vaguer
reference.
For example, all of these commands will return the same result:
``[p]docref pydocs :py:class:`int`\u200B``
``[p]docref pydocs :class:`int`\u200B``
``[p]docref pydocs :class:int``
``[p]docref pydocs `int`\u200B``
``[p]docref pydocs int``
"""
# First we get the base URL and inventory data
try:
url, inv_data = await self.get_inv_data(sitename, ctx.guild)
except InvNotAvailable:
await ctx.send(f'Couldn\'t find the site name "{sitename}".')
return
except NotFound:
await ctx.send(
f'It appears as though the "{sitename}" site\'s URL is now a 404.'
)
return
# Now we need to filter the data according to our node_ref
filter_func: FilterFunc = self._get_filter_func(node_ref)
reftypes: Iterator[str] = filter(filter_func, inv_data.keys())
exact_matches: MatchesDict = {}
partial_matches: MatchesDict = {}
# If the reftype is bogus, the filter result will be empty
# Thus, we'll never enter the loop
valid_reftype = False
for reftype in reftypes:
valid_reftype = True
ref_dict = inv_data[reftype]
tup = self.get_matches(node_ref.refname, ref_dict)
matches: List[RefSpec] = tup[0]
exact: bool = tup[1]
if not matches:
continue
if exact is True:
assert matches # just double check our subroutine didn't do a poopoo
exact_matches[reftype] = matches
elif exact_matches:
# we've already found closer matches than these, discard
continue
else:
partial_matches[reftype] = matches
if not valid_reftype:
await ctx.send(
f"Couldn't find any references with the `:{node_ref.reftype}:` "
f"directive."
)
return
matches: MatchesDict = exact_matches or partial_matches
if not matches:
await ctx.send(
f"Couldn't find any references matching ``{node_ref}\u200B``."
)
return
metadata = await self.get_inv_metadata(url)
embed_list = self._new_match_embed(metadata, matches, bool(exact_matches))
for embed in embed_list:
await ctx.send(embed=embed)
@commands.command()
@checks.admin_or_permissions(administrator=True)
async def addsite(self, ctx: commands.Context, sitename: str, url: str, scope=None):
"""Add a new documentation site.
`<url>` must point to the actual docs webpage, not a redirect
URL. For example, `https://docs.python.org` is invalid; however, the
URL it redirects to, `https://docs.python.org/3/`, is valid.
`<scope>` is an owner-only argument and specifies where this site can
be accessed from. Defaults to `server` for everyone except the bot
owner, whose scope defaults to `global`.
"""
if not url.startswith("https://"):
await ctx.send("Must be an HTTPS URL.")
return
if not url.endswith("/"):
url += "/"
is_owner = await ctx.bot.is_owner(ctx.author)
if scope is not None and not is_owner:
await ctx.send("Only bot owners can specify the scope.")
return
elif scope is None:
if is_owner:
scope = "global"
else:
scope = "guild"
scope = scope.lower()
if scope in ("server", "guild"):
if ctx.guild is None:
await ctx.send(f"Can't add to {scope} scope from DM.")
return
conf_group = self.conf.guild(ctx.guild).sites
elif scope == "global":
conf_group = self.conf.sites
else:
await ctx.send(f'Unknown scope "{scope}".')
return
try:
async with ctx.typing():
await self.update_inv(url)
except NotFound:
await ctx.send("Couldn't find an inventory from that URL.")
return
except HTTPError as exc:
await ctx.send(
f"Something went wrong whilst trying to download the "
f"inventory file. HTTP response code {exc.code}."
)
return
else:
existing_url = await conf_group.get_raw(sitename, default=None)
if existing_url is not None:
await self._decref(existing_url)
await conf_group.set_raw(sitename, value=url)
await self._incref(url)
await ctx.tick()
@commands.command(aliases=["removesite"])
@checks.admin_or_permissions(administrator=True)
async def delsite(self, ctx: commands.Context, sitename: str):
"""Remove a documentation site.
This command will remove just one site, and if there are multiple
sites with the same name, it will remove the most local one.
Only bot owners can delete global sites.
"""
is_owner = await ctx.bot.is_owner(ctx.author)
try:
await self.remove_site(sitename, ctx.guild, is_owner)
except InvNotAvailable:
await ctx.send(f"Couldn't find a site by the name `{sitename}`.")
except Forbidden as exc:
await ctx.send(exc.args[0])
else:
await ctx.tick()
@commands.command()
async def docsites(self, ctx: commands.Context):
"""List all installed and available documentation websites."""
sites = await self.conf.sites()
if ctx.guild is not None:
sites.update(await self.conf.guild(ctx.guild).sites())
lines: List[str] = []
for name, url in sites.items():
try:
metadata = await self.get_inv_metadata(url)
except InvNotAvailable:
continue
lines.append(f"`{name}` - [{metadata}]({url})")
if not lines:
await ctx.send("No sites are available.")
description = "\n".join(lines)
for page in chatutils.pagify(description, page_length=2048):
await ctx.send(embed=discord.Embed(description=page))
@commands.command()
@checks.is_owner()
async def forceupdate(self, ctx: commands.Context, sitename: str):
"""Force a documentation webpage to be updated.
Updates are checked for every time you use `[p]docref`. However,
the inventory cache isn't actually updated unless we have an old
version number.
This command will force the site to be updated irrespective of the
version number.
"""
url: str = await self.get_url(sitename)
if url is None:
await ctx.send(f'Couldn\'t find the site name "{sitename}".')
return
try:
async with ctx.typing():
await self.update_inv(url, force=True)
except NotFound:
await ctx.send(
f'It appears as though the "{sitename}" site\'s URL is now a 404.'
)
else:
await ctx.tick()
@staticmethod
def get_matches(refname: str, ref_dict: RefDict) -> Tuple[List[RefSpec], bool]:
"""Get a list of matching references.
First this function will look for exact matches (for which there will
only be one), and if it can't find any, it will look for references
whose name ends with the given ``refname``.
Arguments
---------
refname
The name of the reference being looked for.
ref_dict
A mapping from references to `RefSpec` objects.
Returns
-------
Tuple[List[RefSpec], bool]
The `bool` will be ``True`` if the matches returned are exact.
"""
# first look for an exact match
if refname in ref_dict:
return [ref_dict[refname]], True
# look for references ending with the refname
return (
[
ref_spec
for cur_refname, ref_spec in ref_dict.items()
if cur_refname.endswith(refname)
],
False,
)
async def get_inv_data(
self, site: str, guild: Optional[discord.Guild] = None
) -> Tuple[str, InvData]:
"""Get data for an inventory by its user-defined name and scope.
Also updates the locally cached inventory if necessary.
Returns
-------
Tuple[str, InvData]
A tuple in the form (url, data).
"""
url = await self.get_url(site, guild)
if url is None:
raise InvNotAvailable()
await self.update_inv(url)
return url, self.invs_data[url]
async def get_url(
self, sitename: str, guild: Optional[discord.Guild] = None
) -> Optional[str]:
"""Get a URL by its sitename and scope.
Arguments
---------
sitename : str
The user-defined site name.
guild : Optional[discord.Guild]
The guild whose data the URL is being retrieved from.
Returns
-------
Optional[str]
The URL for the requested site. ``None`` if no site is found.
"""
if guild is not None:
url = await self.conf.guild(guild).sites.get_raw(sitename, default=None)
if url is not None:
return url
return await self.conf.sites.get_raw(sitename, default=None)
async def remove_site(
self, sitename: str, guild: Optional[discord.Guild], is_owner: bool
) -> None:
"""Remove a site from the given scope.
Only removes one site at a time. If there is a site with the same name
in both the guild and global scope, only the guild one will be
removed.
Arguments
---------
sitename
The user-defined site name.
guild
The guild whose data is being mutated.
is_owner
Whether or not the user doing the action is the bot owner.
Raises
------
InvNotAvailable
If no site with that name is available in the given scope.
Forbidden
If the user does not have the right privileges to remove the site.
"""
url = await self.get_url(sitename, guild)
if url is None:
raise InvNotAvailable()
if guild is not None:
sites = await self.conf.guild(guild).sites()
if sitename in sites:
del sites[sitename]
await self.conf.guild(guild).sites.set(sites)
await self._decref(url)
return
if not is_owner:
raise Forbidden("Only bot owners can delete global sites.")
async with self.conf.sites() as sites:
del sites[sitename]
await self._decref(url)
async def update_inv(self, url: str, *, force: bool = False) -> InvData:
"""Update a locally cached inventory.
Unless ``force`` is ``True``, the cache is only refreshed when the stored
inventory metadata no longer matches the remote inventory's metadata.
Arguments
---------
url : str
The URL for the docs website. This is the path to the webpage, and
not to the inventory file.
force : bool
Whether or not we should force the update. Defaults to ``False``.
Returns
-------
InvData
The up-to-date data for the inventory.
"""
try:
data = await self.get_inv_from_url(url, force_update=force)
except AlreadyUpToDate:
try:
data = self.invs_data[url]
except KeyError:
path = self._get_inv_path(url)
data = self.load_inv_file(path, url)
self.invs_data[url] = data
else:
self.invs_data[url] = data
return data
def _get_inv_path(self, url: str) -> pathlib.Path:
return self.invs_dir / f"{safe_filename(url)}.inv"
async def get_inv_from_url(
self, url: str, *, force_update: bool = False
) -> InvData:
"""Gets inventory data from its URL.
Arguments
---------
url : str
The URL for the docs website.
force_update : bool
Whether or not the inventory should be force updated. Defaults to
``False``.
Returns
-------
InvData
The data for the requested inventory.
Raises
------
AlreadyUpToDate
If the inventory was already up to date, and ``force_update`` was
``False``.
"""
inv_path = await self.download_inv_file(url, force_update=force_update)
return self.load_inv_file(inv_path, url)
def load_inv_file(self, file_path: pathlib.Path, url: str) -> InvData:
"""Load an inventory file from its filepath.
Returns
-------
InvData
The data from the inventory file.
"""
inv_data = self._load_inv_file_raw(file_path, url)
return self._format_raw_inv_data(inv_data)
@staticmethod
def _load_inv_file_raw(file_path: pathlib.Path, url: str) -> RawInvData:
with file_path.open("rb") as stream:
inv_data = sphinx_inv.InventoryFile.load(stream, url, urllib.parse.urljoin)
return inv_data
async def download_inv_file(
self, url: str, *, force_update: bool = False
) -> pathlib.Path:
"""Download the inventory file from a URL.
Arguments
---------
url : str
The URL for the docs website. This is the path to the webpage, and
not to the inventory file.
force_update : bool
Whether or not the data should be forcibly updated. Defaults to
``False``.
Raises
------
AlreadyUpToDate
If the local version matches that of the remote, and
``force_update`` is False.
Returns
-------
pathlib.Path
The path to the local inventory file.
"""
inv_path = self._get_inv_path(url)
inv_url = urllib.parse.urljoin(url, "objects.inv")
async with self.session.get(inv_url) as resp:
self._check_response(resp)
# read header comments to get version
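# A Sphinx objects.inv typically begins with header lines of the form
# (illustrative):
#   # Sphinx inventory version 2
#   # Project: myproject
#   # Version: 1.0
# so lines 1 and 2 carry the project name and version after an 11-character
# "# Project: " / "# Version: " prefix, which is what the [11:] slices read.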
header_lines: List[bytes] = []
idx = 0
async for line in resp.content:
header_lines.append(cast(bytes, line))
idx += 1
if idx > 2:
break
projname = header_lines[1].rstrip()[11:].decode()
version = header_lines[2].rstrip()[11:].decode()
metadata = InvMetaData(projname, version)
if not force_update and await self._inv_metadata_matches(url, metadata):
raise AlreadyUpToDate()
fd, filename = tempfile.mkstemp()
with open(fd, "wb") as stream:
async with self.session.get(inv_url) as resp:
chunk = await resp.content.read(1024)
while chunk:
stream.write(chunk)
chunk = await resp.content.read(1024)
os.replace(filename, inv_path)
await self.set_inv_metadata(url, metadata)
return inv_path
@staticmethod
def _check_response(resp: aiohttp.ClientResponse) -> None:
"""Checks a response to an HTTP request and raises the appropriate error.
Raises
------
NotFound
If the response code is 404.
HTTPError
If there was an unexpected response code.
"""
if resp.status == 200:
return
elif resp.status == 404:
error_cls = NotFound
else:
error_cls = HTTPError
raise error_cls(resp.status, resp.reason, resp)
async def _inv_metadata_matches(self, url: str, metadata: InvMetaData) -> bool:
try:
existing_metadata: InvMetaData = await self.get_inv_metadata(url)
except InvNotAvailable:
return False
else:
return metadata == existing_metadata
async def get_inv_metadata(self, url: str) -> InvMetaData:
"""Get metadata for an inventory.
Arguments
---------
url : str
The URL for the docs website.
Returns
-------
InvMetaData
The metadata for the inventory.
Raises
------
InvNotAvailable
If there is no inventory matching that URL.
"""
try:
raw_metadata: RawInvMetaData = await self.conf.inv_metadata.get_raw(url)
except KeyError:
raise InvNotAvailable
else:
return InvMetaData(**raw_metadata)
async def set_inv_metadata(self, url: str, metadata: InvMetaData) -> None:
"""Set metadata for an inventory.
Arguments
---------
url : str
The URL for the docs website.
metadata : InvMetaData
The inventory's metadata.
"""
await self.conf.inv_metadata.set_raw(url, value=metadata.to_dict())
@staticmethod
def _format_raw_inv_data(inv_data: RawInvData) -> InvData:
ret: InvData = {}
for ref_type, refs_dict in inv_data.items():
new_refs_dict: RefDict = {}
for ref_name, raw_ref_spec in refs_dict.items():
ref_url: str = raw_ref_spec[2]
display_name: str = raw_ref_spec[3]
if display_name == "-":
display_name = ref_name
else:
display_name = f"{ref_name} - {display_name}"
new_refs_dict[ref_name] = RefSpec(ref_url, display_name)
ret[ref_type] = new_refs_dict
return ret
@staticmethod
def _new_match_embed(
metadata: InvMetaData, matches: MatchesDict, exact: bool
) -> List[discord.Embed]:
count = 0
match_type = "exact" if exact else "possible"
lines: List[str] = []
for reftype, refspec_list in matches.items():
lines.append(chatutils.bold(reftype))
for refspec in refspec_list:
count += 1
# The zero-width space is necessary to make sure discord doesn't remove
# leading spaces at the start of an embed.
lines.append(
"\u200b" + (" " * 4) + f"[{refspec.display_name}]({refspec.url})"
)
plural = "es" if count > 1 else ""
description = "\n".join(lines)
ret: List[discord.Embed] = []
for page in chatutils.pagify(description, page_length=2048):
# my little hack to make sure pagify doesn't strip the initial indent
if not page.startswith("**"):
page = " " * 4 + page
ret.append(discord.Embed(description=page))
ret[0].title = f"Found {count} {match_type} match{plural}."
ret[-1].set_footer(text=f"{metadata.projname} {metadata.version}")
return ret
@staticmethod
def _get_filter_func(node_ref: NodeRef) -> FilterFunc:
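# Inventory reftypes look like "<lang>:<role>", e.g. "py:class" or "std:doc"
# (illustrative values); the filter below keeps only reftypes whose language
# and/or role agree with what the NodeRef specifies.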
if node_ref.role == "any":
if node_ref.lang is not None:
# Some weirdo did a :lang:any: search
def _filter(reftype: str) -> bool:
lang_and_role = reftype.split(":")
# This should return a sequence in the form [lang, role]
# But we should check and make sure just in case
if len(lang_and_role) != 2:
raise InternalError(
f"Unexpected reftype in inventory data {reftype}"
)
lang = lang_and_role[0]
return lang == node_ref.lang
else:
# If the role is just :any: we don't filter at all
def _filter(_: str) -> bool:
return True
elif node_ref.role and node_ref.lang:
def _filter(reftype: str) -> bool:
return reftype == f"{node_ref.lang}:{node_ref.role}"
elif node_ref.role and not node_ref.lang:
def _filter(reftype: str) -> bool:
lang_and_role = reftype.split(":")
if len(lang_and_role) != 2:
raise InternalError(
f"Unexpected reftype in inventory data {reftype}"
)
role = lang_and_role[1]
return node_ref.role == role
else:
# We shouldn't have got here
raise InternalError(f"Unexpected NodeRef {node_ref!r}")
return _filter
async def _decref(self, url: str) -> None:
metadata = await self.get_inv_metadata(url)
try:
metadata.dec_refcount()
except NoMoreRefs:
await self._destroy_inv(url)
else:
await self.set_inv_metadata(url, metadata)
async def _incref(self, url: str) -> None:
metadata = await self.get_inv_metadata(url)
metadata.inc_refcount()
await self.set_inv_metadata(url, metadata)
async def _destroy_inv(self, url: str) -> None:
async with self.conf.inv_metadata() as inv_metadata:
del inv_metadata[url]
try:
del self.invs_data[url]
except KeyError:
pass
inv_file = self._get_inv_path(url)
if inv_file.exists():
inv_file.unlink()
def cog_unload(self) -> None:
asyncio.create_task(self.session.close())
_INVALID_CHARSET = re.compile("[^A-z0-9_]")
def _replace_invalid_char(match: Match[str]) -> str:
return str(ord(match[0]))
def safe_filename(instr: str) -> str:
"""Generates a filename-friendly string.
Useful for creating filenames unique to URLs.
"""
return "_" + _INVALID_CHARSET.sub(_replace_invalid_char, instr)
|
{
"content_hash": "68acfa32e87c27c0084ecf8e35ebc45e",
"timestamp": "",
"source": "github",
"line_count": 747,
"max_line_length": 88,
"avg_line_length": 32.57429718875502,
"alnum_prop": 0.5570624255126783,
"repo_name": "Tobotimus/Tobo-Cogs",
"id": "68c262ac27ae61c740657c0dd0c4d567e642857d",
"size": "24333",
"binary": false,
"copies": "1",
"ref": "refs/heads/V3",
"path": "docref/docref.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "171"
},
{
"name": "Python",
"bytes": "152578"
},
{
"name": "TSQL",
"bytes": "167"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) 2016 Keith Sterling
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
from programy.dialog import Sentence
class Match(object):
WORD = 0
TOPIC = 2
THAT = 3
def __init__(self, type, node, word):
self._match_type = type
self._matched_node = node
self._matched_words = []
if word is not None:
self.add_word(word)
def add_word(self, word):
self._matched_words.append(word)
@property
def match_type(self):
return self._match_type
@property
def matched_node(self):
return self._matched_node
@property
def matched_words(self):
return self._matched_words
def joined_words(self, join_char=" "):
return join_char.join(self.matched_words)
@staticmethod
def type_to_string(type):
if type == Match.WORD:
return "Word"
elif type == Match.TOPIC:
return "Topic"
elif type == Match.THAT:
return "That"
else:
return "Unknown"
def to_string(self):
return "Match=(%s) Node=(%s) Matched=(%s)"%(Match.type_to_string(self._match_type), self._matched_node.to_string(verbose=False), self.joined_words())
class MatchContext(object):
MAX_SEARCH_DEPTH = 10000
def __init__(self, max_search_depth=MAX_SEARCH_DEPTH):
self._matched_nodes = []
self._template_node = None
self._max_search_depth = max_search_depth
@property
def max_search_depth(self):
return self._max_search_depth
def add_match(self, match):
self._matched_nodes.append(match)
def pop_match(self):
if len(self._matched_nodes) > 0:
self._matched_nodes.pop()
def pop_matches(self, matches_add):
for x in range(0, matches_add):
self.pop_match()
def set_template(self, template):
self._template_node = template
@property
def matched_nodes(self):
return self._matched_nodes
def template_node(self):
return self._template_node
def matched(self):
if self._template_node is not None:
return True
else:
return False
def _get_indexed_match_by_type(self, index, type):
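# Returns the words captured by the index-th wildcard/set/bot node of the given
# match type (1-based), or None if there are fewer such matches; this backs the
# star(), topicstar() and thatstar() helpers below.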
count = 1
for match in self._matched_nodes:
if match._match_type == type and \
( match._matched_node.is_wildcard() or
match._matched_node.is_set() or
match._matched_node.is_bot()):
if count == index:
return match.joined_words()
count += 1
return None
def star(self, index):
return self._get_indexed_match_by_type(index, Match.WORD)
def topicstar(self, index):
return self._get_indexed_match_by_type(index, Match.TOPIC)
def thatstar(self, index):
return self._get_indexed_match_by_type(index, Match.THAT)
def list_matches(self, output_func=logging.info):
output_func("Matches...")
count = 1
for match in self._matched_nodes:
output_func("\t%d - %s"%(count, match.to_string()))
count += 1
if self.matched() is True:
output_func("\tT - %s"%(self._template_node.to_string()))
else:
output_func("\tT - None")
|
{
"content_hash": "1e7aab949e78d9b6693cd118fa517b78",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 157,
"avg_line_length": 31.201438848920862,
"alnum_prop": 0.6241641687802628,
"repo_name": "dkamotsky/program-y",
"id": "20e630a9deac617f91d83632772b0494bcc19387",
"size": "4337",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/programy/parser/pattern/matcher.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "937"
},
{
"name": "HTML",
"bytes": "1583"
},
{
"name": "Python",
"bytes": "1131157"
},
{
"name": "Shell",
"bytes": "3481"
}
],
"symlink_target": ""
}
|
import functools
import operator
from nova import block_device
from nova.i18n import _
from nova.i18n import _LI
from nova import objects
from nova.objects import base as obj_base
from nova.openstack.common import excutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.volume import encryptors
LOG = logging.getLogger(__name__)
class _NotTransformable(Exception):
pass
class _InvalidType(_NotTransformable):
pass
class _NoLegacy(Exception):
pass
def update_db(method):
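# Decorator: call the wrapped method, then persist the DriverBlockDevice's
# state via obj.save(context) so the database row stays in sync with the dict.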
@functools.wraps(method)
def wrapped(obj, context, *args, **kwargs):
ret_val = method(obj, context, *args, **kwargs)
obj.save(context)
return ret_val
return wrapped
class DriverBlockDevice(dict):
"""A dict subclass that represents block devices used by the virt layer.
Uses block device objects internally to do the database access.
_fields and _legacy_fields class attributes present a set of fields that
are expected on a certain DriverBlockDevice type. We may have more legacy
versions in the future.
If an attribute access is attempted for a name that is found in the
_proxy_as_attr set, it will be proxied to the underlying object. This
allows us to access stuff that is not part of the data model that all
drivers understand.
The save() method allows us to update the database using the underlying
object. _update_on_save class attribute dictionary keeps the following
mapping:
{'object field name': 'driver dict field name (or None if same)'}
These fields will be updated on the internal object, from the values in the
dict, before the actual database update is done.
"""
_fields = set()
_legacy_fields = set()
_proxy_as_attr = set()
_update_on_save = {'disk_bus': None,
'device_name': None,
'device_type': None}
def __init__(self, bdm):
# TODO(ndipanov): Remove this check when we have all the rpc methods
# use objects for block devices.
if isinstance(bdm, obj_base.NovaObject):
self.__dict__['_bdm_obj'] = bdm
else:
self.__dict__['_bdm_obj'] = objects.BlockDeviceMapping()
self._bdm_obj.update(block_device.BlockDeviceDict(bdm))
self._bdm_obj.obj_reset_changes()
if self._bdm_obj.no_device:
raise _NotTransformable()
self.update(dict((field, None)
for field in self._fields))
self._transform()
def __getattr__(self, name):
if name in self._proxy_as_attr:
return getattr(self._bdm_obj, name)
else:
super(DriverBlockDevice, self).__getattr__(name)
def __setattr__(self, name, value):
if name in self._proxy_as_attr:
return setattr(self._bdm_obj, name, value)
else:
super(DriverBlockDevice, self).__setattr__(name, value)
def _transform(self):
"""Transform bdm to the format that is passed to drivers."""
raise NotImplementedError()
def legacy(self):
"""Basic legacy transformation.
Basic method will just drop the fields that are not in
_legacy_fields set. Override this in subclass if needed.
"""
return dict((key, self.get(key)) for key in self._legacy_fields)
def attach(self, **kwargs):
"""Make the device available to be used by VMs.
To be overridden in subclasses with the connecting logic for
the type of device the subclass represents.
"""
raise NotImplementedError()
def save(self, context=None):
for attr_name, key_name in self._update_on_save.iteritems():
setattr(self._bdm_obj, attr_name, self[key_name or attr_name])
if context:
self._bdm_obj.save(context)
else:
self._bdm_obj.save()
class DriverSwapBlockDevice(DriverBlockDevice):
_fields = set(['device_name', 'swap_size', 'disk_bus'])
_legacy_fields = _fields - set(['disk_bus'])
_update_on_save = {'disk_bus': None,
'device_name': None}
def _transform(self):
if not block_device.new_format_is_swap(self._bdm_obj):
raise _InvalidType
self.update({
'device_name': self._bdm_obj.device_name,
'swap_size': self._bdm_obj.volume_size or 0,
'disk_bus': self._bdm_obj.disk_bus
})
class DriverEphemeralBlockDevice(DriverBlockDevice):
_new_only_fields = set(['disk_bus', 'device_type', 'guest_format'])
_fields = set(['device_name', 'size']) | _new_only_fields
_legacy_fields = (_fields - _new_only_fields |
set(['num', 'virtual_name']))
def _transform(self):
if not block_device.new_format_is_ephemeral(self._bdm_obj):
raise _InvalidType
self.update({
'device_name': self._bdm_obj.device_name,
'size': self._bdm_obj.volume_size or 0,
'disk_bus': self._bdm_obj.disk_bus,
'device_type': self._bdm_obj.device_type,
'guest_format': self._bdm_obj.guest_format
})
def legacy(self, num=0):
legacy_bdm = super(DriverEphemeralBlockDevice, self).legacy()
legacy_bdm['num'] = num
legacy_bdm['virtual_name'] = 'ephemeral' + str(num)
return legacy_bdm
class DriverVolumeBlockDevice(DriverBlockDevice):
_legacy_fields = set(['connection_info', 'mount_device',
'delete_on_termination'])
_new_fields = set(['guest_format', 'device_type',
'disk_bus', 'boot_index'])
_fields = _legacy_fields | _new_fields
_valid_source = 'volume'
_valid_destination = 'volume'
_proxy_as_attr = set(['volume_size', 'volume_id'])
_update_on_save = {'disk_bus': None,
'device_name': 'mount_device',
'device_type': None}
def _transform(self):
if (not self._bdm_obj.source_type == self._valid_source
or not self._bdm_obj.destination_type ==
self._valid_destination):
raise _InvalidType
self.update(
dict((k, v) for k, v in self._bdm_obj.iteritems()
if k in self._new_fields | set(['delete_on_termination']))
)
self['mount_device'] = self._bdm_obj.device_name
try:
self['connection_info'] = jsonutils.loads(
self._bdm_obj.connection_info)
except TypeError:
self['connection_info'] = None
def _preserve_multipath_id(self, connection_info):
if self['connection_info'] and 'data' in self['connection_info']:
if 'multipath_id' in self['connection_info']['data']:
connection_info['data']['multipath_id'] =\
self['connection_info']['data']['multipath_id']
LOG.info(_LI('preserve multipath_id %s'),
connection_info['data']['multipath_id'])
@update_db
def attach(self, context, instance, volume_api, virt_driver,
do_check_attach=True, do_driver_attach=False):
volume = volume_api.get(context, self.volume_id)
if do_check_attach:
volume_api.check_attach(context, volume, instance=instance)
volume_id = volume['id']
context = context.elevated()
connector = virt_driver.get_volume_connector(instance)
connection_info = volume_api.initialize_connection(context,
volume_id,
connector)
if 'serial' not in connection_info:
connection_info['serial'] = self.volume_id
self._preserve_multipath_id(connection_info)
# If do_driver_attach is False, we will attach a volume to an instance
# at boot time. So actual attach is done by instance creation code.
if do_driver_attach:
encryption = encryptors.get_encryption_metadata(
context, volume_api, volume_id, connection_info)
try:
virt_driver.attach_volume(
context, connection_info, instance,
self['mount_device'], disk_bus=self['disk_bus'],
device_type=self['device_type'], encryption=encryption)
except Exception: # pylint: disable=W0702
with excutils.save_and_reraise_exception():
LOG.exception(_("Driver failed to attach volume "
"%(volume_id)s at %(mountpoint)s"),
{'volume_id': volume_id,
'mountpoint': self['mount_device']},
context=context, instance=instance)
volume_api.terminate_connection(context, volume_id,
connector)
self['connection_info'] = connection_info
mode = 'rw'
if 'data' in connection_info:
mode = connection_info['data'].get('access_mode', 'rw')
if volume['attach_status'] == "detached":
volume_api.attach(context, volume_id, instance['uuid'],
self['mount_device'], mode=mode)
@update_db
def refresh_connection_info(self, context, instance,
volume_api, virt_driver):
# NOTE (ndipanov): A no-op if there is no connection info already
if not self['connection_info']:
return
connector = virt_driver.get_volume_connector(instance)
connection_info = volume_api.initialize_connection(context,
self.volume_id,
connector)
if 'serial' not in connection_info:
connection_info['serial'] = self.volume_id
self._preserve_multipath_id(connection_info)
self['connection_info'] = connection_info
def save(self, context=None):
# NOTE(ndipanov): we might want to generalize this by adding it to the
# _update_on_save and adding a transformation function.
try:
self._bdm_obj.connection_info = jsonutils.dumps(
self.get('connection_info'))
except TypeError:
pass
super(DriverVolumeBlockDevice, self).save(context)
class DriverSnapshotBlockDevice(DriverVolumeBlockDevice):
_valid_source = 'snapshot'
_proxy_as_attr = set(['volume_size', 'volume_id', 'snapshot_id'])
def attach(self, context, instance, volume_api,
virt_driver, wait_func=None, do_check_attach=True):
if not self.volume_id:
snapshot = volume_api.get_snapshot(context,
self.snapshot_id)
vol = volume_api.create(context, self.volume_size,
'', '', snapshot)
if wait_func:
wait_func(context, vol['id'])
self.volume_id = vol['id']
# Call the volume attach now
super(DriverSnapshotBlockDevice, self).attach(
context, instance, volume_api, virt_driver,
do_check_attach=do_check_attach)
class DriverImageBlockDevice(DriverVolumeBlockDevice):
_valid_source = 'image'
_proxy_as_attr = set(['volume_size', 'volume_id', 'image_id'])
def attach(self, context, instance, volume_api,
virt_driver, wait_func=None, do_check_attach=True):
if not self.volume_id:
vol = volume_api.create(context, self.volume_size,
'', '', image_id=self.image_id)
if wait_func:
wait_func(context, vol['id'])
self.volume_id = vol['id']
super(DriverImageBlockDevice, self).attach(
context, instance, volume_api, virt_driver,
do_check_attach=do_check_attach)
class DriverBlankBlockDevice(DriverVolumeBlockDevice):
_valid_source = 'blank'
_proxy_as_attr = set(['volume_size', 'volume_id', 'image_id'])
def attach(self, context, instance, volume_api,
virt_driver, wait_func=None, do_check_attach=True):
if not self.volume_id:
vol_name = instance.uuid + '-blank-vol'
vol = volume_api.create(context, self.volume_size, vol_name, '')
if wait_func:
wait_func(context, vol['id'])
self.volume_id = vol['id']
super(DriverBlankBlockDevice, self).attach(
context, instance, volume_api, virt_driver,
do_check_attach=do_check_attach)
def _convert_block_devices(device_type, block_device_mapping):
def _is_transformable(bdm):
try:
device_type(bdm)
except _NotTransformable:
return False
return True
return [device_type(bdm)
for bdm in block_device_mapping
if _is_transformable(bdm)]
convert_swap = functools.partial(_convert_block_devices,
DriverSwapBlockDevice)
convert_ephemerals = functools.partial(_convert_block_devices,
DriverEphemeralBlockDevice)
convert_volumes = functools.partial(_convert_block_devices,
DriverVolumeBlockDevice)
convert_snapshots = functools.partial(_convert_block_devices,
DriverSnapshotBlockDevice)
convert_images = functools.partial(_convert_block_devices,
DriverImageBlockDevice)
convert_blanks = functools.partial(_convert_block_devices,
DriverBlankBlockDevice)
def attach_block_devices(block_device_mapping, *attach_args, **attach_kwargs):
def _log_and_attach(bdm):
context = attach_args[0]
instance = attach_args[1]
LOG.audit(_('Booting with volume %(volume_id)s at %(mountpoint)s'),
{'volume_id': bdm.volume_id,
'mountpoint': bdm['mount_device']},
context=context, instance=instance)
bdm.attach(*attach_args, **attach_kwargs)
map(_log_and_attach, block_device_mapping)
return block_device_mapping
def refresh_conn_infos(block_device_mapping, *refresh_args, **refresh_kwargs):
map(operator.methodcaller('refresh_connection_info',
*refresh_args, **refresh_kwargs),
block_device_mapping)
return block_device_mapping
def legacy_block_devices(block_device_mapping):
def _has_legacy(bdm):
try:
bdm.legacy()
except _NoLegacy:
return False
return True
bdms = [bdm.legacy()
for bdm in block_device_mapping
if _has_legacy(bdm)]
# Re-enumerate ephemeral devices
if all(isinstance(bdm, DriverEphemeralBlockDevice)
for bdm in block_device_mapping):
for i, dev in enumerate(bdms):
dev['virtual_name'] = dev['virtual_name'][:-1] + str(i)
dev['num'] = i
return bdms
def get_swap(transformed_list):
"""Get the swap device out of the list context.
The block_device_info needs swap to be a single device,
not a list - otherwise this is a no-op.
"""
if not all(isinstance(device, DriverSwapBlockDevice) or
'swap_size' in device
for device in transformed_list):
return transformed_list
try:
return transformed_list.pop()
except IndexError:
return None
_IMPLEMENTED_CLASSES = (DriverSwapBlockDevice, DriverEphemeralBlockDevice,
DriverVolumeBlockDevice, DriverSnapshotBlockDevice,
DriverImageBlockDevice, DriverBlankBlockDevice)
def is_implemented(bdm):
for cls in _IMPLEMENTED_CLASSES:
try:
cls(bdm)
return True
except _NotTransformable:
pass
return False
|
{
"content_hash": "cbd662cbd0ab8d390ea5e23380a922b8",
"timestamp": "",
"source": "github",
"line_count": 454,
"max_line_length": 79,
"avg_line_length": 35.56167400881057,
"alnum_prop": 0.5822855373180551,
"repo_name": "srajag/nova",
"id": "1f8adbe0e6a555ae0880ebad456a879482e4fd8d",
"size": "16743",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/virt/block_device.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
'''
validators.py simple validation and cleaning of model data structures
Copyright (c) 2017 Vanessa Sochat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from som.logger import bot
def validate_model(fields):
'''validate_model removes any fields with a None value from the passed
field list, logging an error if a required field is None
'''
keepers = dict()
for entry in fields:
if entry['value'] is not None:
keepers[entry['key']] = entry['value']
else:
if entry['required'] == True:
bot.error("Field %s is required for this entity." %entry['key'])
return keepers
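# Illustrative behaviour (hypothetical field list):
#   fields = [{'key': 'name', 'value': 'x', 'required': True},
#             {'key': 'note', 'value': None, 'required': False}]
#   validate_model(fields)  # -> {'name': 'x'}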
|
{
"content_hash": "3f82398122130312da8ec40e757fb3eb",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 80,
"avg_line_length": 40.23076923076923,
"alnum_prop": 0.7431485022307202,
"repo_name": "radinformatics/som-tools",
"id": "4e95e9ad193100ae8d61910c6b7b438a957a8855",
"size": "1569",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "som/api/google/datastore/validators.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16452"
},
{
"name": "HTML",
"bytes": "780"
},
{
"name": "JavaScript",
"bytes": "163002"
},
{
"name": "Python",
"bytes": "115696"
},
{
"name": "Shell",
"bytes": "1092"
}
],
"symlink_target": ""
}
|
from django.db import connection
from django.http import HttpResponseRedirect
from django.shortcuts import redirect
from django.shortcuts import render
from django.views.generic import FormView
def load_template(request, template_name, extra_context=None):
if not extra_context: extra_context = dict()
return render(request, template_name, extra_context)
# Mixin to handle multiple form classes; see the illustrative usage sketch below
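# Illustrative usage (UserForm and AddressForm are hypothetical, not defined in
# this module):
#
#     class ProfileView(MultipleFormsView):
#         form_classes = {'user': UserForm, 'address': AddressForm}
#         template_name = 'profile.html'
#         success_url = 'profile'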
class MultipleFormsView(FormView):
form_classes = {}
template_name = None
success_url = 'home'
def are_forms_valid(self, forms):
for key, form in forms.iteritems():
if not form.is_valid():
return False
return True
def forms_valid(self, forms):
return self.get_success_url()
def forms_invalid(self, forms):
context = self.get_context_data()
context['next'] = self.request.POST.get('next')
context.update(forms)
return render(self.request, self.template_name, context)
def get(self, request, username=None, **kwargs):
context = self.get_context_data()
context.update(self.get_forms())
# TODO: append next url to response?
return render(request, self.template_name, context=context)
def get_context_data(self, **kwargs):
context = super(MultipleFormsView, self).get_context_data(**kwargs)
context['next'] = self.request.GET.get('next')
context['request'] = self.request
return context
def get_forms(self):
forms = {}
initial = self.get_initial_data()
form_kwargs = self.get_form_kwargs()
for key, form_class in self.form_classes.iteritems():
forms[key] = form_class(initial=initial[key], **form_kwargs)
return forms
def get_form_kwargs(self):
kwargs = {}
if self.request.method in ('POST', 'PUT'):
kwargs.update({
'data': self.request.POST,
'files': self.request.FILES,
})
return kwargs
def get_initial_data(self):
initial = {}
for key, form_class in self.form_classes.iteritems():
initial[key] = {}
return initial
def get_success_url(self):
success_url = self.request.POST.get('next', "")
if success_url and success_url != "None":
return HttpResponseRedirect(self.request.POST['next'])
else:
return redirect(self.success_url)
def post(self, request, **kwargs):
forms = self.get_forms()
if self.are_forms_valid(forms):
return self.forms_valid(forms)
else:
return self.forms_invalid(forms)
class MultipleModelFormsView(MultipleFormsView):
""" The object coresponding to the form must use the sam key """
def get_objects(self):
objects = {}
for key, form_class in self.form_classes.iteritems():
objects[key] = None
return objects
def get_forms(self):
forms = {}
objects = self.get_objects()
initial = self.get_initial_data()
form_kwargs = self.get_form_kwargs()
for key, form_class in self.form_classes.iteritems():
forms[key] = form_class(instance=objects[key], initial=initial[key], **form_kwargs)
return forms
def reset():
cursor = connection.cursor()
cursor.execute("SELECT setval('location_id_key', (SELECT MAX(id) FROM contact_location)+1)")
print "success"
|
{
"content_hash": "b2d3e0a8cfe5e128a7d95d5d43a178b3",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 97,
"avg_line_length": 33.699029126213595,
"alnum_prop": 0.6191299337366754,
"repo_name": "TimothyBest/Django_Boilerplate",
"id": "8e0c1c69cbcd2a7a05a13603df452974b7f36bc6",
"size": "3471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mysite/utils/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "885963"
},
{
"name": "HTML",
"bytes": "4344"
},
{
"name": "JavaScript",
"bytes": "77703"
},
{
"name": "Python",
"bytes": "22064"
}
],
"symlink_target": ""
}
|
import sys
import os
import unittest
import logging
# Extend PYTHONPATH with local 'lib' folder
if __name__ == "__main__":
jasyroot = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir, os.pardir, os.pardir))
sys.path.insert(0, jasyroot)
print("Running from %s..." % jasyroot)
import jasy.script.parse.Parser as Parser
import jasy.script.parse.ScopeScanner as ScopeScanner
import jasy.script.output.Compressor as Compressor
import jasy.script.optimize.LocalVariables as LocalVariables
class Tests(unittest.TestCase):
def process(self, code):
node = Parser.parse(code)
ScopeScanner.scan(node)
LocalVariables.optimize(node)
return Compressor.Compressor().compress(node)
def test_basic(self):
self.assertEqual(self.process(
'function test(para1, para2) { var result = para1 + para2; return result; }'),
'function test(c,b){var a=c+b;return a}'
)
def test_args(self):
self.assertEqual(self.process(
'''
function wrapper(obj, foo, hello) {
obj[foo]().hello;
}
'''),
'function wrapper(a,b,c){a[b]().hello}'
)
def test_accessor_names(self):
self.assertEqual(self.process(
'''
function outer(alpha, beta, gamma)
{
function inner() {}
var result = alpha * beta + gamma;
var doNot = result.alpha.beta.gamma;
return result * outer(alpha, beta, gamma);
}
'''),
'function outer(d,c,b){function e(){}var a=d*c+b;var f=a.alpha.beta.gamma;return a*outer(d,c,b)}'
)
def test_bind(self):
self.assertEqual(self.process(
'''
function bind(func, self, varargs)
{
return this.create(func, {
self : self,
args : null
});
};
'''),
'function bind(b,a,c){return this.create(b,{self:a,args:null})};'
)
def test_closure(self):
self.assertEqual(self.process(
'''
(function(global)
{
var foo;
var bar = function()
{
var baz = foo;
}
})(this);
'''),
'(function(b){var a;var c=function(){var b=a}})(this);'
)
def test_conflict_generatedname(self):
self.assertEqual(self.process(
'''
function wrapper()
{
var first=4;
var a=5;
}
'''),
'function wrapper(){var a=4;var b=5}'
)
def test_conflict_param_var(self):
self.assertEqual(self.process(
'''
function x(config){
var config = 3;
}
'''),
'function x(a){var a=3}'
)
def test_conflict_same_name(self):
self.assertEqual(self.process(
'''
function wrapper()
{
var first=4;
var first=5;
}
'''),
'function wrapper(){var a=4;var a=5}'
)
def test_declaration(self):
self.assertEqual(self.process(
'''
function wrapper()
{
var first, second=5, third;
var [desFirst, desSecond]=destruct(), after;
}
'''),
'function wrapper(){var e,d=5,c;var [b,a]=destruct(),f}'
)
def test_exception_catchvar(self):
self.assertEqual(self.process(
'''
function wrapper()
{
var x = 1, y = x+2;
try
{
something();
}
catch(ex)
{
var inCatch = 3;
alert(ex);
}
}
'''),
'function wrapper(){var a=1,c=a+2;try{something()}catch(b){var d=3;alert(b)}}'
)
def test_exception(self):
self.assertEqual(self.process(
'''
function wrapper(param1)
{
var b = "hello";
try{
access.an.object[param1];
}
catch(except)
{
alert(except + param1)
}
}
'''),
'function wrapper(a){var c="hello";try{access.an.object[a]}catch(b){alert(b+a)}}'
)
def test_function(self):
self.assertEqual(self.process(
'''
(function(global)
{
var x = doScrollCheck();
function doScrollCheck() {
doScrollCheck();
}
})(window);
'''),
'(function(c){var b=a();function a(){a()}})(window);'
)
def test_inline_access(self):
self.assertEqual(self.process(
'''
function wrapper()
{
var d, a=d;
}
'''),
'function wrapper(){var a,b=a}'
)
def test_let_definition(self):
self.assertEqual(self.process(
'''
function wrapper()
{
if (x > y) {
let gamma = 12.7 + y;
i = gamma * x;
}
}
'''),
'function wrapper(){if(x>y){let a=12.7+y;i=a*x}}'
)
def test_let_expression(self):
self.assertEqual(self.process(
r'''
function wrapper()
{
var x = 5;
var y = 0;
document.write(let(x = x + 10, y = 12) x + y + "<br>\n");
document.write(x+y + "<br>\n");
}
'''),
r'function wrapper(){var a=5;var b=0;document.write(let(a=a+10,b=12)a+b+"<br>\n");document.write(a+b+"<br>\n")}'
)
def test_let_statement(self):
self.assertEqual(self.process(
r'''
function wrapper()
{
var x = 5;
var y = 0;
let (x = x+10, y = 12, z=3) {
print(x+y+z + "\n");
}
print((x + y) + "\n");
}
'''),
r'function wrapper(){var a=5;var b=0;let(a=a+10,b=12,c=3){print(a+b+c+"\n")}print((a+b)+"\n")}'
)
def test_reuse_different(self):
self.assertEqual(self.process(
'''
function run()
{
var first = function() {
var inFirst = 1;
};
var second = function() {
var inSecond = 2;
};
}
'''),
'function run(){var b=function(){var a=1};var a=function(){var a=2}}'
)
def test_reuse_names(self):
self.assertEqual(self.process(
'''
function run()
{
var first = function() {
var a = 1;
};
var second = function() {
var a = 2;
};
}
'''),
'function run(){var b=function(){var a=1};var a=function(){var a=2}}'
)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.ERROR)
suite = unittest.TestLoader().loadTestsFromTestCase(Tests)
unittest.TextTestRunner(verbosity=2).run(suite)
|
{
"content_hash": "f78f620f04e6722ccc1a5765c6ae69b2",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 124,
"avg_line_length": 26.53684210526316,
"alnum_prop": 0.43012032262329764,
"repo_name": "sebastian-software/jasy",
"id": "c1a27805fa220aa4654b5cea6ec29eebb1e4c455",
"size": "7587",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jasy/test/script/localvariables.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "913"
},
{
"name": "Python",
"bytes": "1043666"
},
{
"name": "Shell",
"bytes": "1926"
}
],
"symlink_target": ""
}
|
import importlib
import os
import sys
import warnings
from setuptools import setup
if sys.version_info[0:2] < (3, 6):
warnings.warn('This package is tested with Python version 3.6+')
root_path = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(root_path, 'README.rst')) as readme:
README = readme.read()
install_requires = ['Django>=2.2,<4', 'requests']
tests_require = [
'flake8', 'flake8-bugbear', 'flake8-quotes', 'flake8-blind-except', 'flake8-debugger', 'pep8-naming',
'responses',
]
package_info = importlib.import_module('moj_irat')
setup(
name='django-moj-irat',
version=package_info.__version__,
author=package_info.__author__,
author_email='dev@digital.justice.gov.uk',
url='https://github.com/ministryofjustice/django-moj-irat',
packages=['moj_irat'],
include_package_data=True,
license='MIT',
description='Tools to support adding a Django-based service to '
'Ministry of Justice’s Incidence Response and Tuning',
long_description=README,
keywords='moj django irat monitoring',
classifiers=[
'Development Status :: 4 - Beta',
'Framework :: Django',
'Framework :: Django :: 2.2',
'Framework :: Django :: 3.0',
'Framework :: Django :: 3.1',
'Framework :: Django :: 3.2',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
],
install_requires=install_requires,
tests_require=tests_require,
test_suite='tests.run',
)
|
{
"content_hash": "d68d4e1eda5a91f23227f269601493d6",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 105,
"avg_line_length": 32.96551724137931,
"alnum_prop": 0.6276150627615062,
"repo_name": "ministryofjustice/django-moj-irat",
"id": "b861d5872c80e5573eb3ec5fd661a736ed2ee70f",
"size": "1936",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24777"
}
],
"symlink_target": ""
}
|
"""A library for cross-platform browser tests."""
import os
import sys
try:
# This enables much better stack upon native code crashes.
import faulthandler
faulthandler.enable()
except ImportError:
pass
# Ensure Python >= 2.7.
if sys.version_info < (2, 7):
print >> sys.stderr, 'Need Python 2.7 or greater.'
sys.exit(-1)
def _JoinPath(*path_parts):
return os.path.abspath(os.path.join(*path_parts))
def _InsertPath(path):
assert os.path.isdir(path), 'Not a valid path: %s' % path
if path not in sys.path:
# Some call sites that use Telemetry assume that sys.path[0] is the
# directory containing the script, so we add these extra paths to right
# after sys.path[0].
sys.path.insert(1, path)
def _AddDirToPythonPath(*path_parts):
path = _JoinPath(*path_parts)
_InsertPath(path)
# Add Catapult dependencies to our path.
# util depends on py_utils, so we can't use it to get the catapult dir.
_CATAPULT_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '..', '..')
_AddDirToPythonPath(_CATAPULT_DIR, 'common', 'py_utils')
_AddDirToPythonPath(_CATAPULT_DIR, 'dependency_manager')
_AddDirToPythonPath(_CATAPULT_DIR, 'devil')
_AddDirToPythonPath(_CATAPULT_DIR, 'systrace')
_AddDirToPythonPath(_CATAPULT_DIR, 'tracing')
_AddDirToPythonPath(_CATAPULT_DIR, 'common', 'py_trace_event')
_AddDirToPythonPath(_CATAPULT_DIR, 'common', 'py_vulcanize')
_AddDirToPythonPath(_CATAPULT_DIR, 'tracing', 'tracing_build')
# pylint: disable=wrong-import-position
from telemetry.core import util
from telemetry.internal.util import global_hooks
# pylint: enable=wrong-import-position
# Add Catapult third party dependencies into our path.
_AddDirToPythonPath(util.GetCatapultThirdPartyDir(), 'typ')
# Required by websocket-client.
_AddDirToPythonPath(util.GetCatapultThirdPartyDir(), 'six')
# Add Telemetry third party dependencies into our path.
_TELEMETRY_3P = util.GetTelemetryThirdPartyDir()
_AddDirToPythonPath(util.GetTelemetryThirdPartyDir(), 'altgraph')
_AddDirToPythonPath(util.GetTelemetryThirdPartyDir(), 'mock')
_AddDirToPythonPath(util.GetTelemetryThirdPartyDir(), 'modulegraph')
_AddDirToPythonPath(util.GetTelemetryThirdPartyDir(), 'mox3')
_AddDirToPythonPath(util.GetTelemetryThirdPartyDir(), 'png')
_AddDirToPythonPath(util.GetTelemetryThirdPartyDir(), 'pyfakefs')
_AddDirToPythonPath(util.GetTelemetryThirdPartyDir(), 'websocket-client')
# Install Telemetry global hooks.
global_hooks.InstallHooks()
|
{
"content_hash": "dd2b356880cdb5654f1e2d738d90f5a5",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 75,
"avg_line_length": 35.17142857142857,
"alnum_prop": 0.7546709991876523,
"repo_name": "endlessm/chromium-browser",
"id": "ff7225b79561a2f5618820dbff6cde7146c9390e",
"size": "2624",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/catapult/telemetry/telemetry/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
'''
Copyright [2020] [Anoop Bhat]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
def quick_sort(sequence):
    length = len(sequence)
    if length <= 1:
return sequence
else:
pivot = sequence.pop()
items_lower = []
items_larger = []
for item in sequence:
if item < pivot:
items_lower.append(item)
else:
items_larger.append(item)
return quick_sort(items_lower) + [pivot] + quick_sort(items_larger)
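# Note: this implementation pops the last element as the pivot, so it mutates
# the input list and degrades from O(n log n) on average to O(n^2) on input
# that is already sorted.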
print(quick_sort([5,4,2,1,7,9,3,1]))
|
{
"content_hash": "7f1c9588c6bf3999b0d258fecb0e7340",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 72,
"avg_line_length": 27.86111111111111,
"alnum_prop": 0.6819541375872383,
"repo_name": "amanmehara/programming-app-data",
"id": "a52c8cec171feae0eafd331bd5c5ae059af659c6",
"size": "1003",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/Algorithms/quicksort.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "61308"
},
{
"name": "C#",
"bytes": "14180"
},
{
"name": "C++",
"bytes": "144896"
},
{
"name": "CSS",
"bytes": "6375"
},
{
"name": "Groovy",
"bytes": "19568"
},
{
"name": "HTML",
"bytes": "7355"
},
{
"name": "Java",
"bytes": "52135"
},
{
"name": "JavaScript",
"bytes": "1578"
},
{
"name": "PHP",
"bytes": "15776"
},
{
"name": "Python",
"bytes": "10281"
},
{
"name": "Scala",
"bytes": "14985"
}
],
"symlink_target": ""
}
|
from horizon.test import helpers as test
class CoursesTests(test.TestCase):
# Unit tests for courses.
def test_me(self):
self.assertTrue(1 + 1 == 2)
|
{
"content_hash": "b836997724db3de127f67276a9946098",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 40,
"avg_line_length": 23.857142857142858,
"alnum_prop": 0.6706586826347305,
"repo_name": "sikessle/htwg-lab-cloud",
"id": "44def9390282bba55f372fbbf1bc6d82d517624b",
"size": "167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "devstack/dashboard-panel-extension/prof/courses/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1020"
},
{
"name": "HTML",
"bytes": "1208"
},
{
"name": "JavaScript",
"bytes": "38"
},
{
"name": "Makefile",
"bytes": "487"
},
{
"name": "Python",
"bytes": "54036"
},
{
"name": "Shell",
"bytes": "15017"
},
{
"name": "TeX",
"bytes": "63599"
}
],
"symlink_target": ""
}
|
from migrate.changeset import UniqueConstraint
from sqlalchemy import MetaData, Table
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
t = Table('chassis', meta, autoload=True)
# NOTE: new name convention for UC
uc = UniqueConstraint('uuid', table=t, name='uniq_chassis0uuid')
uc.create()
def downgrade(migrate_engine):
raise NotImplementedError('Downgrade from version 011 is unsupported.')
|
{
"content_hash": "23884abae88bb4b401f130d001aee581",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 75,
"avg_line_length": 29.2,
"alnum_prop": 0.7374429223744292,
"repo_name": "citrix-openstack-build/ironic",
"id": "9d5d7c5423a977c59622c3f2d0827f021b8ba483",
"size": "1083",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ironic/db/sqlalchemy/migrate_repo/versions/011_add_chassis_uc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "19934"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "1142333"
}
],
"symlink_target": ""
}
|
from oslo_config import cfg
from nova.compute import api as compute_api
from nova.tests.functional.api_sample_tests import test_servers
from nova.tests.unit.image import fake
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.legacy_v2.extensions')
class PreserveEphemeralOnRebuildJsonTest(test_servers.ServersSampleBase):
extension_name = 'os-preserve-ephemeral-rebuild'
extra_extensions_to_load = ["os-access-ips"]
_api_version = 'v2'
def _get_flags(self):
f = super(PreserveEphemeralOnRebuildJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.preserve_ephemeral_rebuild.'
'Preserve_ephemeral_rebuild')
return f
def _test_server_rebuild_preserve_ephemeral(self, value, resp_tpl=None):
uuid = self._post_server()
image = fake.get_valid_image_id()
subs = {'host': self._get_host(),
'uuid': image,
'name': 'foobar',
'pass': 'seekr3t',
'hostid': '[a-f0-9]+',
'preserve_ephemeral': str(value).lower(),
'action': 'rebuild',
'glance_host': self._get_glance_host(),
'access_ip_v4': '1.2.3.4',
'access_ip_v6': '80fe::'
}
old_rebuild = compute_api.API.rebuild
def fake_rebuild(self_, context, instance, image_href, admin_password,
files_to_inject=None, **kwargs):
self.assertEqual(kwargs['preserve_ephemeral'], value)
if resp_tpl:
return old_rebuild(self_, context, instance, image_href,
admin_password, files_to_inject=None,
**kwargs)
self.stubs.Set(compute_api.API, 'rebuild', fake_rebuild)
response = self._do_post('servers/%s/action' % uuid,
'server-action-rebuild-preserve-ephemeral',
subs)
if resp_tpl:
subs.update(self._get_regexes())
self._verify_response(resp_tpl, subs, response, 202)
else:
self.assertEqual(response.status_code, 202)
def test_server_rebuild_preserve_ephemeral_true(self):
self._test_server_rebuild_preserve_ephemeral(True)
def test_server_rebuild_preserve_ephemeral_false(self):
self._test_server_rebuild_preserve_ephemeral(False,
resp_tpl='server-action-rebuild-preserve-ephemeral-resp')
|
{
"content_hash": "5da2f5936e7d1f36f1ff9df69640b1af",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 78,
"avg_line_length": 41.71875,
"alnum_prop": 0.5838951310861423,
"repo_name": "mmnelemane/nova",
"id": "1b52f61828825ab23af6b922e29946c8ed430ce8",
"size": "3272",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/tests/functional/api_sample_tests/test_preserve_ephemeral_rebuild.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16496892"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "285480"
}
],
"symlink_target": ""
}
|
import torch
import filters_bank_pytorch as filters_bank
import scattering_pytorch as scattering
def run_filter_bank(M, N, J):
filters = filters_bank.filters_bank(M, N, J)
d_save = {}
# Save phi
d_save["phi"] = {}
for key in filters["phi"].keys():
val = filters["phi"][key]
if isinstance(val, torch.FloatTensor):
val_numpy = val.cpu().numpy()
d_save["phi"][key] = val_numpy
# Save psi
d_save["psi"] = []
for elem in filters["psi"]:
d = {}
for key in elem.keys():
val = elem[key]
if isinstance(val, torch.FloatTensor):
val_numpy = val.cpu().numpy()
d[key] = val_numpy
d_save["psi"].append(d)
return d_save
def run_scattering(X, use_cuda=False):
# Ensure NCHW format
assert X.shape[1] < min(X.shape[2:])
M, N = X.shape[2:]
if use_cuda:
scat = scattering.Scattering(M=M, N=N, J=2, check=True).cuda()
list_S = scat.forward(torch.FloatTensor(X).cuda())
else:
scat = scattering.Scattering(M=M, N=N, J=2, check=True)
list_S = scat.forward(torch.FloatTensor(X))
return list_S
|
{
"content_hash": "256a14ee2f82e217bf51363bddc3d13c",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 70,
"avg_line_length": 25.361702127659573,
"alnum_prop": 0.5562080536912751,
"repo_name": "tdeboissiere/DeepLearningImplementations",
"id": "2a32a8c54905afa08cfc80739673dd10420f4ac2",
"size": "1192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ScatteringTransform/test/run_pytorch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "448200"
},
{
"name": "Shell",
"bytes": "2163"
}
],
"symlink_target": ""
}
|
import sys
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
from .._version import VERSION
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class ApiManagementClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes
"""Configuration for ApiManagementClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Subscription credentials which uniquely identify Microsoft Azure
subscription. The subscription ID forms part of the URI for every service call. Required.
:type subscription_id: str
:keyword api_version: Api Version. Default value is "2021-08-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(self, credential: "AsyncTokenCredential", subscription_id: str, **kwargs: Any) -> None:
super(ApiManagementClientConfiguration, self).__init__(**kwargs)
api_version = kwargs.pop("api_version", "2021-08-01") # type: Literal["2021-08-01"]
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
self.credential = credential
self.subscription_id = subscription_id
self.api_version = api_version
self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"])
kwargs.setdefault("sdk_moniker", "mgmt-apimanagement/{}".format(VERSION))
self._configure(**kwargs)
def _configure(self, **kwargs: Any) -> None:
self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get("authentication_policy")
if self.credential and not self.authentication_policy:
self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(
self.credential, *self.credential_scopes, **kwargs
)
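# Illustrative usage sketch (not part of the generated SDK source; assumes the
# azure-identity package is available for the async credential):
#
#     from azure.identity.aio import DefaultAzureCredential
#     config = ApiManagementClientConfiguration(
#         credential=DefaultAzureCredential(),
#         subscription_id="<subscription-id>",
#     )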
|
{
"content_hash": "8d7fc93b337e16b3d2f87eaec6b49119",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 107,
"avg_line_length": 52.84615384615385,
"alnum_prop": 0.7231441048034934,
"repo_name": "Azure/azure-sdk-for-python",
"id": "5528490fd773b26528745f93901e554cf2fc3eea",
"size": "3903",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/aio/_configuration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
def makePalyerData(tokenId, name):
playerData = {
'player token id' : tokenId,
'player game channel id' : -1,
'player name' : name,
'player id' : -1
}
return playerData
def setChannelId(playerData, channelId):
playerData['player game channel id'] = channelId
# game data related functions
def makeGameData(gameChannelId):
# make structure
gameData = {
'game current scene' : 'no scene',
'game channel id' : gameChannelId,
'game map id' : -1,
'game channel current turn id' : -1,
'game channel turn list' : [],
'game turn start flag' : False,
'game recently connected line idx' : [0, 0],
'game channel void tile' : -1,
'game channel player number' : 0,
'game channel player list' : [],
'game channel map size' : [0, 0],
'game channel map' : []
}
# add players data
for i in range(4):
gameData['game channel player list'].append({})
gameData['game channel player list'][i]['player name'] = 'nothing'
gameData['game channel player list'][i]['player master flag'] = False
gameData['game channel player list'][i]['player connection flag'] = False
gameData['game channel player list'][i]['character id'] = -1
gameData['game channel player list'][i]['turn'] = -1
gameData['game channel player list'][i]['tile number'] = 0
gameData['game channel player list'][i]['gold number'] = 0
gameData['game channel player list'][i]['trash number'] = 0
gameData['game channel player list'][i]['score'] = 0
# add map data
for i in range(23):
gameData['game channel map'].append([])
for j in range(23):
gameData['game channel map'][i].append({})
gameData['game channel map'][i][j]['type'] = 'SENTINEL'
gameData['game channel map'][i][j]['owner'] = 'NOBODY'
gameData['game channel map'][i][j]['item'] = 'NOTHING'
gameData['game channel map'][i][j]['checkedFlag'] = False
gameData['game channel map'][i][j]['animation turn'] = 0
gameData['game channel map'][i][j]['direction'] = 'UP'
# return the result data
return gameData
def setMapSize(gameData, width, height):
gameData['game channel void tile'] = width * height
gameData['game channel map size'][0] = width
    gameData['game channel map size'][1] = height
# map init code
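    # The playable board occupies a (2*height+1) x (2*width+1) block of the
    # pre-allocated 23x23 grid: cells where both indices are even are tiles,
    # cells where exactly one index is even are lines (edges), and cells where
    # both indices are odd are dots (corners).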
for i in range(1, height * 2 + 2):
for j in range(1, width * 2 + 2):
if i % 2 == 0:
if j % 2 == 0:
gameData['game channel map'][i][j]['type'] = 'tile'
else:
gameData['game channel map'][i][j]['type'] = 'line'
else:
if j % 2 == 0:
gameData['game channel map'][i][j]['type'] = 'line'
else:
gameData['game channel map'][i][j]['type'] = 'dot'
def addPlayer(gameData, playerData):
for idx in range(4):
if gameData['game channel player list'][idx]['player connection flag'] == False:
setChannelId(playerData, gameData['game channel id'])
            gameData['game channel player number'] += 1
gameData['game channel player list'][idx]['player name'] = playerData['player name']
gameData['game channel player list'][idx]['player connection flag'] = True
return True
return False
def renderMap(gameData):
# for debug
for i in range(23):
thisLine = ''
for j in range(23):
if gameData['game channel map'][i][j]['type'] == 'dot':
thisLine += '*'
elif gameData['game channel map'][i][j]['type'] == 'line':
if i % 2 == 0:
thisLine += '|'
else:
thisLine += '-'
elif gameData['game channel map'][i][j]['type'] == 'tile':
thisLine += ' '
if not thisLine == '':
print thisLine
print '>>> current map'
if __name__ == '__main__':
# test : player creation
player_moon = makePalyerData(29, 'prof. moon')
player_jg = makePalyerData(67, 'JUNGGANG')
player_wooq = makePalyerData(80, 'wooq')
# test : game data creation
testGameData = makeGameData(4)
#insert player to game channel
result = addPlayer(testGameData, player_moon)
if result == True:
result = addPlayer(testGameData, player_jg)
if result == True:
result = addPlayer(testGameData, player_wooq)
for each in range(4):
if testGameData['game channel player list'][each]['player connection flag'] == True:
print "name : %s / score : %d" % (testGameData['game channel player list'][each]['player name'], testGameData['game channel player list'][each]['score'])
setMapSize(testGameData, 6, 5)
renderMap(testGameData)
|
{
"content_hash": "c2efee5e7b164f1d74c6cf7ccb4ddb19",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 156,
"avg_line_length": 31.028985507246375,
"alnum_prop": 0.6445586174684726,
"repo_name": "junggang/blackbags",
"id": "b6e89eaebfab81393691c57a21c2c2e6bf3c7ac6",
"size": "4348",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python_server/dataCreation.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "801"
},
{
"name": "C++",
"bytes": "329793"
},
{
"name": "Objective-C",
"bytes": "54341"
},
{
"name": "Objective-C++",
"bytes": "11517"
},
{
"name": "Python",
"bytes": "101037"
}
],
"symlink_target": ""
}
|
"""Installer for the ckm.sitetheme package."""
from setuptools import find_packages
from setuptools import setup
import os
def read(*rnames):
return open(os.path.join(os.path.dirname(__file__), *rnames)).read()
long_description = read('README.rst')
setup(
name='ckm.sitetheme',
version='1.0.0',
description="Site theme for CK Marlene Site",
long_description=long_description,
# Get more from http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Framework :: Plone",
"Programming Language :: Python",
],
keywords='Plone Diazo',
author='Christoph Boehner',
author_email='cb@vorwaerts-werbung.de',
url='http://pypi.python.org/pypi/ckm.sitetheme',
license='BSD',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['ckm'],
include_package_data=True,
zip_safe=False,
install_requires=[
'plone.app.theming',
'plone.app.themingplugins',
'setuptools',
],
extras_require={
'test': [
'mock',
'plone.app.testing',
'unittest2',
],
'develop': [
'coverage',
'flake8',
'jarn.mkrelease',
'plone.app.debugtoolbar',
'plone.reload',
'Products.Clouseau',
'Products.DocFinderTab',
'Products.PDBDebugMode',
'Products.PrintingMailHost',
'Sphinx',
'zest.releaser',
'zptlint',
],
},
entry_points="""
[z3c.autoinclude.plugin]
target = plone
""",
)
|
{
"content_hash": "f4901070ab1befb3796540948262a5b3",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 74,
"avg_line_length": 25.571428571428573,
"alnum_prop": 0.5667287399130975,
"repo_name": "a25kk/ck",
"id": "f7e2b51d8f5666aa32015644cc4458baef152c11",
"size": "1635",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ckm.sitetheme/setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "184097"
},
{
"name": "Dockerfile",
"bytes": "110"
},
{
"name": "HTML",
"bytes": "289504"
},
{
"name": "JavaScript",
"bytes": "97539"
},
{
"name": "Makefile",
"bytes": "4773"
},
{
"name": "Python",
"bytes": "63764"
},
{
"name": "Shell",
"bytes": "3746"
}
],
"symlink_target": ""
}
|
import numpy as np
from numpy import cross
from numpy.linalg import norm
from poliastro.core.thrust.change_a_inc import (
beta,
compute_parameters,
extra_quantities,
)
def change_a_inc(k, a_0, a_f, inc_0, inc_f, f):
"""Guidance law from the Edelbaum/Kéchichian theory, optimal transfer between circular inclined orbits
(a_0, i_0) --> (a_f, i_f), ecc = 0.
Parameters
----------
k : float
Gravitational parameter.
a_0 : float
Initial semimajor axis.
a_f : float
Final semimajor axis.
inc_0 : float
Initial inclination.
inc_f : float
Final inclination.
f : float
Magnitude of constant acceleration
Notes
-----
Edelbaum theory, reformulated by Kéchichian.
References
----------
* Edelbaum, T. N. "Propulsion Requirements for Controllable
Satellites", 1961.
* Kéchichian, J. A. "Reformulation of Edelbaum's Low-Thrust
Transfer Problem Using Optimal Control Theory", 1997.
"""
V_0, beta_0_, _ = compute_parameters(k, a_0, a_f, inc_0, inc_f)
def a_d(t0, u_, k):
r = u_[:3]
v = u_[3:]
# Change sign of beta with the out-of-plane velocity
beta_ = beta(t0, V_0=V_0, f=f, beta_0=beta_0_) * np.sign(r[0] * (inc_f - inc_0))
t_ = v / norm(v)
w_ = cross(r, v) / norm(cross(r, v))
# n_ = cross(t_, w_)
accel_v = f * (np.cos(beta_) * t_ + np.sin(beta_) * w_)
return accel_v
delta_V, t_f = extra_quantities(k, a_0, a_f, inc_0, inc_f, f)
return a_d, delta_V, t_f
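# Illustrative usage sketch (the numbers below are hypothetical, not taken from
# the library documentation): with k in km^3/s^2, semimajor axes in km,
# inclinations in rad and the thrust acceleration f in km/s^2, the function
# returns the guidance callback a_d(t0, u_, k) plus the total delta-V and the
# transfer time:
#
#     a_d, delta_V, t_f = change_a_inc(
#         k=398600.0, a_0=7000.0, a_f=42164.0, inc_0=0.1, inc_f=0.0, f=2.4e-7
#     )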
|
{
"content_hash": "2d23877f5968358cd91970bcc2f90acc",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 106,
"avg_line_length": 26.949152542372882,
"alnum_prop": 0.5679245283018868,
"repo_name": "Juanlu001/poliastro",
"id": "b13b564ca2d6ce3234f360c4d3af0c6359504b83",
"size": "1593",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/poliastro/twobody/thrust/change_a_inc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "625"
},
{
"name": "HTML",
"bytes": "3053"
},
{
"name": "Jupyter Notebook",
"bytes": "4286"
},
{
"name": "Makefile",
"bytes": "373"
},
{
"name": "Python",
"bytes": "472566"
},
{
"name": "Shell",
"bytes": "150"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
setup(name='django-geonames',
version=__import__('geonames').__version__,
description='Fork of official GeoDjango geonames application refactored and adopted for Django 1.2.1',
author='Justin Bronn',
author_email='jbronn@geodjango.org',
url='https://github.com/ramusus/django-geonames/',
packages = find_packages(),
include_package_data = True,
package_data = {
'geonames' : ['sql/*.sql']
}
)
|
{
"content_hash": "eda096a1020e41958554fd6ad8a8176b",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 108,
"avg_line_length": 33.53333333333333,
"alnum_prop": 0.6441351888667992,
"repo_name": "ramusus/django-geonames",
"id": "17d8f5973496a60ccc5259aa66cfbb82218f6c8f",
"size": "526",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "19728"
}
],
"symlink_target": ""
}
|
import json
import logging
import operator
from environment_wrappers import CreateUrlFetcher
import url_constants
class ChannelInfo(object):
'''Represents a Chrome channel with three pieces of information. |channel| is
one of 'stable', 'beta', 'dev', or 'master'. |branch| and |version| correspond
with each other, and represent different releases of Chrome. Note that
|branch| and |version| can occasionally be the same for separate channels
(i.e. 'beta' and 'dev'), so all three fields are required to uniquely
identify a channel.
'''
def __init__(self, channel, branch, version):
assert isinstance(channel, basestring), channel
assert isinstance(branch, basestring), branch
# TODO(kalman): Assert that this is a string. One day Chromium will probably
# be served out of a git repository and the versions will no longer be ints.
assert isinstance(version, int) or version == 'master', version
self.channel = channel
self.branch = branch
self.version = version
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return '%s%s' % (type(self).__name__, repr(self.__dict__))
def __str__(self):
return repr(self)
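# Example (branch and version numbers are hypothetical): equality compares all
# three fields, so ChannelInfo('beta', '2357', 46) == ChannelInfo('beta', '2357', 46)
# is True, while ChannelInfo('dev', '2357', 46) describes a different channel.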
class BranchUtility(object):
'''Provides methods for working with Chrome channel, branch, and version
data served from OmahaProxy.
'''
def __init__(self, fetch_url, history_url, fetcher, object_store_creator):
self._fetcher = fetcher
def create_object_store(category):
return object_store_creator.Create(BranchUtility, category=category)
self._branch_object_store = create_object_store('branch')
self._version_object_store = create_object_store('version')
self._fetch_result = self._fetcher.FetchAsync(fetch_url)
self._history_result = self._fetcher.FetchAsync(history_url)
@staticmethod
def Create(object_store_creator):
return BranchUtility(url_constants.OMAHA_PROXY_URL,
url_constants.OMAHA_DEV_HISTORY,
CreateUrlFetcher(),
object_store_creator)
@staticmethod
def GetAllChannelNames():
return ('stable', 'beta', 'dev', 'master')
@staticmethod
def NewestChannel(channels):
channels = set(channels)
for channel in reversed(BranchUtility.GetAllChannelNames()):
if channel in channels:
return channel
def Newer(self, channel_info):
'''Given a ChannelInfo object, returns a new ChannelInfo object
representing the next most recent Chrome version/branch combination.
'''
if channel_info.channel == 'master':
return None
if channel_info.channel == 'stable':
stable_info = self.GetChannelInfo('stable')
if channel_info.version < stable_info.version:
return self.GetStableChannelInfo(channel_info.version + 1)
names = self.GetAllChannelNames()
return self.GetAllChannelInfo()[names.index(channel_info.channel) + 1]
def Older(self, channel_info):
'''Given a ChannelInfo object, returns a new ChannelInfo object
representing the previous Chrome version/branch combination.
'''
if channel_info.channel == 'stable':
if channel_info.version <= 5:
# BranchUtility can't access branch data from before Chrome version 5.
return None
return self.GetStableChannelInfo(channel_info.version - 1)
names = self.GetAllChannelNames()
return self.GetAllChannelInfo()[names.index(channel_info.channel) - 1]
@staticmethod
def SplitChannelNameFromPath(path):
'''Splits the channel name out of |path|, returning the tuple
(channel_name, real_path). If the channel cannot be determined then returns
(None, path).
'''
if '/' in path:
first, second = path.split('/', 1)
else:
first, second = (path, '')
if first in BranchUtility.GetAllChannelNames():
return (first, second)
return (None, path)
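  # For example, SplitChannelNameFromPath('dev/extensions/storage') returns
  # ('dev', 'extensions/storage'), whereas a path without a channel prefix such
  # as 'extensions/storage' returns (None, 'extensions/storage').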
def GetAllBranches(self):
return tuple((channel, self.GetChannelInfo(channel).branch)
for channel in BranchUtility.GetAllChannelNames())
def GetAllVersions(self):
return tuple(self.GetChannelInfo(channel).version
for channel in BranchUtility.GetAllChannelNames())
def GetAllChannelInfo(self):
return tuple(self.GetChannelInfo(channel)
for channel in BranchUtility.GetAllChannelNames())
def GetChannelInfo(self, channel):
version = self._ExtractFromVersionJson(channel, 'version')
if version != 'master':
version = int(version)
return ChannelInfo(channel,
self._ExtractFromVersionJson(channel, 'branch'),
version)
def GetStableChannelInfo(self, version):
'''Given a |version| corresponding to a 'stable' version of Chrome, returns
a ChannelInfo object representing that version.
'''
return ChannelInfo('stable', self.GetBranchForVersion(version), version)
def _ExtractFromVersionJson(self, channel_name, data_type):
'''Returns the branch or version number for a channel name.
'''
if channel_name == 'master':
return 'master'
if data_type == 'branch':
object_store = self._branch_object_store
elif data_type == 'version':
object_store = self._version_object_store
data = object_store.Get(channel_name).Get()
if data is not None:
return data
try:
version_json = json.loads(self._fetch_result.Get().content)
except Exception as e:
# This can happen if omahaproxy is misbehaving, which we've seen before.
# Quick hack fix: just serve from master until it's fixed.
logging.error('Failed to fetch or parse branch from omahaproxy: %s! '
'Falling back to "master".' % e)
return 'master'
numbers = {}
for entry in version_json:
if entry['os'] not in ('win', 'linux', 'mac', 'cros'):
continue
for version in entry['versions']:
if version['channel'] != channel_name:
continue
if data_type == 'branch':
number = version['version'].split('.')[2]
elif data_type == 'version':
number = version['version'].split('.')[0]
if number not in numbers:
numbers[number] = 0
else:
numbers[number] += 1
sorted_numbers = sorted(numbers.iteritems(),
key=operator.itemgetter(1),
reverse=True)
object_store.Set(channel_name, sorted_numbers[0][0])
return sorted_numbers[0][0]
def GetBranchForVersion(self, version):
'''Returns the most recent branch for a given chrome version number using
data stored on omahaproxy (see url_constants).
'''
if version == 'master':
return 'master'
branch = self._branch_object_store.Get(str(version)).Get()
if branch is not None:
return branch
version_json = json.loads(self._history_result.Get().content)
for entry in version_json:
version_title = entry['version'].split('.')
if version_title[0] == str(version):
self._branch_object_store.Set(str(version), version_title[2])
return version_title[2]
raise ValueError('The branch for %s could not be found.' % version)
def GetChannelForVersion(self, version):
'''Returns the name of the development channel corresponding to a given
version number.
'''
for channel_info in self.GetAllChannelInfo():
if channel_info.channel == 'stable' and version <= channel_info.version:
return channel_info.channel
if version == channel_info.version:
return channel_info.channel
def GetLatestVersionNumber(self):
'''Returns the most recent version number found using data stored on
omahaproxy.
'''
latest_version = self._version_object_store.Get('latest').Get()
if latest_version is not None:
return latest_version
version_json = json.loads(self._history_result.Get().content)
latest_version = 0
for entry in version_json:
version_title = entry['version'].split('.')
version = int(version_title[0])
if version > latest_version:
latest_version = version
self._version_object_store.Set('latest', latest_version)
return latest_version
|
{
"content_hash": "69ffe1b6c6b14621af2de1aedac49d4d",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 80,
"avg_line_length": 35.71551724137931,
"alnum_prop": 0.662925416364953,
"repo_name": "ltilve/ChromiumGStreamerBackend",
"id": "955cfbb6b8abd1f50301fcf1502e78b75cfe21f3",
"size": "8453",
"binary": false,
"copies": "29",
"ref": "refs/heads/master",
"path": "chrome/common/extensions/docs/server2/branch_utility.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "37073"
},
{
"name": "Batchfile",
"bytes": "8451"
},
{
"name": "C",
"bytes": "9568645"
},
{
"name": "C++",
"bytes": "246813997"
},
{
"name": "CSS",
"bytes": "943687"
},
{
"name": "DM",
"bytes": "60"
},
{
"name": "Groff",
"bytes": "2494"
},
{
"name": "HTML",
"bytes": "27371019"
},
{
"name": "Java",
"bytes": "15348315"
},
{
"name": "JavaScript",
"bytes": "20872607"
},
{
"name": "Makefile",
"bytes": "70983"
},
{
"name": "Objective-C",
"bytes": "2029825"
},
{
"name": "Objective-C++",
"bytes": "10156554"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "182741"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "494625"
},
{
"name": "Python",
"bytes": "8594611"
},
{
"name": "Shell",
"bytes": "486464"
},
{
"name": "Standard ML",
"bytes": "5106"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
}
|
from django.utils.translation import ugettext_lazy as _
from horizon import tabs
class NetworkProfileTab(tabs.Tab):
name = _("Network Profile")
slug = "network_profile"
template_name = 'router/nexus1000v/network_profile/index.html'
def get_context_data(self, request):
return None
class PolicyProfileTab(tabs.Tab):
name = _("Policy Profile")
slug = "policy_profile"
template_name = 'router/nexus1000v/policy_profile/index.html'
preload = False
class IndexTabs(tabs.TabGroup):
slug = "indextabs"
tabs = (NetworkProfileTab, PolicyProfileTab)
|
{
"content_hash": "4029253c86f9f856795e99f9161f52b6",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 66,
"avg_line_length": 24.833333333333332,
"alnum_prop": 0.7063758389261745,
"repo_name": "JioCloud/horizon",
"id": "d3d36900072f4b77168b59f5db3c4ed221f2b6d4",
"size": "1275",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/router/nexus1000v/tabs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "296932"
},
{
"name": "JavaScript",
"bytes": "713370"
},
{
"name": "Python",
"bytes": "3614755"
},
{
"name": "Shell",
"bytes": "15387"
}
],
"symlink_target": ""
}
|
from lastuserapp import db
import lastuser_core.models as models
from .test_db import TestDatabaseFixture
class TestClient(TestDatabaseFixture):
def setUp(self):
super(TestClient, self).setUp()
self.user = models.User.query.filter_by(username=u"user1").first()
class TestUserClientPermissions(TestDatabaseFixture):
def setUp(self):
super(TestUserClientPermissions, self).setUp()
self.user = models.User.query.filter_by(username=u"user1").first()
self.create_fixtures()
def create_fixtures(self):
# Add permission to the client
client = models.Client.query.filter_by(user=self.user).first()
self.permission = models.UserClientPermissions(user=self.user, client=client)
self.permission.permissions = u"admin"
db.session.add(self.permission)
db.session.commit()
class TestTeamClientPermissions(TestDatabaseFixture):
def setUp(self):
super(TestTeamClientPermissions, self).setUp()
self.user = models.User.query.filter_by(username=u"user1").first()
self.client = models.Client.query.filter_by(user=self.user).first()
self.create_fixtures()
def create_fixtures(self):
self.org = models.Organization(title=u"test", name=u"Test")
self.org.owners.users.append(self.user)
db.session.add(self.org)
self.team = models.Team(userid=self.user.userid, title=u"developers", org=self.org)
db.session.add(self.team)
self.team_client_permission = models.TeamClientPermissions(team=self.team, client=self.client, access_permissions=u"admin")
db.session.add(self.team_client_permission)
db.session.commit()
class TestResource(TestDatabaseFixture):
def setUp(self):
super(TestResource, self).setUp()
self.user = models.User.query.filter_by(username=u"user1").first()
self.client = models.Client.query.filter_by(user=self.user).first()
self.create_fixtures()
def create_fixtures(self):
resource = models.Resource(name=u"resource", title=u"Resource", client=self.client)
db.session.add(resource)
db.session.commit()
def test_find_all(self):
resources = self.client.resources
self.assertEqual(len(resources), 2)
self.assertEqual(set([r.name for r in resources]), set([u'test_resource', u'resource']))
class TestClientTeamAccess(TestDatabaseFixture):
def setUp(self):
super(TestClientTeamAccess, self).setUp()
self.user = models.User.query.filter_by(username=u"user1").first()
self.client = models.Client.query.filter_by(user=self.user).first()
self.client.team_access = True
db.session.commit()
self.create_fixtures()
def create_fixtures(self):
self.org = models.Organization(title=u"test", name=u"Test")
self.org.owners.users.append(self.user)
db.session.add(self.org)
self.team = models.Team(userid=self.user.userid, title=u"developers", org=self.org)
db.session.add(self.team)
self.team_client_permission = models.TeamClientPermissions(team=self.team, client=self.client, access_permissions=u"admin")
db.session.add(self.team_client_permission)
self.client_team_access = models.ClientTeamAccess(org=self.org, client=self.client, access_level=models.CLIENT_TEAM_ACCESS.ALL)
db.session.add(self.client_team_access)
db.session.commit()
def test_find_all(self):
self.assertIs(self.client.org_team_access[0], self.client_team_access)
class TestPermission(TestDatabaseFixture):
def setUp(self):
super(TestPermission, self).setUp()
self.user = models.User.query.filter_by(username=u"user1").first()
self.create_fixtures()
def create_fixtures(self):
self.org = models.Organization(title=u"test", name=u"Test")
self.org.owners.users.append(self.user)
db.session.add(self.org)
self.permission = models.Permission(user=self.user, org=self.org, name=u"admin", title=u"admin", allusers=True)
db.session.add(self.permission)
db.session.commit()
|
{
"content_hash": "d4f8e14a9128d279838e491979996717",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 135,
"avg_line_length": 41.51,
"alnum_prop": 0.6824861479161648,
"repo_name": "sindhus/lastuser",
"id": "d6867d26226f1afd3fa0a56ea3f5d86f1cbbe0df",
"size": "4176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_model_client.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "3623"
},
{
"name": "HTML",
"bytes": "35810"
},
{
"name": "JavaScript",
"bytes": "145"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "349287"
},
{
"name": "Ruby",
"bytes": "404"
},
{
"name": "Shell",
"bytes": "40"
}
],
"symlink_target": ""
}
|
import json
import cPickle as pickle
from bubbly.model import Model, ModelGroup
from bubbly.extractors import MultiViewExtractor, ManyManyExtractors
from bubbly.dr1 import WideLocationGenerator
from bubbly.wiserf import WiseRF
def make_model(mod3):
params = {'max_features': 'auto',
'min_samples_split': 4,
'n_jobs': 2,
'criterion': 'infogain',
'n_estimators': 800}
ex = MultiViewExtractor(ManyManyExtractors())
loc = WideLocationGenerator(mod3)
clf = WiseRF(**params)
return Model(ex, loc, clf)
def train_model(model, mod3):
data = json.load(open('../models/training_data_%i.json' % mod3))
model.fit(data['pos'], data['neg'])
return model
def main():
models = [train_model(make_model(i), i) for i in [0, 1, 2]]
mg = ModelGroup(*models)
mg.save('../models/full_classifier.dat')
if __name__ == "__main__":
main()
|
{
"content_hash": "ea0c290499a04f4f09367822061f4833",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 68,
"avg_line_length": 26.428571428571427,
"alnum_prop": 0.6367567567567568,
"repo_name": "ChrisBeaumont/brut",
"id": "7d4a647654cea0e1eceaaa55bb138f68a8431d79",
"size": "925",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/build_full_classifier.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "148646"
},
{
"name": "Shell",
"bytes": "40"
},
{
"name": "TeX",
"bytes": "89337"
}
],
"symlink_target": ""
}
|
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
while root:
if root.val < p.val and root.val < q.val:
root = root.right
elif root.val > p.val and root.val > q.val:
root = root.left
else:
break
return root
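# Example (the standard LeetCode 235 sample tree rooted at 6, with 2 and 8 as
# children): lowestCommonAncestor(root, node 2, node 8) returns the node 6, and
# lowestCommonAncestor(root, node 2, node 4) returns the node 2, because a node
# counts as its own ancestor.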
|
{
"content_hash": "da0abef95ddf4413ab6409b66e1719bb",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 97,
"avg_line_length": 30.4375,
"alnum_prop": 0.5030800821355236,
"repo_name": "jiadaizhao/LeetCode",
"id": "d694595f14442339fe6f350f282cb418c34e930a",
"size": "524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "0201-0300/0235-Lowest Common Ancestor of a Binary Search Tree/0235-Lowest Common Ancestor of a Binary Search Tree.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1140864"
},
{
"name": "Java",
"bytes": "34062"
},
{
"name": "Python",
"bytes": "758800"
},
{
"name": "Shell",
"bytes": "698"
},
{
"name": "TSQL",
"bytes": "774"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import click
from jinja2.exceptions import UndefinedError
from djay.application import get_current_application
class GenerateCommand(click.MultiCommand):
@property
def application(self):
if not hasattr(self, "_application"):
self._application = get_current_application()
return self._application
def list_commands(self, context):
return self.application.blueprints.keys()
def get_command(self, context, name):
return self.application.blueprints[name].load_context()
def invoke(self, context):
given_args = context.protected_args + context.args
interactive = True
args = []
for arg in given_args:
if arg == "--interactive":
interactive = True
elif arg == "--not-interactive":
interactive = False
else:
args.append(arg)
name = args[0]
application = self.application
if not application:
raise click.ClickException("Could not locate application")
blueprint = application.blueprints.get(name)
if not blueprint:
raise click.ClickException("Could not locate blueprint")
command = blueprint.load_context()
args = args[1:]
ctx = command.main(args, standalone_mode=False)
try:
return application.generate(blueprint, ctx, interactive=interactive)
except UndefinedError as e:
raise click.ClickException(
"%s.\n"
"The blueprint's context may be invalid.\n"
"Blueprint: %s\n"
"Context: %s" % (str(e), str(blueprint), str(ctx))
)
@click.command(cls=GenerateCommand)
@click.option("--interactive/--not-interactive", default=True)
def generate(*args, **kwargs):
"""Generate a code stub."""
pass
|
{
"content_hash": "4946560b27c6a6aad775c878c3be744d",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 80,
"avg_line_length": 32,
"alnum_prop": 0.6067708333333334,
"repo_name": "aleontiev/dj",
"id": "eab79ad4c681a946a70687b9775e04b1b7bd4566",
"size": "1920",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "djay/commands/generate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1300"
},
{
"name": "Python",
"bytes": "67003"
}
],
"symlink_target": ""
}
|
import cx_Oracle
from django.db.backends.oracle.introspection import DatabaseIntrospection
class OracleIntrospection(DatabaseIntrospection):
# Associating any OBJECTVAR instances with GeometryField. Of course,
# this won't work right on Oracle objects that aren't MDSYS.SDO_GEOMETRY,
# but it is the only object type supported within Django anyways.
data_types_reverse = DatabaseIntrospection.data_types_reverse.copy()
data_types_reverse[cx_Oracle.OBJECT] = 'GeometryField'
def get_geometry_type(self, table_name, geo_col):
cursor = self.connection.cursor()
try:
# Querying USER_SDO_GEOM_METADATA to get the SRID and dimension information.
try:
cursor.execute(
'SELECT "DIMINFO", "SRID" FROM "USER_SDO_GEOM_METADATA" '
'WHERE "TABLE_NAME"=%s AND "COLUMN_NAME"=%s',
(table_name.upper(), geo_col.upper())
)
row = cursor.fetchone()
except Exception as exc:
raise Exception(
'Could not find entry in USER_SDO_GEOM_METADATA '
'corresponding to "%s"."%s"' % (table_name, geo_col)
) from exc
# TODO: Research way to find a more specific geometry field type for
# the column's contents.
field_type = 'GeometryField'
# Getting the field parameters.
field_params = {}
dim, srid = row
if srid != 4326:
field_params['srid'] = srid
# Length of object array ( SDO_DIM_ARRAY ) is number of dimensions.
dim = len(dim)
if dim != 2:
field_params['dim'] = dim
finally:
cursor.close()
return field_type, field_params
|
{
"content_hash": "09cda2b9f4c1cafc9de9fc4ef763a224",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 88,
"avg_line_length": 40.02173913043478,
"alnum_prop": 0.5714285714285714,
"repo_name": "intgr/django",
"id": "886441a9d0349115a0ef89f6ecf01c0dfaed7baf",
"size": "1841",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "django/contrib/gis/db/backends/oracle/introspection.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "55935"
},
{
"name": "HTML",
"bytes": "182977"
},
{
"name": "JavaScript",
"bytes": "252645"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11824885"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
import os
import math
import logging
import sys
logger = logging.getLogger(__name__)
logger.debug('Logging in {0} started.'.format(__name__))
try:
import dynamixel_functions as dynamixel
logger.debug('Imported dynamixel_functions.')
except Exception as e:
logger.critical("Importing dynamixel_functions failed!")
logger.debug(e)
class servo:
# =======================================
# Public class attributes
# =======================================
#TODO: configure debug-structure (servo)
#TODO: maybe build a dictionary?
# Control table address
ADDR_PRO_MAX_POSITION_LIMIT = 36
ADDR_PRO_MIN_POSITION_LIMIT = 40
ADDR_PRO_TORQUE_ENABLE = 562
ADDR_PRO_GOAL_POSITION = 596
ADDR_PRO_GOAL_TORQUE = 604
ADDR_PRO_GOAL_VELOCITY = 600
ADDR_PRO_PRESENT_POSITION = 611
ADDR_PRO_PRESENT_VELOCITY = 615
ADDR_PRO_PRESENT_CURRENT = 621
# Movement values
TORQUE_ENABLE = 1
TORQUE_DISABLE = 0
DXL_MOVING_STATUS_THRESHOLD = 20
# Protocol version
PROTOCOL_VERSION = 2
# Communication values
COMM_SUCCESS = 0
COMM_TX_FAIL = -1001
port_num = -1 # Port-No. will be set in 'initialize_port'
# For Dynamixel H42-20-S300-R
ticks_per_turn = 303750
ticks_per_half_turn= ticks_per_turn/2
# Set True to get debug-info
debug = True
# =======================================
# Private methods
# =======================================
# Constructor saves motor-specific settings
def __init__(self, DXL_ID, BAUDRATE, POS_MIN, POS_MAX, CLOCKWISE, DEVICENAME):
#TODO: optimize initialization
self.ID = DXL_ID
self.BAUDRATE = BAUDRATE
self.POS_MIN = POS_MIN
self.POS_MAX = POS_MAX
self.CLOCKWISE = CLOCKWISE
self.DEVICENAME = DEVICENAME
# =======================================
# Public methods
# =======================================
# Establishes a connection to the motor and transmits motor-specific settings
def initialize_port(self):
try:
servo.port_num = dynamixel.portHandler(self.DEVICENAME)
except Exception as e:
logger.critical('Working with dynamixel porthandler failed. Exiting...')
logger.debug(e)
quit()
dynamixel.packetHandler()
success_open_port = dynamixel.openPort(servo.port_num)
if servo.debug:
if success_open_port:
logger.info("Succeeded to open the port!")
else:
logger.critical("Failed to open the port! Exiting...")
quit()
if success_open_port:
success_set_baudrate = dynamixel.setBaudRate(servo.port_num, self.BAUDRATE)
if servo.debug:
if success_set_baudrate:
logger.info("Succeeded to change the baudrate!")
else:
logger.critical("Failed to change the baudrate! Exiting...")
quit()
# Close communication with USB-to-Dynamixel
def close_port(self):
dynamixel.closePort(servo.port_num)
# Activates power consumption for halting position
def enable_torque(self):
dynamixel.write1ByteTxRx(servo.port_num, servo.PROTOCOL_VERSION, self.ID, servo.ADDR_PRO_TORQUE_ENABLE,
self.TORQUE_ENABLE)
dxl_comm_result = dynamixel.getLastTxRxResult(servo.port_num, servo.PROTOCOL_VERSION)
dxl_error = dynamixel.getLastRxPacketError(servo.port_num, servo.PROTOCOL_VERSION)
#-
if dxl_comm_result != servo.COMM_SUCCESS:
logger.debug(dynamixel.getTxRxResult(servo.PROTOCOL_VERSION, dxl_comm_result))
elif dxl_error != 0:
logger.error(dynamixel.getRxPacketError(servo.PROTOCOL_VERSION, dxl_error))
# Deactivates power consumption for manual operation
def disable_torque(self):
dynamixel.write1ByteTxRx(servo.port_num, servo.PROTOCOL_VERSION, self.ID, servo.ADDR_PRO_TORQUE_ENABLE,
servo.TORQUE_DISABLE)
dxl_comm_result = dynamixel.getLastTxRxResult(servo.port_num, servo.PROTOCOL_VERSION)
dxl_error = dynamixel.getLastRxPacketError(servo.port_num, servo.PROTOCOL_VERSION)
#-
if dxl_comm_result != servo.COMM_SUCCESS:
logger.debug(dynamixel.getTxRxResult(servo.PROTOCOL_VERSION, dxl_comm_result))
elif dxl_error != 0:
logger.error(dynamixel.getRxPacketError(servo.PROTOCOL_VERSION, dxl_error))
# Moves to target position
def write_position(self, dxl_goal_position):
if not self.CLOCKWISE:
dxl_goal_position=dxl_goal_position*(-1)
dynamixel.write4ByteTxRx(servo.port_num, servo.PROTOCOL_VERSION, self.ID, servo.ADDR_PRO_GOAL_POSITION,
dxl_goal_position)
dxl_comm_result = dynamixel.getLastTxRxResult(servo.port_num, servo.PROTOCOL_VERSION)
dxl_error = dynamixel.getLastRxPacketError(servo.port_num, servo.PROTOCOL_VERSION)
#-
if dxl_comm_result != servo.COMM_SUCCESS:
logger.debug(dynamixel.getTxRxResult(servo.PROTOCOL_VERSION, dxl_comm_result))
elif dxl_error != 0:
logger.error(dynamixel.getRxPacketError(servo.PROTOCOL_VERSION, dxl_error))
if dxl_goal_position > self.POS_MAX or dxl_goal_position < self.POS_MIN:
logger.error('Goalposition of Servo {0} out of range!'.format(self.ID))
# Returns present position
def read_present_position(self):
dxl_present_position = dynamixel.read4ByteTxRx(servo.port_num, servo.PROTOCOL_VERSION, self.ID,
servo.ADDR_PRO_PRESENT_POSITION)
dxl_comm_result = dynamixel.getLastTxRxResult(servo.port_num, servo.PROTOCOL_VERSION)
dxl_error = dynamixel.getLastRxPacketError(servo.port_num, servo.PROTOCOL_VERSION)
if dxl_comm_result == servo.COMM_SUCCESS:
if not self.CLOCKWISE:
dxl_present_position=dxl_present_position*(-1)
return dxl_present_position
#-
else:
if dxl_comm_result != servo.COMM_SUCCESS:
logger.debug(dynamixel.getTxRxResult(servo.PROTOCOL_VERSION, dxl_comm_result))
elif dxl_error != 0:
logger.error(dynamixel.getRxPacketError(servo.PROTOCOL_VERSION, dxl_error))
return servo.read_present_position(self)
# Set desired velocity of movement
def write_velocity(self, dxl_goal_velocity):
dynamixel.write4ByteTxRx(servo.port_num, servo.PROTOCOL_VERSION, self.ID, servo.ADDR_PRO_GOAL_VELOCITY,
dxl_goal_velocity)
dxl_comm_result = dynamixel.getLastTxRxResult(servo.port_num, servo.PROTOCOL_VERSION)
dxl_error = dynamixel.getLastRxPacketError(servo.port_num, servo.PROTOCOL_VERSION)
#-
if dxl_comm_result != servo.COMM_SUCCESS:
logger.debug(dynamixel.getTxRxResult(servo.PROTOCOL_VERSION, dxl_comm_result))
elif dxl_error != 0:
logger.error(dynamixel.getRxPacketError(servo.PROTOCOL_VERSION, dxl_error))
# Set maximum and minimum of possible position
# Positions given in ticks
def write_position_limits(self):
#try to change maximum position
dynamixel.write4ByteTxRx(servo.port_num, servo.PROTOCOL_VERSION, self.ID, servo.ADDR_PRO_MAX_POSITION_LIMIT, self.POS_MAX )
dxl_comm_result = dynamixel.getLastTxRxResult(servo.port_num, servo.PROTOCOL_VERSION)
dxl_error = dynamixel.getLastRxPacketError(servo.port_num, servo.PROTOCOL_VERSION)
#-
        if dxl_comm_result != servo.COMM_SUCCESS:
            logger.debug(dynamixel.getTxRxResult(servo.PROTOCOL_VERSION, dxl_comm_result))
        elif dxl_error != 0:
            logger.error(dynamixel.getRxPacketError(servo.PROTOCOL_VERSION, dxl_error))
        else:
            logger.info("successfully changed maximum position")
# try to change minimum position
dynamixel.write4ByteTxRx(servo.port_num, servo.PROTOCOL_VERSION, self.ID, servo.ADDR_PRO_MIN_POSITION_LIMIT, self.POS_MIN)
dxl_comm_result = dynamixel.getLastTxRxResult(servo.port_num, servo.PROTOCOL_VERSION)
dxl_error = dynamixel.getLastRxPacketError(servo.port_num, servo.PROTOCOL_VERSION)
#-
        if dxl_comm_result != servo.COMM_SUCCESS:
            logger.debug(dynamixel.getTxRxResult(servo.PROTOCOL_VERSION, dxl_comm_result))
        elif dxl_error != 0:
            logger.error(dynamixel.getRxPacketError(servo.PROTOCOL_VERSION, dxl_error))
        else:
            logger.info("successfully changed minimum position")
# Convert ticks to degrees
def tick_to_deg(self, tick):
        deg = tick * (180.0 / self.ticks_per_half_turn)
        return deg
# Convert degrees to ticks
def deg_to_tick(self, deg):
        tick = int(float(deg) * (self.ticks_per_half_turn / 180.0))
return tick
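# Worked example for the conversion helpers above (values follow from the
# constants defined in this class for the H42-20-S300-R): a full turn is
# 303750 ticks, so half a turn is 151875 ticks, deg_to_tick(90) == 75937 and
# tick_to_deg(151875) == 180.0.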
|
{
"content_hash": "2a8d01849b9ed84e3d7a7c3322510a9b",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 131,
"avg_line_length": 41.10091743119266,
"alnum_prop": 0.6332589285714286,
"repo_name": "shorlee/felix",
"id": "4a2fd6f27d92a97439e477654b2a7071b7592e03",
"size": "9330",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "felix/servo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "41549"
}
],
"symlink_target": ""
}
|
from listenclosely.services.base import BaseMessageServiceBackend
import random
import string
class DummyMessageService(BaseMessageServiceBackend):
def __init__(self, caller, *args, **kwargs):
super(DummyMessageService, self).__init__(caller, *args, **kwargs)
self.incoming_messages = []
self.outgoing_messages = []
def listen(self):
pass
def _message_id(self):
return ''.join(random.choice(string.ascii_lowercase) for i in range(10))
def send_message(self, id_service, content):
msg_id = self._message_id()
self.outgoing_messages.append((msg_id, id_service, content))
return msg_id
def on_message(self, id_service, content):
self.caller.on_message(id_service, content)
self.incoming_messages.append((id_service, content))
def disconnect(self):
pass
|
{
"content_hash": "f027c4a327f69bd08c08041aa4ed1b70",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 80,
"avg_line_length": 30.862068965517242,
"alnum_prop": 0.6435754189944134,
"repo_name": "jlmadurga/listenclosely",
"id": "2b5a5c73aaa5964506f137f6bfc1772c49fd7bb9",
"size": "895",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "listenclosely/services/dummy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1322"
},
{
"name": "Python",
"bytes": "51044"
}
],
"symlink_target": ""
}
|
from optparse import make_option
import sys
from django.conf import settings
from django.core.management.base import NoArgsCommand
from django.core.management.color import no_style
from django.core.management.sql import custom_sql_for_model, emit_post_sync_signal
from django.db import connections, router, transaction, models, DEFAULT_DB_ALIAS
from django.utils.datastructures import SortedDict
from django.utils.importlib import import_module
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.'),
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to synchronize. '
'Defaults to the "default" database.'),
)
help = "Create the database tables for all apps in INSTALLED_APPS whose tables haven't already been created."
def handle_noargs(self, **options):
verbosity = int(options.get('verbosity', 1))
interactive = options.get('interactive')
show_traceback = options.get('traceback', False)
self.style = no_style()
# Import the 'management' module within each installed app, to register
# dispatcher events.
for app_name in settings.INSTALLED_APPS:
try:
import_module('.management', app_name)
except ImportError, exc:
# This is slightly hackish. We want to ignore ImportErrors
# if the "management" module itself is missing -- but we don't
# want to ignore the exception if the management module exists
# but raises an ImportError for some reason. The only way we
# can do this is to check the text of the exception. Note that
# we're a bit broad in how we check the text, because different
# Python implementations may not use the same text.
# CPython uses the text "No module named management"
# PyPy uses "No module named myproject.myapp.management"
msg = exc.args[0]
if not msg.startswith('No module named') or 'management' not in msg:
raise
db = options.get('database', DEFAULT_DB_ALIAS)
connection = connections[db]
cursor = connection.cursor()
# Get a list of already installed *models* so that references work right.
tables = connection.introspection.table_names()
seen_models = connection.introspection.installed_models(tables)
created_models = set()
pending_references = {}
# Build the manifest of apps and models that are to be synchronized
all_models = [
(app.__name__.split('.')[-2],
[m for m in models.get_models(app, include_auto_created=True)
if router.allow_syncdb(db, m)])
for app in models.get_apps()
]
def model_installed(model):
opts = model._meta
converter = connection.introspection.table_name_converter
return not ((converter(opts.db_table) in tables) or
(opts.auto_created and converter(opts.auto_created._meta.db_table) in tables))
manifest = SortedDict(
(app_name, filter(model_installed, model_list))
for app_name, model_list in all_models
)
# Create the tables for each model
if verbosity >= 1:
print "Creating tables ..."
for app_name, model_list in manifest.items():
for model in model_list:
# Create the model's database table, if it doesn't already exist.
if verbosity >= 3:
print "Processing %s.%s model" % (app_name, model._meta.object_name)
sql, references = connection.creation.sql_create_model(model, self.style, seen_models)
seen_models.add(model)
created_models.add(model)
for refto, refs in references.items():
pending_references.setdefault(refto, []).extend(refs)
if refto in seen_models:
sql.extend(connection.creation.sql_for_pending_references(refto, self.style, pending_references))
sql.extend(connection.creation.sql_for_pending_references(model, self.style, pending_references))
if verbosity >= 1 and sql:
print "Creating table %s" % model._meta.db_table
for statement in sql:
cursor.execute(statement)
tables.append(connection.introspection.table_name_converter(model._meta.db_table))
transaction.commit_unless_managed(using=db)
# Send the post_syncdb signal, so individual apps can do whatever they need
# to do at this point.
emit_post_sync_signal(created_models, verbosity, interactive, db)
# The connection may have been closed by a syncdb handler.
cursor = connection.cursor()
# Install custom SQL for the app (but only if this
# is a model we've just created)
if verbosity >= 1:
print "Installing custom SQL ..."
for app_name, model_list in manifest.items():
for model in model_list:
if model in created_models:
custom_sql = custom_sql_for_model(model, self.style, connection)
if custom_sql:
if verbosity >= 2:
print "Installing custom SQL for %s.%s model" % (app_name, model._meta.object_name)
try:
for sql in custom_sql:
cursor.execute(sql)
except Exception, e:
sys.stderr.write("Failed to install custom SQL for %s.%s model: %s\n" % \
(app_name, model._meta.object_name, e))
if show_traceback:
import traceback
traceback.print_exc()
transaction.rollback_unless_managed(using=db)
else:
transaction.commit_unless_managed(using=db)
else:
if verbosity >= 3:
print "No custom SQL for %s.%s model" % (app_name, model._meta.object_name)
if verbosity >= 1:
print "Installing indexes ..."
# Install SQL indicies for all newly created models
for app_name, model_list in manifest.items():
for model in model_list:
if model in created_models:
index_sql = connection.creation.sql_indexes_for_model(model, self.style)
if index_sql:
if verbosity >= 2:
print "Installing index for %s.%s model" % (app_name, model._meta.object_name)
try:
for sql in index_sql:
cursor.execute(sql)
except Exception, e:
sys.stderr.write("Failed to install index for %s.%s model: %s\n" % \
(app_name, model._meta.object_name, e))
transaction.rollback_unless_managed(using=db)
else:
transaction.commit_unless_managed(using=db)
from django.core.management import call_command
call_command('loaddata', 'initial_data', verbosity=verbosity, database=db)
|
{
"content_hash": "04e4bc98eabea8649588472212a04b6b",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 121,
"avg_line_length": 50.58860759493671,
"alnum_prop": 0.5536094082322032,
"repo_name": "hunch/hunch-gift-app",
"id": "5fa6e3f6fd8eb63630bcb8713ee38c49b0f44f38",
"size": "7993",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "django/core/management/commands/syncdb.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "130167"
},
{
"name": "Python",
"bytes": "4088295"
},
{
"name": "Shell",
"bytes": "46"
}
],
"symlink_target": ""
}
|
import numpy
import time
import ctypes
from ..kdtree import kdtree
from . import expr
__all__ = [
'Points',
'points',
'zeros',
'ones',
'empty',
'zeros_like',
'ones_like',
'empty_like',
'rand',
'load'
]
class Final(type):
# copied from https://stackoverflow.com/questions/16056574
def __new__(cls, name, bases, classdict):
for b in bases:
if isinstance(b, Final):
raise TypeError("type " + b.__name__ +
" is not an acceptable base type")
return type.__new__(cls, name, bases, dict(classdict))
def _memaddr(obj):
return obj.ctypes.get_data()
def points(object, dtype=None, copy=True):
if not copy and isinstance(object, numpy.ndarray) \
and (dtype is None or dtype == object.dtype):
ret = object.view(Points)
else:
temp = numpy.array(object, dtype=dtype, copy=False)
ret = empty_like(temp)
ret[:] = temp
return ret
def zeros(shape, dtype=float):
ret = Points(shape=shape, dtype=dtype)
ret[:] = 0
return ret
def ones(shape, dtype=float):
ret = Points(shape=shape, dtype=dtype)
ret[:] = 1
return ret
def empty(shape, dtype=float):
return Points(shape=shape, dtype=dtype)
def zeros_like(a, dtype=None):
return zeros(a.shape, dtype=(a.dtype if dtype is None else dtype))
def ones_like(a, dtype=None):
    return ones(a.shape, dtype=(a.dtype if dtype is None else dtype))
def empty_like(a, dtype=None):
    return empty(a.shape, dtype=(a.dtype if dtype is None else dtype))
def rand(*dims):
ret = empty(shape=dims, dtype=float)
ret[:] = numpy.random.rand(*dims)
return ret
def load(file, **kwargs):
# wrapper around numpy.load
# TODO: this copies to numpy array, then to a Points object;
# find way to avoid this extra copy
return points(numpy.load(file, **kwargs))
class Points (numpy.ndarray):
_last_modified = dict()
# make Points non subclass-able to simplify write control
# TODO: are there any use cases for subclassing Points?
__metaclass__ = Final
def __new__(cls, *args, **kwargs):
return super(Points, cls).__new__(cls, *args, **kwargs)
def __array_finalize__(self, obj):
self._last_updated = None
self._tree = None
if obj is not None and not isinstance(obj, Points):
# arrived at here via view() of a non-Points object
raise TypeError('Detected attempt at creating Points-type '
'view on non-Points object.')
if obj is None and not self.flags.owndata:
raise TypeError('Detected attempt at creating Points-type '
'view on buffer object via __new__(buffer=...)')
if obj is None:
# arrived at here via __new__
self._memsize = self.size * self.dtype.itemsize
self._memloc = _memaddr(self)
elif _memaddr(self) < obj._memloc or \
_memaddr(self) >= obj._memloc + obj._memsize:
# arrived at here via copy()
self._memsize = self.size * self.dtype.itemsize
self._memloc = _memaddr(self)
else:
# arrived at here via slicing/indexing
# or view() of a Points object
self._memsize = obj._memsize
self._memloc = obj._memloc
# cannot set writeable flag to False here,
# because copy() performs assignment after __array_finalize__
def __init__(self, *args, **kwargs):
self.flags.writeable = False
def copy(self):
x = super(Points, self).copy()
x.flags.writeable = False
return x
def _record_modify_time(self):
Points._last_modified[self._memloc] = time.time()
def _update_kd_tree(self):
# if there is no recorded last modify time for self._memloc,
# then self has either not been modified yet since creation,
# or _last_modified dictionary has been cleared. Either way,
# the k-d tree needs updating; we set the last modify time to
# the current time to trigger this.
if Points._last_modified.get(self._memloc) is None:
Points._last_modified[self._memloc] = time.time()
# note: None < x, for any number x
build_time = None
if self._last_updated is None \
or self._last_updated <= Points._last_modified[self._memloc]:
# note: do not need to explicitly call __del__()
# as it is automatically called when overwritten
build_time = time.time()
self._tree = kdtree._build(self)
build_time = time.time() - build_time
self._last_updated = time.time() # record time *after* build
return build_time
def nbhds(self, queries=None, k=1, r=None, verbose=False):
self._update_kd_tree()
return kdtree._query(self._tree, queries=queries, k=k, dmax=r)
def NBHDS(self, queries=None, k=1, r=None, verbose=False):
return expr.nbhds_op(self, queries, k, r)
def _guard(self, f):
def f_guarded(*args, **kwargs):
if self.base is not None:
self.base.flags.writeable = True
self.flags.writeable = True
ret = None
try:
ret = f(*args, **kwargs)
finally:
self.flags.writeable = False
if self.base is not None:
self.base.flags.writeable = False
self._record_modify_time() # record time *after* computation
return ret
return f_guarded
# override methods that modify object content to
# record timestamp, signalling need for k-d tree update
# inplace arithmetic methods
# e.g. +=, -=, *=, /=, //=, %=, **=, <<=, >>=, &=, ^=, |=
def __iadd__(self, other):
return self._guard(super(Points, self).__iadd__)(other)
def __isub__(self, other):
return self._guard(super(Points, self).__isub__)(other)
def __imul__(self, other):
return self._guard(super(Points, self).__imul__)(other)
def __idiv__(self, other):
return self._guard(super(Points, self).__idiv__)(other)
def __itruediv__(self, other):
return self._guard(super(Points, self).__itruediv__)(other)
def __ifloordiv__(self, other):
return self._guard(super(Points, self).__ifloordiv__)(other)
def __imod__(self, other):
return self._guard(super(Points, self).__imod__)(other)
def __ipow__(self, other):
return self._guard(super(Points, self).__ipow__)(other)
def __ilshift__(self, other):
return self._guard(super(Points, self).__ilshift__)(other)
def __irshift__(self, other):
return self._guard(super(Points, self).__irshift__)(other)
def __iand__(self, other):
return self._guard(super(Points, self).__iand__)(other)
def __ixor__(self, other):
return self._guard(super(Points, self).__ixor__)(other)
def __ior__(self, other):
return self._guard(super(Points, self).__ior__)(other)
# indexing and slicing operator
def __setslice__(self, i, j, sequence):
return self._guard(super(Points, self).__setslice__)(i, j, sequence)
def __delslice__(self, i, j):
return self._guard(super(Points, self).__delslice__)(i, j)
def __getslice__(self, i, j):
return super(Points, self).__getslice__(i, j)
def __setitem__(self, key, value):
return self._guard(super(Points, self).__setitem__)(key, value)
def __delitem__(self, key):
return self._guard(super(Points, self).__delitem__)(key)
def __getitem__(self, key):
if isinstance(key, expr.expression):
return expr.index_op(
expr._make_expression(self), key, slice(None, None, None))
elif (isinstance(key, tuple) or isinstance(key, list)) \
and any([isinstance(x, expr.expression) for x in key]):
# key is a sequence containing at least one expression object
if len(key) == 2 and (
isinstance(key[0], expr.expression)
and isinstance(key[1], slice)
or isinstance(key[0], slice)
and isinstance(key[1], expr.expression)):
return expr.index_op(
expr._make_expression(self), key[0], key[1])
else:
raise TypeError(
'unsupported combination of types in index tuple: %s'
                    % repr([type(x) for x in key]))
else:
return super(Points, self).__getitem__(key)
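# Hedged usage sketch (illustrative addition; assumes the compiled pptk kdtree
# extension imported above is available). In-place writes go through _guard(),
# which records a per-buffer modify time, so the next neighbor query rebuilds
# the k-d tree instead of reusing a stale one.
if __name__ == '__main__':
    pts = points(numpy.random.rand(100, 3))
    pts.nbhds(k=2)  # first query builds the k-d tree lazily
    pts += 0.5  # guarded __iadd__ records a new modify time
    pts.nbhds(k=2)  # _update_kd_tree sees the newer timestamp and rebuilds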
|
{
"content_hash": "384275feabd3e6d29beb077e34650dbb",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 77,
"avg_line_length": 33.14448669201521,
"alnum_prop": 0.5725593667546174,
"repo_name": "heremaps/pptk",
"id": "b40e4d117dd629c54f20ac9dc4a0fe9bd9cb7613",
"size": "8717",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pptk/points/points.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1170"
},
{
"name": "C++",
"bytes": "327139"
},
{
"name": "CMake",
"bytes": "16817"
},
{
"name": "Python",
"bytes": "84666"
}
],
"symlink_target": ""
}
|
"""This code example updates a creative wrapper to the 'OUTER' wrapping order.
To determine which creative wrappers exist, run get_all_creative_wrappers.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import dfp
# Set the ID of the creative wrapper to update.
CREATIVE_WRAPPER_ID = 'INSERT_CREATIVE_WRAPPER_ID_HERE'
def main(client, creative_wrapper_id):
# Initialize appropriate service.
creative_wrapper_service = client.GetService('CreativeWrapperService',
version='v201502')
# Create statement to get a creative wrapper by ID.
values = [{
'key': 'creativeWrapperId',
'value': {
'xsi_type': 'NumberValue',
'value': creative_wrapper_id
}
}]
query = 'WHERE id = :creativeWrapperId'
statement = dfp.FilterStatement(query, values)
# Get creative wrappers.
response = creative_wrapper_service.getCreativeWrappersByStatement(
statement.ToStatement())
if 'results' in response:
updated_creative_wrappers = []
for creative_wrapper in response['results']:
creative_wrapper['ordering'] = 'OUTER'
updated_creative_wrappers.append(creative_wrapper)
# Update the creative wrappers on the server.
creative_wrappers = creative_wrapper_service.updateCreativeWrappers(
updated_creative_wrappers)
# Display results.
for creative_wrapper in creative_wrappers:
print (('Creative wrapper with ID \'%s\' and wrapping order \'%s\' '
'was updated.') % (creative_wrapper['id'],
creative_wrapper['ordering']))
else:
print 'No creative wrappers found to update.'
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, CREATIVE_WRAPPER_ID)
|
{
"content_hash": "e7d44f8628ac21763297ea362201c065",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 78,
"avg_line_length": 34.16393442622951,
"alnum_prop": 0.685700575815739,
"repo_name": "haveal/googleads-python-lib",
"id": "177ef7462f4f028ddc6865ec84bf216df515deed",
"size": "2702",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/dfp/v201502/creative_wrapper_service/update_creative_wrappers.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "168602"
}
],
"symlink_target": ""
}
|
import pytest
class TestClass(object):
@pytest.fixture
def something(self, request):
return request.instance
def test_method(self, something):
assert something is self
|
{
"content_hash": "d6419236905734e951bc1bad0c4a1894",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 37,
"avg_line_length": 19.9,
"alnum_prop": 0.6834170854271356,
"repo_name": "kawamon/hue",
"id": "1004d5d1352ca00f839781cf25e52c4f661f3e59",
"size": "223",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/pytest-4.6.11/testing/example_scripts/fixtures/fill_fixtures/test_funcarg_lookup_classlevel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "5786"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "Batchfile",
"bytes": "118907"
},
{
"name": "C",
"bytes": "3196521"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "308860"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "1050129"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "10981"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "7312"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "24999718"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "JSONiq",
"bytes": "4"
},
{
"name": "Java",
"bytes": "471854"
},
{
"name": "JavaScript",
"bytes": "28075556"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "Jupyter Notebook",
"bytes": "73168"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Lex",
"bytes": "264449"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "269655"
},
{
"name": "Mako",
"bytes": "3614942"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "31565"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "1412"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "3204"
},
{
"name": "Python",
"bytes": "76440000"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "95764"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "190718"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TSQL",
"bytes": "10013"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "317058"
},
{
"name": "TypeScript",
"bytes": "1607"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "521413"
},
{
"name": "Yacc",
"bytes": "2133855"
}
],
"symlink_target": ""
}
|
from rospy import init_node, Subscriber, Publisher, get_param
from rospy import Rate, is_shutdown, ROSInterruptException, spin, on_shutdown
from barc.msg import ECU
from numpy import pi
import rospy
import time
motor_pwm = 1500
servo_pwm = 1600
def arduino_interface():
global ecu_pub, motor_pwm, servo_pwm
init_node('arduino_interface')
# set node rate
loop_rate = 50
dt = 1.0 / loop_rate
rate = rospy.Rate(loop_rate)
time_prev = time.time()
ecu_pub = Publisher('ecu_pwm', ECU, queue_size = 10)
motor_pwm = 1500
servo_pwm = 1600
flag = 0
while not rospy.is_shutdown():
        if time.time() - time_prev >= 12:
            servo_pwm = 1600  # reset to straight ahead
            ecu_cmd = ECU(motor_pwm, servo_pwm)
            ecu_pub.publish(ecu_cmd)
            break
        elif time.time() - time_prev >= 7:
            servo_pwm = 1625  # send new steering angle command: [1450, 1500, 1575, 1625, 1700, 1750]
ecu_cmd = ECU(motor_pwm, servo_pwm)
ecu_pub.publish(ecu_cmd)
# wait
rate.sleep()
#############################################################
if __name__ == '__main__':
try:
arduino_interface()
except ROSInterruptException:
pass
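# Hedged timeline illustration (not part of the original node): with the values
# above, the loop publishes nothing for the first ~7 s, then publishes
# ECU(1500, 1625) at roughly 50 Hz between ~7 s and ~12 s, and finally publishes
# ECU(1500, 1600) once to re-centre the steering before breaking out of the loop.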
|
{
"content_hash": "752a722df3c6c08daed8b86ab9599257",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 99,
"avg_line_length": 26.244897959183675,
"alnum_prop": 0.5645412130637636,
"repo_name": "MPC-Berkeley/barc",
"id": "8d96866bab283280f7e8fe0a0074bde17b44bca4",
"size": "2288",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "workspace/src/labs/src/lab4/SteeringDynamics.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "37857"
},
{
"name": "C++",
"bytes": "34556"
},
{
"name": "CMake",
"bytes": "25703"
},
{
"name": "CSS",
"bytes": "143"
},
{
"name": "HTML",
"bytes": "27848"
},
{
"name": "JavaScript",
"bytes": "10764902"
},
{
"name": "Julia",
"bytes": "117617"
},
{
"name": "Less",
"bytes": "69047"
},
{
"name": "MATLAB",
"bytes": "9115"
},
{
"name": "Python",
"bytes": "343196"
},
{
"name": "SCSS",
"bytes": "69934"
},
{
"name": "Shell",
"bytes": "13578"
},
{
"name": "Vim script",
"bytes": "370"
}
],
"symlink_target": ""
}
|
from cliff import show
from gnocchiclient import utils
class CliStatusShow(show.ShowOne):
"""Show the status of measurements processing."""
def take_action(self, parsed_args):
status = utils.get_client(self).status.get()
d = {
"storage/total number of measures to process":
status['storage']['summary']['measures'],
"storage/number of metric having measures to process":
status['storage']['summary']['metrics'],
}
if 'metricd' in status:
d["metricd/processors"] = status['metricd']['processors']
return self.dict2columns(d)
|
{
"content_hash": "bc975c7f0182806d07b4ea8124957108",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 69,
"avg_line_length": 29.045454545454547,
"alnum_prop": 0.6118935837245696,
"repo_name": "gnocchixyz/python-gnocchiclient",
"id": "95d569eccc91e231909f707ed2f2e2dc506c93b2",
"size": "1214",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gnocchiclient/v1/status_cli.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "196841"
},
{
"name": "Shell",
"bytes": "645"
}
],
"symlink_target": ""
}
|
import subprocess
import signal
import sys
import time
import os
from uart_bridge import UartBridge
from threading import Thread
from fake_xbee import FakeXbee
include_path = os.path.dirname(os.path.realpath(__file__)) + "/../../src"
sys.path.insert(0, include_path)
from xbee_gateway import XBeeGateway
from decoder import Decoder
test_packet_proc = None
# Setup this handler so that the script ends gracefully
def signal_handler(sig, frame):
    print('You pressed Ctrl+C!')
    sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
#
# SETUP UART BRIDGE
#
# The UART bridge will open up two connections;
# one on ttyV1 and the other on ttyV2
#
# Here is an example data flow:
#
# |Fake Device| --> ttyV1 --> SOCAT Bridge --> ttyV2 --> |packet tester|
#
#
uart_bridge = UartBridge()
uart_bridge.start()
print("Waiting for the UART bridge to come up...")
time.sleep(2)
#
# SETUP FAKE DEVICE
#
def start_test_packet():
fake_xbee = FakeXbee('./ttyV1')
fake_xbee.connect()
fake_xbee.start_loop()
test_packet_thread = Thread(target=start_test_packet)
test_packet_thread.start()
#
# SETUP PACKET TESTER
#
# This is the python script that normally runs on the laptop
#
# subprocess.call("cd ../src && python packet_tester.py", shell=True)
def print_data(data, timestamp):
print(timestamp)
print(data)
decoder = Decoder()
decoder.register_callback(decoder.print_dictionary)
gateway = XBeeGateway()
gateway.register_callback(decoder.decode_data)
gateway.setup_xbee('./ttyV2', 9600)
gateway.begin_loop()
#
# BLOCK UNTIL CONTROL-C
#
test_packet_thread.join()
uart_bridge.wait()
|
{
"content_hash": "0668fbd5b37b17e3087002c760268a96",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 73,
"avg_line_length": 21.513513513513512,
"alnum_prop": 0.7267587939698492,
"repo_name": "scel-hawaii/data-gateway",
"id": "b97b7868ad55be06bd5962a8c841b36a3d7dc9e0",
"size": "1964",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/integration_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21901"
},
{
"name": "Shell",
"bytes": "290"
}
],
"symlink_target": ""
}
|
'''
Examines log generated by ccid_ctid.test.py, returns 0 if valid, 1 if not.
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import csv
ccid = []
ctid = []
# Read in ccid and ctid fields from each line of the generated report.
#
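# Hedged input illustration (not part of the original script): each line of the
# report is expected to hold two non-negative integers separated by a single
# space, e.g. "7 0", where the first field is the client connection id (ccid)
# and the second the client transaction id (ctid).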
for ln in csv.reader(sys.stdin, delimiter=' '):
if len(ln) != 2:
exit(code=1)
i = int(ln[0])
if i < 0:
exit(code=1)
ccid.append(i)
i = int(ln[1])
if i < 0:
exit(code=1)
ctid.append(i)
# Validate contents of report.
#
if (ccid[0] != ccid[1] and
ccid[1] != ccid[2] and
ccid[2] == ccid[3] and
ctid[2] != ctid[3] and
ccid[3] != ccid[4] and
ccid[4] == ccid[5] and
ctid[4] != ctid[5]):
exit(code=0)
# Failure exit if report was not valid.
#
exit(code=1)
|
{
"content_hash": "f47150e313c96dfabeb398a62b618ed6",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 75,
"avg_line_length": 28.830188679245282,
"alnum_prop": 0.6668848167539267,
"repo_name": "clearswift/trafficserver",
"id": "1b4cee5e885894d14be49a2d62f2541b2f0b4006",
"size": "1528",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "tests/gold_tests/logging/ccid_ctid_observer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13053"
},
{
"name": "C",
"bytes": "3363353"
},
{
"name": "C++",
"bytes": "11399274"
},
{
"name": "CSS",
"bytes": "8089"
},
{
"name": "HTML",
"bytes": "238770"
},
{
"name": "Java",
"bytes": "9881"
},
{
"name": "JavaScript",
"bytes": "1609"
},
{
"name": "Lex",
"bytes": "4029"
},
{
"name": "Lua",
"bytes": "380105"
},
{
"name": "M4",
"bytes": "271199"
},
{
"name": "Makefile",
"bytes": "196400"
},
{
"name": "Objective-C",
"bytes": "13254"
},
{
"name": "Perl",
"bytes": "67408"
},
{
"name": "Perl 6",
"bytes": "1163"
},
{
"name": "Protocol Buffer",
"bytes": "4013"
},
{
"name": "Python",
"bytes": "365710"
},
{
"name": "Roff",
"bytes": "2339"
},
{
"name": "Shell",
"bytes": "87299"
},
{
"name": "Vim script",
"bytes": "192"
},
{
"name": "Yacc",
"bytes": "3251"
}
],
"symlink_target": ""
}
|
import event_confirm
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
{
"content_hash": "5c422039370d643c01004e3fbfa6d3d5",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 65,
"avg_line_length": 43.5,
"alnum_prop": 0.8275862068965517,
"repo_name": "diogocs1/comps",
"id": "92e33960484f8b3f5e9ba21fba294898109e5129",
"size": "1067",
"binary": false,
"copies": "427",
"ref": "refs/heads/master",
"path": "web/addons/event/wizard/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "701"
},
{
"name": "CSS",
"bytes": "856533"
},
{
"name": "HTML",
"bytes": "299671"
},
{
"name": "Java",
"bytes": "620166"
},
{
"name": "JavaScript",
"bytes": "5844302"
},
{
"name": "Makefile",
"bytes": "21002"
},
{
"name": "PHP",
"bytes": "14259"
},
{
"name": "Python",
"bytes": "10647376"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "17746"
},
{
"name": "XSLT",
"bytes": "120278"
}
],
"symlink_target": ""
}
|
import core.implant
class EnumUsersJob(core.job.Job):
def create(self):
self.users = []
def report(self, handler, data, sanitize = False):
user = data.decode()
handler.reply(200)
if user == "Complete":
super(EnumUsersJob, self).report(handler, data, False)
if user.lower() not in [u.lower() for u in self.users]:
self.users.append(user)
def done(self):
if self.shell.domain_info:
all_domain_admins = [da for das in [[k[0].lower()+"\\"+da.lower(), k[1].lower()+"\\"+da.lower()] for k in self.shell.domain_info for da in self.shell.domain_info[k]["Domain Admins"]] for da in das]
self.users = [user+"*" if user.lower() in all_domain_admins else user for user in self.users]
header = "Logged in users on "+self.ip
self.results = "\n\n"+header+"\n"+"="*len(header)+"\n"
self.results += "\n".join(self.users)
self.results += "\n"
self.display()
def display(self):
self.print_good(self.results)
class EnumUsersImplant(core.implant.Implant):
NAME = "Enum Users"
DESCRIPTION = "Enumerates user sessions on the target system."
AUTHORS = ["zerosum0x0", "TheNaterz"]
STATE = "implant/gather/enum_users"
def load(self):
pass
def job(self):
return EnumUsersJob
def run(self):
payloads = {}
payloads["js"] = "data/implant/gather/enum_users.js"
self.dispatch(payloads, self.job)
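# Hedged output illustration (not part of the original module): for a target at
# 10.0.0.5 where CORP\alice and CORP\bob are logged in and alice is a Domain
# Admin, done() would print a report along the lines of:
#
#   Logged in users on 10.0.0.5
#   ===========================
#   CORP\alice*
#   CORP\bob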
|
{
"content_hash": "3a338874491c5c527df3dbb023ae417b",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 209,
"avg_line_length": 29.01923076923077,
"alnum_prop": 0.5931080185553347,
"repo_name": "zerosum0x0/koadic",
"id": "7981f9dbacec562f9a45197dff4523f0f17aa822",
"size": "1509",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/implant/gather/enum_users.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1153"
},
{
"name": "C",
"bytes": "152727"
},
{
"name": "C#",
"bytes": "4074"
},
{
"name": "C++",
"bytes": "17602"
},
{
"name": "Dockerfile",
"bytes": "192"
},
{
"name": "JavaScript",
"bytes": "99522"
},
{
"name": "Python",
"bytes": "2958758"
},
{
"name": "VBA",
"bytes": "1700"
},
{
"name": "VBScript",
"bytes": "14154"
},
{
"name": "XSLT",
"bytes": "295"
}
],
"symlink_target": ""
}
|
"""TensorBoard Summary Writer for TensorFlow Eager Execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import uuid
from tensorflow.contrib.summary import gen_summary_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_op_util
from tensorflow.python.ops import variable_scope
def _maybe_as_cpu_tensor(v):
if isinstance(v, (ops.EagerTensor, ops.Tensor)):
return v.as_cpu_tensor()
else:
return v
def _summary_writer_function(name, tensor, function, family=None):
def record():
with summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
function(tag, scope)
return True
return record
class SummaryWriter(object):
"""Writes summaries for TensorBoard, compatible with eager execution.
This class is the supported way of writing TensorBoard summaries under
eager execution.
"""
_CPU_DEVICE = "cpu:0"
def __init__(self,
logdir,
max_queue=10,
flush_secs=120,
filename_suffix=""):
"""Summary writer for TensorBoard, compatible with eager execution.
If necessary, multiple instances of `SummaryWriter` can be created, with
distinct `logdir`s and `name`s. Each `SummaryWriter` instance will retain
its independent `global_step` counter and data writing destination.
Example:
```python
writer = tfe.SummaryWriter("my_model")
# ... Code that sets up the model and data batches ...
for _ in xrange(train_iters):
loss = model.train_batch(batch)
writer.scalar("loss", loss)
writer.step()
```
Args:
logdir: Directory in which summary files will be written.
max_queue: Number of summary items to buffer before flushing to
filesystem. If 0, summaries will be flushed immediately.
      flush_secs: Number of seconds between forced commits to disk.
filename_suffix: Suffix of the event protobuf files in which the summary
data are stored.
Raises:
ValueError: If this constructor is called not under eager execution.
"""
# TODO(apassos, ashankar): Make this class and the underlying
# contrib.summary_ops compatible with graph model and remove this check.
if not context.in_eager_mode():
raise ValueError(
"Use of SummaryWriter is currently supported only with eager "
"execution enabled. File an issue at "
"https://github.com/tensorflow/tensorflow/issues/new to express "
"interest in fixing this.")
# TODO(cais): Consider adding name keyword argument, which if None or empty,
# will register the global global_step that training_util.get_global_step()
# can find.
with context.device(self._CPU_DEVICE):
self._name = uuid.uuid4().hex
self._global_step = 0
self._global_step_tensor = variable_scope.get_variable(
"global_step/summary_writer/" + self._name,
shape=[], dtype=dtypes.int64,
initializer=init_ops.zeros_initializer())
self._global_step_dirty = False
self._resource = gen_summary_ops.summary_writer(shared_name=self._name)
gen_summary_ops.create_summary_file_writer(
self._resource, logdir, max_queue, flush_secs, filename_suffix)
def __del__(self):
if self._resource:
resource_variable_ops.destroy_resource_op(self._resource)
self._resource = None
def step(self):
"""Increment the global step counter of this SummaryWriter instance."""
self._global_step += 1
self._global_step_dirty = True
@property
def global_step(self):
"""Obtain the current global_step value of this SummaryWriter instance.
Returns:
An `int` representing the current value of the global_step of this
`SummaryWriter` instance.
"""
return self._global_step
def _update_global_step_tensor(self):
with context.device(self._CPU_DEVICE):
if self._global_step_dirty:
self._global_step_dirty = False
return state_ops.assign(self._global_step_tensor, self._global_step)
else:
return self._global_step_tensor
def generic(self, name, tensor, metadata, family=None):
"""Write a generic-type summary.
Args:
name: A name for the generated node. Will also serve as the series name in
TensorBoard.
tensor: A `Tensor` or compatible value type containing the value of the
summary.
metadata: Metadata about the summary.
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on Tensorboard.
"""
with context.device(self._CPU_DEVICE):
with summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
gen_summary_ops.write_summary(
self._resource,
self._update_global_step_tensor(),
_maybe_as_cpu_tensor(tensor),
tag,
_maybe_as_cpu_tensor(metadata),
name=scope)
def scalar(self, name, tensor, family=None):
"""Write a scalar summary.
Args:
name: A name for the generated node. Will also serve as the series name in
TensorBoard.
tensor: A real numeric `Tensor` or compatible value type containing a
single value.
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on Tensorboard.
Returns:
A summary writer function for scalars.
"""
with context.device(self._CPU_DEVICE):
with summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
gen_summary_ops.write_scalar_summary(
self._resource, self._update_global_step_tensor(),
tag, _maybe_as_cpu_tensor(tensor), name=scope)
def histogram(self, name, tensor, family=None):
"""Write a histogram summary.
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
tensor: A real numeric `Tensor` or compatible value type. Any shape.
Values to use to build the histogram.
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on Tensorboard.
"""
with context.device(self._CPU_DEVICE):
with summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
gen_summary_ops.write_histogram_summary(
self._resource, self._update_global_step_tensor(),
tag, _maybe_as_cpu_tensor(tensor), name=scope)
def image(self, name, tensor, bad_color=None, max_images=3, family=None):
"""Write an image summary."""
with context.device(self._CPU_DEVICE):
      bad_color_ = (constant_op.constant([255, 0, 0, 255], dtype=dtypes.uint8)
                    if bad_color is None else bad_color)
with summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
gen_summary_ops.write_image_summary(
self._resource, self._update_global_step_tensor(),
tag, _maybe_as_cpu_tensor(tensor), bad_color_, max_images,
name=scope)
def audio(self, name, tensor, sample_rate, max_outputs, family=None):
"""Write an audio summary.
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
tensor: A 3-D `float32` `Tensor` of shape `[batch_size, frames, channels]`
or a 2-D `float32` `Tensor` of shape `[batch_size, frames]`, or
compatible value type.
sample_rate: A Scalar `float32` `Tensor` indicating the sample rate of the
signal in hertz.
max_outputs: Max number of batch elements to generate audio for.
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on Tensorboard.
"""
with context.device(self._CPU_DEVICE):
with summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
gen_summary_ops.write_audio_summary(
self._resource, self._update_global_step_tensor(),
tag,
_maybe_as_cpu_tensor(tensor),
sample_rate=_maybe_as_cpu_tensor(sample_rate),
max_outputs=max_outputs,
name=scope)
|
{
"content_hash": "1fdd66b189f9032ed6e5d5d93b47f466",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 80,
"avg_line_length": 37.86086956521739,
"alnum_prop": 0.6632981166743225,
"repo_name": "mdrumond/tensorflow",
"id": "39993558e33d9f88c9f642db2273fb81fd7be9e9",
"size": "9397",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/eager/python/summary_writer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "87912"
},
{
"name": "C++",
"bytes": "12683412"
},
{
"name": "CMake",
"bytes": "72419"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "26396"
},
{
"name": "HTML",
"bytes": "486716"
},
{
"name": "Java",
"bytes": "50995"
},
{
"name": "JavaScript",
"bytes": "12972"
},
{
"name": "Jupyter Notebook",
"bytes": "1882397"
},
{
"name": "Makefile",
"bytes": "23413"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64592"
},
{
"name": "Protocol Buffer",
"bytes": "135251"
},
{
"name": "Python",
"bytes": "11583592"
},
{
"name": "Shell",
"bytes": "268408"
},
{
"name": "TypeScript",
"bytes": "668194"
}
],
"symlink_target": ""
}
|
'''
Created on Aug 10, 2009
@author: bud
'''
import logging
import logging.handlers
import optparse
import os
import pwd
import sys
import atexit
import signal
import tornado
import subprocess
#
# Init singleton object holding reloadable config values
# Must be done in the very first place because some import might ask for config value
#
from octopus.dispatcher import settings
from octopus.core import singletonconfig
singletonconfig.load(settings.CONFDIR + "/config.ini")
#
# Init the rest of dispatcher app
#
from octopus.core.framework import WSAppFramework
from octopus.dispatcher.webservice.webservicedispatcher import WebServiceDispatcher
from octopus.dispatcher.dispatcher import Dispatcher
def daemonize(username=""):
if os.fork() != 0:
os._exit(0)
os.setsid()
if username:
uid = pwd.getpwnam(username)[2]
os.setuid(uid)
if os.fork() != 0:
os._exit(0)
# create the pidfile
pidfile = file(settings.PIDFILE, "w")
pidfile.write("%d\n" % os.getpid())
pidfile.close()
# register a cleanup callback
pidfile = os.path.abspath(settings.PIDFILE)
def delpidfile():
os.remove(pidfile)
atexit.register(delpidfile)
def delpidfileonSIGTERM(a, b):
sys.exit()
signal.signal(signal.SIGTERM, delpidfileonSIGTERM)
#
os.chdir("/")
f = os.open(os.devnull, os.O_RDONLY)
os.dup2(f, sys.stdin.fileno())
os.close(f)
f = os.open(os.devnull, os.O_WRONLY)
os.dup2(f, sys.stdout.fileno())
os.close(f)
f = os.open(os.devnull, os.O_WRONLY)
os.dup2(f, sys.stderr.fileno())
os.close(f)
def process_args():
parser = optparse.OptionParser()
parser.add_option("-P", "--pid-file", action="store", dest="PIDFILE", help="change the pid file")
parser.add_option("-d", "--daemon", action="store_true", dest="DAEMONIZE", default=False, help="daemonize the dispatcher")
parser.add_option("-b", "--bind", action="store", type="string", dest="ADDRESS", metavar="HOST", help="change the HOST the web service is bound on")
parser.add_option("-p", "--port", action="store", type="int", dest="PORT", metavar="PORT", help="change the PORT the web service is listening on")
parser.add_option("-u", "--run-as", action="store", type="string", dest="RUN_AS", metavar="USER", help="run the dispatcher as USER")
parser.add_option("-D", "--debug", action="store_true", dest="DEBUG", help="changes the default log level to DEBUG")
parser.add_option("-C", "--console", action="store_true", dest="CONSOLE", default=False, help="output logs to the console")
options, args = parser.parse_args()
# override defaults with settings from file
if args:
settings.loadSettingsFile(args[0])
# override settings with options
for setting in dir(settings):
if hasattr(options, setting) and getattr(options, setting) is not None:
setattr(settings, setting, getattr(options, setting))
return options
def setup_logging(options):
if not os.path.exists(settings.LOGDIR):
os.makedirs(settings.LOGDIR, 0755)
mainLog = os.path.join(settings.LOGDIR, "dispatcher.log")
assignLog = os.path.join(settings.LOGDIR, "assign.log")
fileHandler = logging.handlers.RotatingFileHandler(
mainLog,
maxBytes=singletonconfig.get('CORE', 'LOG_SIZE'),
backupCount=singletonconfig.get('CORE', 'LOG_BACKUPS'),
encoding="UTF-8")
assignHandler = logging.handlers.RotatingFileHandler(
assignLog,
maxBytes=singletonconfig.get('CORE', 'LOG_SIZE'),
backupCount=singletonconfig.get('CORE', 'LOG_BACKUPS'),
encoding="UTF-8")
fileHandler.setFormatter(logging.Formatter("%(asctime)s %(name)10s %(levelname)s %(message)s"))
assignHandler.setFormatter(logging.Formatter("%(asctime)s %(name)10s %(levelname)s %(message)s"))
logLevel = logging.DEBUG if options.DEBUG else singletonconfig.get('CORE', 'LOG_LEVEL')
    # The handler level must be set (otherwise it would also receive the statsLog data), but no higher than DEBUG, otherwise we might lose information if the log level is later reconfigured to a higher value
fileHandler.setLevel(logging.DEBUG)
# Create main logger
logging.getLogger().addHandler(fileHandler)
logging.getLogger().setLevel(logLevel)
    # Create a specific logger for assignment information (level forced to DEBUG)
logging.getLogger('assign').addHandler(assignHandler)
logging.getLogger('assign').setLevel(logging.DEBUG)
logging.getLogger('assign').propagate = False # cut event to avoid getting this to the root log
if options.CONSOLE and not options.DAEMONIZE:
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logging.Formatter("%(asctime)s %(name)10s %(levelname)6s %(message)s", '%Y-%m-%d %H:%M:%S'))
consoleHandler.setLevel(logLevel)
logging.getLogger().addHandler(consoleHandler)
logging.getLogger('main.dispatcher').setLevel(logLevel)
logging.getLogger('main.webservice').setLevel(logging.ERROR)
def make_dispatcher():
return WSAppFramework(applicationClass=Dispatcher, webServiceClass=WebServiceDispatcher, port=settings.PORT)
def main():
options = process_args()
setup_logging(options)
logging.getLogger('main').warning("")
logging.getLogger('main').warning("-----------------------------------------------")
logging.getLogger('main').warning("Starting PULI server on port:%d.", settings.PORT)
logging.getLogger('main').warning("-----------------------------------------------")
logging.getLogger('main').warning(" version = %s" % settings.VERSION)
logging.getLogger('main').warning(" command = %s" % " ".join(sys.argv))
logging.getLogger('main').warning(" daemon = %r" % options.DAEMONIZE)
logging.getLogger('main').warning(" console = %r" % options.CONSOLE)
logging.getLogger('main').warning(" port = %s" % settings.PORT)
logging.getLogger('main').warning("--")
if options.DAEMONIZE:
logging.getLogger('main').warning("make current process a daemon and redirecting stdout/stderr to logfile")
daemonize(settings.RUN_AS)
try:
# Redirect stdout and stderr to log file (using the first handler set in logging)
sys.stdout = logging.getLogger().handlers[0].stream
sys.stderr = logging.getLogger().handlers[0].stream
except Exception:
logging.getLogger('main').error("Unexpected error occured when redirecting stdout/stderr to logfile")
logging.getLogger('main').warning("creating dispatcher main application")
server = make_dispatcher()
# Define a periodic callback to process DB/COMPLETION/ASSIGNMENT updates
periodic = tornado.ioloop.PeriodicCallback(server.loop, singletonconfig.get('CORE', 'MASTER_UPDATE_INTERVAL'))
periodic.start()
try:
logging.getLogger('main').warning("starting tornado main loop")
tornado.ioloop.IOLoop.instance().start()
except (KeyboardInterrupt, SystemExit):
server.application.shutdown()
# If restart flag is set (via /restart webservice)
if server.application.restartService:
logging.getLogger('main').warning("Restarting service...")
try:
# Restart server using a specific command
subprocess.check_call(settings.RESTART_COMMAND.split())
except subprocess.CalledProcessError, e:
logging.getLogger('main').warning("Impossible to restart systemd unit (error: %s)" % e)
except AttributeError, e:
logging.getLogger('main').warning("Dispatcher settings do not define: RESTART_COMMAND")
logging.getLogger('main').warning("Bye.")
if __name__ == '__main__':
main()
|
{
"content_hash": "523fda722cc24a18f1c6ee69f3fe6bcf",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 152,
"avg_line_length": 39.16243654822335,
"alnum_prop": 0.6785482825664291,
"repo_name": "mikrosimage/OpenRenderManagement",
"id": "4966bcf0e10c50512dea8f068d6cbac8bc4313e4",
"size": "7737",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/octopus/dispatcherd.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "878623"
},
{
"name": "Shell",
"bytes": "5347"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from rest_framework.response import Response
from sentry.api.base import EnvironmentMixin
from sentry.api.bases.organization import OrganizationEndpoint, OrganizationPermission
from sentry.api.helpers.releases import get_group_ids_resolved_in_release
from sentry.api.serializers import serialize
from sentry.api.serializers.models.group import GroupSerializerSnuba
from sentry.models import Group
class OrganizationIssuesResolvedInReleaseEndpoint(OrganizationEndpoint, EnvironmentMixin):
permission_classes = (OrganizationPermission,)
def get(self, request, organization, version):
"""
List issues to be resolved in a particular release
``````````````````````````````````````````````````
Retrieve a list of issues to be resolved in a given release.
:pparam string organization_slug: the slug of the organization the
release belongs to.
:pparam string version: the version identifier of the release.
:auth: required
"""
group_ids = get_group_ids_resolved_in_release(organization, version)
groups = Group.objects.filter(
project__in=self.get_projects(request, organization), id__in=group_ids
)
context = serialize(
list(groups),
request.user,
GroupSerializerSnuba(
environment_ids=[e.id for e in self.get_environments(request, organization)]
),
)
return Response(context)
|
{
"content_hash": "00cb65e4b50a21ab7950aa8de2c36172",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 92,
"avg_line_length": 38.875,
"alnum_prop": 0.6630225080385852,
"repo_name": "beeftornado/sentry",
"id": "7f74116304eec45fe688f631c6b130b8a9c3dfb5",
"size": "1555",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/sentry/api/endpoints/organization_issues_resolved_in_release.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "157195"
},
{
"name": "HTML",
"bytes": "197026"
},
{
"name": "JavaScript",
"bytes": "380379"
},
{
"name": "Makefile",
"bytes": "2832"
},
{
"name": "Python",
"bytes": "6473603"
}
],
"symlink_target": ""
}
|
import sys
from PyQt4 import QtCore, QtGui
from addServerKey import Ui_addServerKey
import ConfigParser
import Tkinter, tkFileDialog
import shutil
import os
class ServerKeyDialog(QtGui.QMainWindow):
def setIconImage(self):
app_icon = QtGui.QIcon()
app_icon.addFile('images/cryptoknocker_resize.png', QtCore.QSize(16, 16))
app_icon.addFile('images/cryptoknocker_resize.png', QtCore.QSize(24, 24))
app_icon.addFile('images/cryptoknocker_resize.png', QtCore.QSize(32, 32))
app_icon.addFile('images/cryptoknocker_resize.png', QtCore.QSize(48, 48))
app_icon.addFile('images/cryptoknocker_resize.png', QtCore.QSize(256, 256))
app.setWindowIcon(app_icon)
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.setWindowTitle("Add server's public key")
self.ui = Ui_addServerKey()
self.ui.setupUi(self)
        self.ui.pushButton.clicked.connect(self.uploadServerKey)
def writeToINI(self, ServerAddress, result):
cfg = ConfigParser.ConfigParser()
cfg.read('server.ini')
if not cfg.has_section('Server public key'):
cfg.add_section('Server public key')
cfg.set('Server public key', str(ServerAddress), result)
f = open('server.ini', 'w')
cfg.write(f)
f.close()
def uploadServerKey(self):
serverName = self.ui.lineEdit.text()
root = Tkinter.Tk()
root.withdraw()
file_path = tkFileDialog.askopenfilename()
dest_path = os.path.abspath('./keys/Server/'+serverName+'.key')
shutil.copy2(file_path, dest_path)
self.writeToINI(serverName, dest_path)
self.ui.statusMsg.setText("Server public key uploaded")
QtCore.QTimer.singleShot(1500, self.closeServerKeyDialog)
def closeServerKeyDialog(self):
self.close()
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
serverKeyDialog = ServerKeyDialog()
serverKeyDialog.show()
sys.exit(app.exec_())
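# Hedged illustration (not part of the original file): after uploading a key for
# a server named "example", writeToINI leaves an entry of roughly this shape in
# server.ini:
#
#   [Server public key]
#   example = /absolute/path/to/keys/Server/example.key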
|
{
"content_hash": "23321857706e44224b9532f8825ca250",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 83,
"avg_line_length": 37.666666666666664,
"alnum_prop": 0.660275319567355,
"repo_name": "bb111189/CryptoKnocker",
"id": "31e5616c8a8cc5987a2ccf3cf65b134f822d7cec",
"size": "2034",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Client/ServerKeyDialog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3958"
},
{
"name": "JavaScript",
"bytes": "14329"
},
{
"name": "Python",
"bytes": "112165"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, absolute_import
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import AbstractUser
@python_2_unicode_compatible
class User(AbstractUser):
# First Name and Last Name do not cover name patterns
# around the globe.
name = models.CharField(_("Name of User"), blank=True, max_length=255)
is_doctor = models.BooleanField(_("Is a Doctor"), default=False)
# Email settings
email_notifications = models.BooleanField(_("Email Notifications"), default=True)
display_email = models.BooleanField(_("Display email"), default=False)
def __str__(self):
return self.username
def get_absolute_url(self):
return reverse('users:detail', kwargs={'username': self.username})
|
{
"content_hash": "c9b0e3e8a8010feed74daf21fea99cd2",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 85,
"avg_line_length": 35.76923076923077,
"alnum_prop": 0.7322580645161291,
"repo_name": "Uran198/med",
"id": "6166ab1ea9c1c48582f372b8dac216f65cee0854",
"size": "954",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "med/users/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "8906"
},
{
"name": "HTML",
"bytes": "43613"
},
{
"name": "JavaScript",
"bytes": "8672"
},
{
"name": "Nginx",
"bytes": "1095"
},
{
"name": "Python",
"bytes": "110287"
},
{
"name": "Shell",
"bytes": "4535"
}
],
"symlink_target": ""
}
|
import wx
import armid
import WidgetFactory
class TaskPersonaDialog(wx.Dialog):
def __init__(self,parent,setPersonas,currentEnvironmentName,dp,pName='',pDur='',pFreq='',pDem='',pGsup=''):
wx.Dialog.__init__(self,parent,armid.TASKPERSONA_ID,'Add Task Persona',style=wx.DEFAULT_DIALOG_STYLE|wx.MAXIMIZE_BOX|wx.THICK_FRAME|wx.RESIZE_BORDER,size=(400,300))
self.theName = pName
self.theDuration = pDur
self.theFrequency = pFreq
self.theDemands = pDem
self.theGoalSupport = pGsup
mainSizer = wx.BoxSizer(wx.VERTICAL)
personaList = dp.getDimensionNames('persona',currentEnvironmentName)
mainSizer.Add(WidgetFactory.buildComboSizerList(self,'Name',(87,30),armid.TASKPERSONA_COMBONAME_ID,personaList),0,wx.EXPAND)
suList = ['None','Low','Medium','High']
durationList = ['Seconds','Minutes','Hours or longer']
mainSizer.Add(WidgetFactory.buildComboSizerList(self,'Duration',(87,30),armid.TASKPERSONA_COMBODURATION_ID,durationList),0,wx.EXPAND)
freqList = ['Hourly or more','Daily - Weekly','Monthly or less']
mainSizer.Add(WidgetFactory.buildComboSizerList(self,'Frequency',(87,30),armid.TASKPERSONA_COMBOFREQUENCY_ID,freqList),0,wx.EXPAND)
mainSizer.Add(WidgetFactory.buildComboSizerList(self,'Demands',(87,30),armid.TASKPERSONA_COMBODEMANDS_ID,suList),0,wx.EXPAND)
mainSizer.Add(WidgetFactory.buildComboSizerList(self,'Goal Conflict',(87,30),armid.TASKPERSONA_COMBOGOALSUPPORT_ID,suList),0,wx.EXPAND)
mainSizer.Add(WidgetFactory.buildAddCancelButtonSizer(self,armid.TASKPERSONA_BUTTONADD_ID),0,wx.ALIGN_CENTER)
if (self.theName != ''):
self.SetLabel('Edit Task Persona')
nameCtrl = self.FindWindowById(armid.TASKPERSONA_COMBONAME_ID)
nameCtrl.SetValue(self.theName)
durCtrl = self.FindWindowById(armid.TASKPERSONA_COMBODURATION_ID)
durCtrl.SetStringSelection(self.theDuration)
freqCtrl = self.FindWindowById(armid.TASKPERSONA_COMBOFREQUENCY_ID)
freqCtrl.SetStringSelection(self.theFrequency)
demCtrl = self.FindWindowById(armid.TASKPERSONA_COMBODEMANDS_ID)
demCtrl.SetStringSelection(self.theDemands)
gsupCtrl = self.FindWindowById(armid.TASKPERSONA_COMBOGOALSUPPORT_ID)
gsupCtrl.SetStringSelection(self.theGoalSupport)
self.SetSizer(mainSizer)
wx.EVT_BUTTON(self,armid.TASKPERSONA_BUTTONADD_ID,self.onAdd)
def onAdd(self,evt):
nameCtrl = self.FindWindowById(armid.TASKPERSONA_COMBONAME_ID)
durCtrl = self.FindWindowById(armid.TASKPERSONA_COMBODURATION_ID)
freqCtrl = self.FindWindowById(armid.TASKPERSONA_COMBOFREQUENCY_ID)
demCtrl = self.FindWindowById(armid.TASKPERSONA_COMBODEMANDS_ID)
gsupCtrl = self.FindWindowById(armid.TASKPERSONA_COMBOGOALSUPPORT_ID)
self.theName = nameCtrl.GetValue()
self.theDuration = durCtrl.GetValue()
self.theFrequency = freqCtrl.GetValue()
self.theDemands = demCtrl.GetValue()
self.theGoalSupport = gsupCtrl.GetValue()
if len(self.theName) == 0:
dlg = wx.MessageDialog(self,'No name selected','Add Task Persona',wx.OK)
dlg.ShowModal()
dlg.Destroy()
return
elif (len(self.theDuration) == 0):
dlg = wx.MessageDialog(self,'No duration selected','Add Task Persona',wx.OK)
dlg.ShowModal()
dlg.Destroy()
return
elif (len(self.theFrequency) == 0):
dlg = wx.MessageDialog(self,'No frequency selected','Add Task Persona',wx.OK)
dlg.ShowModal()
dlg.Destroy()
return
elif (len(self.theDemands) == 0):
dlg = wx.MessageDialog(self,'No demands selected','Add Task Persona',wx.OK)
dlg.ShowModal()
dlg.Destroy()
return
elif (len(self.theGoalSupport) == 0):
dlg = wx.MessageDialog(self,'No goal support selected','Add Task Persona',wx.OK)
dlg.ShowModal()
dlg.Destroy()
return
else:
self.EndModal(armid.TASKPERSONA_BUTTONADD_ID)
def persona(self): return self.theName
def duration(self): return self.theDuration
def frequency(self): return self.theFrequency
def demands(self): return self.theDemands
def goalsupport(self): return self.theGoalSupport
|
{
"content_hash": "9e80450273320eca639b23c9f11c7e11",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 168,
"avg_line_length": 47.848837209302324,
"alnum_prop": 0.7287970838396112,
"repo_name": "RobinQuetin/CAIRIS-web",
"id": "1440aed4e87b3c9ef70fa871820d8d088ed17b25",
"size": "4914",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cairis/cairis/TaskPersonaDialog.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11265"
},
{
"name": "Mako",
"bytes": "13226"
},
{
"name": "Python",
"bytes": "3313365"
},
{
"name": "Shell",
"bytes": "19461"
},
{
"name": "XSLT",
"bytes": "35522"
}
],
"symlink_target": ""
}
|
from trove.datastore import models as datastore_models
from trove.module import models
class ModuleView(object):
def __init__(self, module):
self.module = module
def data(self):
module_dict = dict(
id=self.module.id,
name=self.module.name,
type=self.module.type,
description=self.module.description,
tenant_id=self.module.tenant_id,
datastore_id=self.module.datastore_id,
datastore_version_id=self.module.datastore_version_id,
auto_apply=bool(self.module.auto_apply),
priority_apply=bool(self.module.priority_apply),
apply_order=self.module.apply_order,
is_admin=bool(self.module.is_admin),
md5=self.module.md5,
visible=bool(self.module.visible),
created=self.module.created,
updated=self.module.updated)
# add extra data to make results more legible
if self.module.tenant_id:
# This should be the tenant name, but until we figure out where
# to get it from, use the tenant_id
tenant = self.module.tenant_id
else:
tenant = models.Modules.MATCH_ALL_NAME
module_dict["tenant"] = tenant
datastore = self.module.datastore_id
datastore_version = self.module.datastore_version_id
if datastore:
if datastore_version:
ds, ds_ver = (
datastore_models.get_datastore_version(
type=datastore, version=datastore_version))
datastore = ds.name
datastore_version = ds_ver.name
else:
ds = datastore_models.Datastore.load(datastore)
datastore = ds.name
datastore_version = models.Modules.MATCH_ALL_NAME
else:
datastore = models.Modules.MATCH_ALL_NAME
datastore_version = models.Modules.MATCH_ALL_NAME
module_dict["datastore"] = datastore
module_dict["datastore_version"] = datastore_version
return {"module": module_dict}
class ModulesView(object):
def __init__(self, modules):
self.modules = modules
def data(self):
data = []
for module in self.modules:
data.append(self.data_for_module(module))
return {"modules": data}
def data_for_module(self, module):
view = ModuleView(module)
return view.data()['module']
class DetailedModuleView(ModuleView):
def __init__(self, module):
super(DetailedModuleView, self).__init__(module)
def data(self, include_contents=False):
return_value = super(DetailedModuleView, self).data()
module_dict = return_value["module"]
module_dict["live_update"] = bool(self.module.live_update)
if hasattr(self.module, 'instance_count'):
module_dict["instance_count"] = self.module.instance_count
if include_contents:
if not hasattr(self.module, 'encrypted_contents'):
self.module.encrypted_contents = self.module.contents
self.module.contents = models.Module.deprocess_contents(
self.module.contents)
module_dict['contents'] = self.module.contents
return {"module": module_dict}
def convert_modules_to_list(modules):
module_list = []
for module in modules:
module_info = DetailedModuleView(module).data(include_contents=True)
module_list.append(module_info)
return module_list
|
{
"content_hash": "8c3547c33d8b54683401a5584f86b995",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 76,
"avg_line_length": 35.8,
"alnum_prop": 0.6041899441340782,
"repo_name": "zhangg/trove",
"id": "20134a81674b5a16356040819774168ecec02c2a",
"size": "4210",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "trove/module/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4546016"
},
{
"name": "Shell",
"bytes": "145524"
}
],
"symlink_target": ""
}
|
import sys
TOTAL_MONEY = 500
fruit_combos = set()
fruit_combos.add( (0, ) )
fruit_menu = []
for line in sys.stdin:
fruit_name, fruit_price = line.strip().split()
fruit_menu.append( (int(fruit_price), fruit_name ))
fruit_menu.sort()
for fruit_price, fruit_name in fruit_menu:
new_combos = set()
for cost, *purchases in fruit_combos:
max_buy = (TOTAL_MONEY - cost) // fruit_price
for amount_buy in range(1, max_buy+1):
new_cost = cost + fruit_price * amount_buy
purchase_string = "{count} {name}{plural}".format(
count=amount_buy,
name=fruit_name,
plural="s" if amount_buy > 1 else ""
)
new_purchases = tuple(purchases) + (purchase_string ,)
if new_cost == TOTAL_MONEY: # Solution!
print(', '.join(new_purchases))
if new_cost >= TOTAL_MONEY:
break
new_combos.add( (new_cost,) + new_purchases )
fruit_combos.update(new_combos)
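# Illustrative walk-through (hypothetical input, not from the original problem): with
# a menu of "apple 100" and "banana 150" on stdin, the sorted menu is processed
# apple-first; expanding (0,) with apples prints "5 apples" when a combination hits
# exactly 500, and the banana pass then extends the partial apple combos, printing
# e.g. "2 apples, 2 bananas" (200 + 300 = 500). Any combination that reaches or
# exceeds 500 is not added back to the pool, so it is never extended further.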
|
{
"content_hash": "122c6a49e56625675ebe6eec9f3289a9",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 66,
"avg_line_length": 27.710526315789473,
"alnum_prop": 0.5460588793922128,
"repo_name": "fsufitch/dailyprogrammer",
"id": "a0dabe29790a5b1a08568ac43b356c804fc66be7",
"size": "1053",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "243_intermediate/solution_opt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "274"
},
{
"name": "Go",
"bytes": "55952"
},
{
"name": "HTML",
"bytes": "1759"
},
{
"name": "JavaScript",
"bytes": "1806"
},
{
"name": "PHP",
"bytes": "1006"
},
{
"name": "Python",
"bytes": "87430"
},
{
"name": "TypeScript",
"bytes": "1527"
}
],
"symlink_target": ""
}
|
"""
homeassistant.components.device_tracker.asuswrt
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Device tracker platform that supports scanning a ASUSWRT router for device
presence.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.asuswrt/
"""
import logging
import re
import telnetlib
import threading
from datetime import timedelta
from homeassistant.components.device_tracker import DOMAIN
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
from homeassistant.helpers import validate_config
from homeassistant.util import Throttle
# Return cached results if last scan was less than this time ago
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=5)
_LOGGER = logging.getLogger(__name__)
_LEASES_REGEX = re.compile(
r'\w+\s' +
r'(?P<mac>(([0-9a-f]{2}[:-]){5}([0-9a-f]{2})))\s' +
r'(?P<ip>([0-9]{1,3}[\.]){3}[0-9]{1,3})\s' +
r'(?P<host>([^\s]+))')
_IP_NEIGH_REGEX = re.compile(
r'(?P<ip>([0-9]{1,3}[\.]){3}[0-9]{1,3})\s' +
r'\w+\s' +
r'\w+\s' +
r'(\w+\s(?P<mac>(([0-9a-f]{2}[:-]){5}([0-9a-f]{2}))))?\s' +
r'(?P<status>(\w+))')
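# Illustrative (made-up) samples of the lines these regexes are meant to parse:
#   dnsmasq lease: "86400 aa:bb:cc:dd:ee:ff 192.168.1.23 laptop 01:aa:bb:cc:dd:ee:ff"
#     -> mac=aa:bb:cc:dd:ee:ff, ip=192.168.1.23, host=laptop
#   ip neigh:      "192.168.1.23 dev br0 lladdr aa:bb:cc:dd:ee:ff REACHABLE"
#     -> ip=192.168.1.23, mac=aa:bb:cc:dd:ee:ff, status=REACHABLE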
# pylint: disable=unused-argument
def get_scanner(hass, config):
""" Validates config and returns an ASUS-WRT scanner. """
if not validate_config(config,
{DOMAIN: [CONF_HOST, CONF_USERNAME, CONF_PASSWORD]},
_LOGGER):
return None
scanner = AsusWrtDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
class AsusWrtDeviceScanner(object):
"""
This class queries a router running ASUSWRT firmware
for connected devices. Adapted from DD-WRT scanner.
"""
def __init__(self, config):
self.host = config[CONF_HOST]
self.username = str(config[CONF_USERNAME])
self.password = str(config[CONF_PASSWORD])
self.lock = threading.Lock()
self.last_results = {}
# Test the router is accessible
data = self.get_asuswrt_data()
self.success_init = data is not None
def scan_devices(self):
"""
Scans for new devices and return a list containing found device IDs.
"""
self._update_info()
return [client['mac'] for client in self.last_results]
def get_device_name(self, device):
""" Returns the name of the given device or None if we don't know. """
if not self.last_results:
return None
for client in self.last_results:
if client['mac'] == device:
return client['host']
return None
@Throttle(MIN_TIME_BETWEEN_SCANS)
def _update_info(self):
"""
Ensures the information from the ASUSWRT router is up to date.
Returns boolean if scanning successful.
"""
if not self.success_init:
return False
with self.lock:
_LOGGER.info("Checking ARP")
data = self.get_asuswrt_data()
if not data:
return False
active_clients = [client for client in data.values() if
client['status'] == 'REACHABLE' or
client['status'] == 'DELAY' or
client['status'] == 'STALE']
self.last_results = active_clients
return True
def get_asuswrt_data(self):
""" Retrieve data from ASUSWRT and return parsed result. """
try:
telnet = telnetlib.Telnet(self.host)
telnet.read_until(b'login: ')
telnet.write((self.username + '\n').encode('ascii'))
telnet.read_until(b'Password: ')
telnet.write((self.password + '\n').encode('ascii'))
prompt_string = telnet.read_until(b'#').split(b'\n')[-1]
telnet.write('ip neigh\n'.encode('ascii'))
neighbors = telnet.read_until(prompt_string).split(b'\n')[1:-1]
telnet.write('cat /var/lib/misc/dnsmasq.leases\n'.encode('ascii'))
leases_result = telnet.read_until(prompt_string).split(b'\n')[1:-1]
telnet.write('exit\n'.encode('ascii'))
except EOFError:
_LOGGER.exception("Unexpected response from router")
return
except ConnectionRefusedError:
_LOGGER.exception("Connection refused by router," +
" is telnet enabled?")
return
devices = {}
for lease in leases_result:
match = _LEASES_REGEX.search(lease.decode('utf-8'))
if not match:
_LOGGER.warning("Could not parse lease row: %s", lease)
continue
# For leases where the client doesn't set a hostname, ensure
# it is blank and not '*', which breaks the entity_id down
# the line
host = match.group('host')
if host == '*':
host = ''
devices[match.group('ip')] = {
'host': host,
'status': '',
'ip': match.group('ip'),
'mac': match.group('mac').upper(),
}
for neighbor in neighbors:
match = _IP_NEIGH_REGEX.search(neighbor.decode('utf-8'))
if not match:
_LOGGER.warning("Could not parse neighbor row: %s", neighbor)
continue
if match.group('ip') in devices:
devices[match.group('ip')]['status'] = match.group('status')
return devices
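        # Illustrative (made-up) shape of the returned mapping, keyed by IP:
        #   {'192.168.1.23': {'host': 'laptop', 'status': 'REACHABLE',
        #                     'ip': '192.168.1.23', 'mac': 'AA:BB:CC:DD:EE:FF'}}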
|
{
"content_hash": "5631f5e14601b1dcfd10b032a5aefd39",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 79,
"avg_line_length": 34.425925925925924,
"alnum_prop": 0.5589026358257128,
"repo_name": "nnic/home-assistant",
"id": "d9b6d1a809e1448ea0899af03676daac5dde850e",
"size": "5577",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/device_tracker/asuswrt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1482064"
},
{
"name": "Python",
"bytes": "1790232"
},
{
"name": "Shell",
"bytes": "3570"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import json
from math import floor
import sqlite3
import string
try:
from itertools import izip
except ImportError: # Python 3
izip = zip
import supybot.conf as conf
import supybot.log as log
import supybot.utils as utils
from supybot.commands import *
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
try:
from supybot.i18n import PluginInternationalization
_ = PluginInternationalization('Weather')
except ImportError:
    # Placeholder that allows the plugin to run on a bot
    # without the i18n module
_ = lambda x:x
class WeatherDB():
"""WeatherDB class to store our users and their settings."""
def __init__(self):
self.filename = conf.supybot.directories.data.dirize("Weather.db")
self.log = log.getPluginLogger('Weather')
self._conn = sqlite3.connect(self.filename, check_same_thread=False)
self._conn.text_factory = str
self.makeDb()
def makeDb(self):
"""Create our DB."""
self.log.info("Weather: Checking/Creating DB.")
with self._conn as conn:
cursor = conn.cursor()
cursor.execute("""CREATE TABLE IF NOT EXISTS users (
nick TEXT PRIMARY KEY,
location TEXT NOT NULL,
metric INTEGER DEFAULT 0,
alerts INTEGER DEFAULT 0,
almanac INTEGER DEFAULT 0,
astronomy INTEGER DEFAULT 0,
forecast INTEGER DEFAULT 0,
pressure INTEGER DEFAULT 0,
wind INTEGER DEFAULT 0,
uv INTEGER DEFAULT 0,
visibility INTEGER DEFAULT 0,
dewpoint INTEGER DEFAULT 0,
humidity INTEGER DEFAULT 0,
updated INTEGER DEFAULT 0)""")
self._conn.commit() # this fails silently if already there.
# next, we see if we need to upgrade the old table structure.
cursor = conn.cursor() # the old table is 4.
tablelength = len([l[1] for l in cursor.execute("pragma table_info('users')").fetchall()])
if tablelength == 4: # old table is 4: users, location, metric, colortemp.
self.log.info("Weather: Upgrading database version.")
columns = ['alerts', 'almanac', 'astronomy', 'forecast', 'pressure', 'wind', 'uv', 'visibility', 'dewpoint', 'humidity', 'updated']
for column in columns:
try:
cursor.execute('ALTER TABLE users ADD COLUMN %s INTEGER DEFAULT 0' % column)
self._conn.commit()
except: # fail silently.
pass
def setweather(self, username, location):
"""Stores or update a user's location. Adds user if not found."""
with self._conn as conn:
cursor = conn.cursor()
if self.getuser(username): # username exists.
cursor.execute("""UPDATE users SET location=? WHERE nick=?""", (location, username,))
else: # username does not exist so add it in.
cursor.execute("""INSERT OR REPLACE INTO users (nick, location) VALUES (?,?)""", (username, location,))
self._conn.commit() # commit.
def setsetting(self, username, setting, value):
"""Set one of the user settings."""
with self._conn as conn:
cursor = conn.cursor()
query = "UPDATE users SET %s=? WHERE nick=?" % setting
cursor.execute(query, (value, username,))
self._conn.commit()
def getsettings(self):
"""Get all 'user' settings that can be set."""
with self._conn as conn:
cursor = conn.cursor() # below, we get all column names that are settings (INTEGERS)
settings = [str(l[1]) for l in cursor.execute("pragma table_info('users')").fetchall() if l[2] == "INTEGER"]
return settings
def getweather(self, user):
"""Return a dict of user's settings."""
self._conn.row_factory = sqlite3.Row
with self._conn as conn:
cursor = conn.cursor()
cursor.execute("""SELECT * from users where nick=?""", (user,))
row = cursor.fetchone()
if not row: # user does not exist.
return None
else: # user exists.
rowdict = dict(izip(row.keys(), row))
return rowdict
def getuser(self, user):
"""Returns a boolean if a user exists."""
with self._conn as conn:
cursor = conn.cursor()
cursor.execute("""SELECT location from users where nick=?""", (user,))
row = cursor.fetchone()
if row:
return True
else:
return False
class Weather(callbacks.Plugin):
"""This plugin provides access to information from Weather Underground."""
threaded = True
def __init__(self, irc):
self.__parent = super(Weather, self)
self.__parent.__init__(irc)
self.db = WeatherDB()
##############
# FORMATTING #
##############
def _bold(self, string):
return ircutils.bold(string)
def _bu(self, string):
return ircutils.underline(ircutils.bold(string))
############################
# INTERNAL WEATHER HELPERS #
############################
def _weatherSymbol(self, code):
"""Return a unicode symbol based on weather status."""
table = {'partlycloudy': '~☁',
'cloudy': '☁',
'tstorms': '⚡',
'sunny': '☀',
'snow': '❄',
'sleet': '☄',
'rain': '☔',
'mostlysunny': '~☀',
'mostlycloudy': '~☁',
'hazy': '♒',
'fog': '♒',
'flurries': '❄',
'clear': '☼',
'chanceflurries': '?❄',
'chancerain': '?☔',
'chancesleet': '?❄',
'chancesnow': '?❄',
'chancetstorms': '?☔'}
# return symbol from table.
try:
return table[code]
except KeyError:
return "unknown"
def _temp(self, channel, f, c=None):
"""Returns a colored string based on the temperature."""
        # let's be safe and wrap in a try/except because we can't always trust data purity.
try:
if str(f).startswith('NA'): # Wunderground sends a field that's not available
return f
f = int(f)
if not c:
c = int((f - 32) * 5/9)
s = "{0}F/{1}C".format(f, c)
# determine color.
if not self.registryValue('disableColoredTemp', channel):
if f < 10.0:
color = 'light blue'
elif 10.0 <= f <= 32.0:
color = 'teal'
elif 32.1 <= f <= 50.0:
color = 'blue'
elif 50.1 <= f <= 60.0:
color = 'light green'
elif 60.1 <= f <= 70.0:
color = 'green'
elif 70.1 <= f <= 80.0:
color = 'yellow'
elif 80.1 <= f <= 90.0:
color = 'orange'
elif f > 90.0:
color = 'red'
else:
color = 'light grey'
s = ircutils.mircColor(s, color)
# return.
return s
except (TypeError, ValueError) as e:
self.log.info("Weather: ValueError trying to convert temp: {0} message: {1}".format(f, e))
return "N/A"
def _wind(self, angle, useSymbols=False):
"""Converts degrees to direction for wind. Can optionally return a symbol."""
if not useSymbols: # ordinal names.
direction_names = ["N", "NE", "E", "SE", "S", "SW", "W", "NW"]
else: # symbols.
direction_names = ['↑', '↗', '→', '↘', '↓', '↙', '←', '↖']
# do math below to figure the angle->direction out.
directions_num = len(direction_names)
directions_step = 360./directions_num
        index = int(round(((angle/360. - floor(angle/360.)) * 360.) / directions_step))
index %= directions_num
# return.
return direction_names[index]
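    # Worked example for _wind (illustrative): with the eight compass names above the
    # step is 45 degrees, so an angle of 225 maps to index round(225/45) = 5, i.e.
    # "SW" (or the corresponding arrow when useSymbols is True).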
##############################################
# PUBLIC FUNCTIONS TO WORK WITH THE DATABASE #
##############################################
def setuser(self, irc, msg, args, optset, optbool):
"""<setting> <True|False>
Sets a user's <setting> to True or False.
Valid settings include: alerts, almanac, astronomy, forecast, pressure,
wind, uv, visibility, dewpoint, humidity, and updated.
"""
# first, lower
optset = optset.lower()
# grab a list of valid settings.
validset = self.db.getsettings()
if optset not in validset:
irc.error(format("%r is an invalid setting. Must be one of: %L.", optset,
sorted(validset)), Raise=True)
if optbool: # True.
value = 1
else: # False.
value = 0
# check user first.
        if not self.db.getuser(msg.nick.lower()): # user does not exist.
irc.error("You are not in the database; you must use 'setweather' first.", Raise=True)
else: # user is valid. perform the op.
self.db.setsetting(msg.nick.lower(), optset, value)
irc.replySuccess()
setuser = wrap(setuser, [('somethingWithoutSpaces'), ('boolean')])
def setweather(self, irc, msg, args, optlocation):
"""<location code>
        Sets the weather location code for your nick.
Use your zip/postal code to keep it simple.
Ex: setweather 10012
"""
self.db.setweather(msg.nick.lower(), optlocation)
irc.replySuccess()
setweather = wrap(setweather, [('text')])
##########################
# WUNDERGROUND API CALLS #
##########################
def _wuac(self, irc, q):
"""Internal helper to find a location via Wunderground's autocomplete API."""
url = 'http://autocomplete.wunderground.com/aq?query=%s' % utils.web.urlquote(q)
self.log.debug("Weather: Autocomplete URL: %s", url)
try:
page = utils.web.getUrl(url)
except utils.web.Error as e:
irc.error("Failed to load location data for %r." % q, Raise=True)
data = json.loads(page.decode('utf-8'))
loc = ''
# ZMW is in some ways a lot like Wunderground's location ID Codes, for when locations
# are too ambiguous. (e.g. looking up "France", which is a country with many different
# locations!)
for item in data['RESULTS']:
# Sometimes the autocomplete will lead us to more disambiguation pages...
# which cause lots of errors in processing!
if item['tz'] != 'MISSING':
loc = "zmw:%s" % item['zmw']
break
else:
irc.error("Failed to find a valid location for: %r" % q, Raise=True)
return loc
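    # Illustrative (made-up) shape of an autocomplete result item; the code in _wuac
    # above only relies on the 'tz' and 'zmw' keys:
    #   {"name": "Paris, France", "tz": "Europe/Paris", "zmw": "00000.45.07156"}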
def _wunderjson(self, url, location):
"""Fetch wunderground JSON and return."""
# first, construct the url properly.
if url.endswith('/'): # cheap way to strip the trailing /
url = '%sq/%s.json' % (url, utils.web.urlquote(location))
else:
url = '%s/q/%s.json' % (url, utils.web.urlquote(location))
# now actually fetch the url.
try:
self.log.debug("Weather URL: {0}".format(url))
page = utils.web.getUrl(url)
return page
except Exception as e: # something didn't work.
self.log.info("Weather: (_wunderjson) Error trying to open {0} message: {1}".format(url, e))
return None
####################
# PUBLIC FUNCTIONS #
####################
@wrap([getopts({'user': 'nick'}), optional('text')])
def weather(self, irc, msg, args, optlist, location):
"""[<location>] [--user <othernick>]
Fetches weather and forecast information for <location>. <location> can be left blank if you have a previously set location (via 'setweather').
If the --user option is specified, show weather for the saved location of that nick, instead of the caller.
Location can take many forms, including a simple city name, US state/city (CA/San_Francisco), zip code, country/city (Australia/Sydney), or an airport code (KJFK).
Ex: 10021 or Sydney, Australia or KJFK
"""
apikey = self.registryValue('apiKey')
if not apikey:
irc.error("No Wunderground API key was defined; set 'config plugins.Weather.apiKey'.",
Raise=True)
channel = msg.args[0]
optlist = dict(optlist)
# Default to looking at the caller's saved info, but optionally they can look at someone else's weather too.
nick = optlist.get('user') or msg.nick
# urlargs will be used to build the url to query the API.
# besides lang, these are preset values that should not be changed.
urlArgs = {'features': ['conditions', 'forecast'],
'lang': self.registryValue('lang'),
'bestfct': '1',
'pws': '0' }
loc = None
args = {'imperial': self.registryValue('useImperial', msg.args[0]),
'alerts': self.registryValue('alerts'),
'almanac': self.registryValue('almanac'),
'astronomy': self.registryValue('astronomy'),
'pressure': self.registryValue('showPressure'),
'wind': self.registryValue('showWind'),
'updated': self.registryValue('showUpdated'),
'forecast': False,
'humidity': False,
'uv': False,
'visibility': False,
'dewpoint': False}
usersetting = self.db.getweather(nick.lower())
if usersetting:
for (k, v) in usersetting.items():
args[k] = v
loc = usersetting["location"]
args['imperial'] = (not usersetting["metric"])
else:
if not location: # location was also not specified, so we must bail.
if nick != msg.nick:
irc.error("I did not find a preset location for %s." % nick, Raise=True)
else:
irc.error("I did not find a preset location for you. Set one via 'setweather <location>'.", Raise=True)
loc = self._wuac(irc, location or loc)
url = 'http://api.wunderground.com/api/%s/' % (apikey)
for check in ['alerts', 'almanac', 'astronomy']:
if args[check]:
urlArgs['features'].append(check) # append to dict->key (list)
# now, we use urlArgs dict to append to url.
for (key, value) in urlArgs.items():
if key == "features": # will always be at least conditions.
url += "".join([item + '/' for item in value]) # listcmp the features/
if key in ("lang", "bestfct", "pws"): # rest added with key:value
url += "{0}:{1}/".format(key, value)
page = self._wunderjson(url, loc)
try:
data = json.loads(page.decode('utf-8'))
except Exception as e:
self.log.error("Weather: Error processing JSON from: {0} :: {1}".format(url, e))
irc.error("Could not process JSON from Weather Underground. Check the logs.", Raise=True)
outdata = {'weather': data['current_observation']['weather'],
'location': data['current_observation']['display_location']['full'],
'humidity': data['current_observation']['relative_humidity'],
'uv': data['current_observation']['UV']}
if data['current_observation']['wind_mph'] < 1: # no wind.
outdata['wind'] = "None"
else:
if args['imperial']:
outdata['wind'] = "{0}@{1}mph".format(self._wind(data['current_observation']['wind_degrees']), data['current_observation']['wind_mph'])
if int(data['current_observation']['wind_gust_mph']) > 0:
outdata['wind'] += " ({0}mph gusts)".format(data['current_observation']['wind_gust_mph'])
else:
outdata['wind'] = "{0}@{1}kph".format(self._wind(data['current_observation']['wind_degrees']),data['current_observation']['wind_kph'])
if int(data['current_observation']['wind_gust_kph']) > 0:
outdata['wind'] += " ({0}kph gusts)".format(data['current_observation']['wind_gust_kph'])
# Show the last updated time if available.
observationTime = data['current_observation'].get('observation_epoch')
localTime = data['current_observation'].get('local_epoch')
if not observationTime or not localTime:
outdata['observation'] = data.get('observation_time', 'unknown').lstrip('Last Updated on ')
else: # Prefer relative times, if available
s = int(localTime) - int(observationTime) # format into seconds.
if s <= 1:
outdata['observation'] = 'just now'
elif s < 60:
outdata['observation'] = '{0}s ago'.format(s)
elif s < 120:
outdata['observation'] = '1m ago'
elif s < 3600:
outdata['observation'] = '{0}m ago'.format(s/60)
elif s < 7200:
outdata['observation'] = '1hr ago'
else:
outdata['observation'] = '{0}hrs ago'.format(s/3600)
outdata['temp'] = self._temp(channel, data['current_observation']['temp_f'])
# pressure.
pin = str(data['current_observation']['pressure_in']) + 'in'
pmb = str(data['current_observation']['pressure_mb']) + 'mb'
outdata['pressure'] = "{0}/{1}".format(pin, pmb)
# dewpoint.
outdata['dewpoint'] = self._temp(channel, data['current_observation']['dewpoint_f'])
# heatindex.
outdata['heatindex'] = self._temp(channel, data['current_observation']['heat_index_f'])
# windchill.
outdata['windchill'] = self._temp(channel, data['current_observation']['windchill_f'])
# feels like
outdata['feelslike'] = self._temp(channel, data['current_observation']['feelslike_f'])
# visibility.
vmi = str(data['current_observation']['visibility_mi']) + 'mi'
vkm = str(data['current_observation']['visibility_km']) + 'km'
outdata['visibility'] = "{0}/{1}".format(vmi, vkm)
# handle forecast data. This is internally stored as a dict with integer keys (days from now)
# with the forecast text as values.
forecastdata = {}
for forecastday in data['forecast']['txt_forecast']['forecastday']:
# Slightly different wording and results (e.g. rainfall for X inches vs. X cm) are given
            # depending on whether imperial or metric units are in use.
if args['imperial']:
text = forecastday['fcttext']
else:
text = forecastday['fcttext_metric']
forecastdata[int(forecastday['period'])] = {'day': forecastday['title'],
'text': text}
output = "{0} :: {1} ::".format(self._bold(outdata['location']), outdata['weather'])
output += " {0} ".format(outdata['temp'])
# humidity.
if args['humidity']:
output += "(Humidity: {0}) ".format(outdata['humidity'])
        # windchill/heatindex are seasonal; test with startswith to decide what to include
# NA means not available, so ignore those fields
if not outdata['windchill'].startswith("NA"):
output += "| {0} {1} ".format(self._bold('Wind Chill:'), outdata['windchill'])
if not outdata['heatindex'].startswith("NA"):
output += "| {0} {1} ".format(self._bold('Heat Index:'), outdata['heatindex'])
# Iterate over the args dict for what extra data to include
for k in ('wind', 'visibility', 'uv', 'pressure', 'dewpoint'):
if args[k]:
output += "| {0}: {1} ".format(self._bold(k.title()), outdata[k])
        # Add in the first two forecast items from conditions + the "last updated" time.
output += "| {0}: {1}".format(self._bold(forecastdata[0]['day']), forecastdata[0]['text'])
output += " {0}: {1}".format(self._bold(forecastdata[1]['day']), forecastdata[1]['text'])
if args['updated']:
# Round updated time (given as a string) to the nearest unit.
# This is annoying because Wunderground sends these as raw strings, in the form
# "1hr ago" or "2.7666666666666666m ago"
tailstr = outdata['observation'].lstrip(string.digits + '.')
updated_time = outdata['observation'].rstrip(string.ascii_letters + ' ')
updated_time = round(float(updated_time))
output += " | Updated %s%s" % (ircutils.bold(updated_time), tailstr)
# finally, output the basic weather.
irc.reply(output)
# handle alerts - everything here and below sends as separate replies if enabled
if args['alerts']: # only look for alerts if present.
if data['alerts']: # alerts is a list. it can also be empty.
outdata['alerts'] = data['alerts'][0]['message'] # need to do some formatting below.
outdata['alerts'] = outdata['alerts'].replace('\n', ' ')
outdata['alerts'] = utils.str.normalizeWhitespace(outdata['alerts']) # fix pesky double whitespacing.
else: # no alerts found (empty).
outdata['alerts'] = "No alerts."
irc.reply("{0} {1}".format(self._bu("Alerts:"), outdata['alerts']))
# handle almanac
if args['almanac']:
try:
outdata['highyear'] = data['almanac']['temp_high'].get('recordyear')
outdata['lowyear'] = data['almanac']['temp_low'].get('recordyear')
outdata['highaverage'] = self._temp(channel, data['almanac']['temp_high']['normal']['F'])
outdata['lowaverage'] = self._temp(channel, data['almanac']['temp_low']['normal']['F'])
if outdata['highyear'] != "NA" and outdata['lowyear'] != "NA":
outdata['highrecord'] = self._temp(channel, data['almanac']['temp_high']['record']['F'])
outdata['lowrecord'] = self._temp(channel, data['almanac']['temp_low']['record']['F'])
else:
outdata['highrecord'] = outdata['lowrecord'] = "NA"
except KeyError:
output = "%s Not available." % self._bu('Almanac:')
else:
output = ("{0} Average High: {1} (Record: {2} in {3}) | Average Low: {4} (Record: {5} in {6})".format(
self._bu('Almanac:'), outdata['highaverage'], outdata['highrecord'], outdata['highyear'],
outdata['lowaverage'], outdata['lowrecord'], outdata['lowyear']))
irc.reply(output)
# handle astronomy
if args['astronomy']:
sunriseh = data['moon_phase']['sunrise']['hour']
sunrisem = data['moon_phase']['sunrise']['minute']
sunseth = data['moon_phase']['sunset']['hour']
sunsetm = data['moon_phase']['sunset']['minute']
sunrise = "{0}:{1}".format(sunriseh, sunrisem)
sunset = "{0}:{1}".format(sunseth, sunsetm)
# Oh god, this one-liner... -GLolol
lengthofday = "%dh%dm" % divmod((((int(sunseth)-int(sunriseh))+float((int(sunsetm)-int(sunrisem))/60.0))*60 ),60)
astronomy = {'Moon illum:': str(data['moon_phase']['percentIlluminated']) + "%",
'Moon age:': str(data['moon_phase']['ageOfMoon']) + "d",
'Sunrise:': sunrise,
'Sunset:': sunset,
'Length of Day:': lengthofday}
output = [format('%s %s', self._bold(k), v) for k, v in sorted(astronomy.items())]
output = format("%s %s", self._bu('Astronomy:'), " | ".join(output))
irc.reply(output)
# handle forecast
if args['forecast']:
fullforecastdata = {} # key = day (int), value = dict of forecast data.
for forecastday in data['forecast']['simpleforecast']['forecastday']:
high = self._temp(channel, forecastday['high']['fahrenheit'])
low = self._temp(channel, forecastday['low']['fahrenheit'])
tmpdict = {'day': forecastday['date']['weekday_short'],
'symbol': self._weatherSymbol(forecastday['icon']),
'text': forecastday['conditions'],
'low': low,
'high': high}
fullforecastdata[int(forecastday['period'])] = tmpdict
outforecast = [] # prep string for output.
for (k, v) in fullforecastdata.items(): # iterate through forecast data.
outforecast.append("{0}: {1} (High: {2} Low: {3})".format(self._bold(v['day']),
v['text'], v['high'], v['low']))
output = "{0} {1}".format(self._bu('Forecast:'), " | ".join(outforecast))
irc.reply(output)
Class = Weather
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=250:
|
{
"content_hash": "546c3bdc8c78cf7720da23671fc183c8",
"timestamp": "",
"source": "github",
"line_count": 576,
"max_line_length": 171,
"avg_line_length": 45.095486111111114,
"alnum_prop": 0.5332050048123196,
"repo_name": "GLolol/Supybot-Weather",
"id": "0d3213e51aa583fb4f9eecb1839aa5f2f5bf6cb6",
"size": "27209",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "plugin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31781"
}
],
"symlink_target": ""
}
|
import pickle
def store(obj, identifier):
file_name = get_file_name(identifier)
    with open(file_name, 'wb') as handle:  # binary mode keeps the pickle stream intact
        pickle.dump(obj, handle)
def load(identifier):
    file_name = get_file_name(identifier)
    with open(file_name, 'rb') as handle:  # binary mode to match store()
        obj = pickle.load(handle)
    return obj
def get_file_name(identifier):
return str(identifier)
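# Minimal usage sketch (illustrative, not part of the original module): round-trip a
# value through store()/load(); the "demo_cache" identifier is arbitrary.
if __name__ == "__main__":
    store({"answer": 42}, "demo_cache")
    assert load("demo_cache") == {"answer": 42}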
|
{
"content_hash": "fc8f4368fefcc71aea4db94ba0dfbb4c",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 45,
"avg_line_length": 20.894736842105264,
"alnum_prop": 0.6221662468513854,
"repo_name": "mohammadkrb/Fittak",
"id": "3b0323311fdc8bd9655e7fb64815cc46e8dc5975",
"size": "397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Cache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36988"
}
],
"symlink_target": ""
}
|
from importlib import import_module
from django.shortcuts import render
from django.urls import URLResolver
def home(request):
apps = __get_apps()
return render(request, "learning_django/learning_django.html", {"apps":apps})
def __get_apps():
apps = {}
urls_module = import_module("learning_django.urls")
urlpatterns = getattr(urls_module, "urlpatterns", None)
for urlpattern in urlpatterns:
if isinstance(urlpattern, URLResolver):
app_name = urlpattern.app_name
apps[app_name] = urlpattern.pattern._route
return apps
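# Illustrative (hypothetical) result of __get_apps(): a mapping from each included
# app's namespace to its route prefix, e.g. {"polls": "polls/"}, assuming
# learning_django.urls include()s an app named "polls" under the "polls/" route.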
|
{
"content_hash": "037b3f4240a20524f104cfe709fbfaad",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 81,
"avg_line_length": 29.2,
"alnum_prop": 0.684931506849315,
"repo_name": "matija94/show-me-the-code",
"id": "3239b1d81237c9d85f2845843c672f4e5642a0fc",
"size": "584",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "learning_django/learning_django/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "360945"
},
{
"name": "Batchfile",
"bytes": "5439"
},
{
"name": "CSS",
"bytes": "1535"
},
{
"name": "Clojure",
"bytes": "26019"
},
{
"name": "Dockerfile",
"bytes": "120"
},
{
"name": "HTML",
"bytes": "60877"
},
{
"name": "Hack",
"bytes": "1680"
},
{
"name": "Java",
"bytes": "1094411"
},
{
"name": "JavaScript",
"bytes": "21619"
},
{
"name": "Jupyter Notebook",
"bytes": "1339056"
},
{
"name": "Kotlin",
"bytes": "3918"
},
{
"name": "Pascal",
"bytes": "1125"
},
{
"name": "Python",
"bytes": "291744"
},
{
"name": "Scala",
"bytes": "161887"
},
{
"name": "Scilab",
"bytes": "129306"
},
{
"name": "Shell",
"bytes": "8449"
},
{
"name": "XSLT",
"bytes": "3508"
}
],
"symlink_target": ""
}
|
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mamase.settings.dev")
import django
django.setup()
from apps.utils.models import Field, Channel, ChannelField
from apps.utils.api import (aggregateDailyFeedData, aggregateMonthlyFeedData,
createAggregateDailyData, createAggregateMonthlyData)
search_fields = ['rain', 'pressure', 'wind speed', 'wind direction',
                 'moisture', 'humidity'] # Ignore temp for now. The key words used to search
station_type = ''
complexargs = {}
excludeargs = {}
kwargs = {}
# Aggregate on all channels and store on all; simpler, even if not every value needs it
def fix(field):
fields = Field.objects.filter(name__icontains=field)
print "Found all similar fields for " + field
channels = Channel.objects.all()
for item in fields:
#Do a loop for each channel
for channel in channels:
#Get all channel fields for this channel
cfs = ChannelField.objects.filter(channel=channel, field=item)
print cfs
for channelfield in cfs:
#Too many nested loops
kwargs['channelfield__in'] = cfs
ddata = aggregateDailyFeedData(station_type, kwargs, complexargs, excludeargs)
mdata = aggregateMonthlyFeedData(station_type, kwargs, complexargs, excludeargs)
createAggregateDailyData(ddata, channelfield)
createAggregateMonthlyData(mdata, channelfield)
print "done with channel " + channel.name
print "all done"
if __name__ == "__main__":
for item in search_fields:
print "Now fixing " + item
fix(item)
|
{
"content_hash": "1396a4fec06cd3f5f46cf1fb1e3a5369",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 96,
"avg_line_length": 34.875,
"alnum_prop": 0.6541218637992832,
"repo_name": "Upande/MaMaSe",
"id": "570812ab282a58a9a0935e53ba67bec6045da3aa",
"size": "1674",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fixes/fix_aggregation_fields.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "192165"
},
{
"name": "HTML",
"bytes": "1244832"
},
{
"name": "Java",
"bytes": "305368"
},
{
"name": "JavaScript",
"bytes": "919803"
},
{
"name": "PHP",
"bytes": "569"
},
{
"name": "Python",
"bytes": "263206"
},
{
"name": "Shell",
"bytes": "4681"
}
],
"symlink_target": ""
}
|
import json
from django.contrib.auth.decorators import login_required
from cpan2repo.tasks import start_build
from django.shortcuts import render, get_object_or_404
from webui.models import Branch, BuildConfiguration, PackageNameMapping
from django.http import HttpResponseRedirect, HttpResponse
from webui.forms import BranchForm, BuildConfigurationForm, PackageNameMappingForm, RemoteBuildConfigurationForm
# Build configurations views
@login_required
def index(request):
return render(request, 'index.html', {
}, content_type="text/html")
def branches(request):
return HttpResponse(
json.dumps(
list(Branch.objects.values("pk", "name", "path", "maintainer", "is_virtual"))
),
content_type="application/json"
)
def build_confs(request):
data = []
for build_conf in BuildConfiguration.objects.all():
if build_conf.last_build_date:
build_conf.last_build_date = build_conf.last_build_date.strftime("%Y-%m-%dT%H:%M:%S.%f")
data.append({
"pk": build_conf.pk,
"name": build_conf.name,
"pkg_branch__name": build_conf.pkg_branch.name,
"version": build_conf.version,
"status": build_conf.status,
"last_build_date": build_conf.last_build_date,
"auto_build": build_conf.auto_build
})
return HttpResponse(
json.dumps(data),
content_type="application/json"
)
def mapping(request):
return HttpResponse(
json.dumps(
list(PackageNameMapping.objects.values("pk", "orig_name", "to_name"))
),
content_type="application/json"
)
@login_required
def rebuild_package(request, build_conf_id):
build_conf = get_object_or_404(BuildConfiguration, pk=build_conf_id)
build_conf.status = 3
build_conf.save()
try:
start_build.delay(build_conf.pk)
message = {
"level": "success",
"text": 'Task for rebuild package "%s" in branch "%s" sent.' % (build_conf.name, build_conf.pkg_branch),
}
except Exception as e:
message = {
"level": "danger",
"text": 'Error send task for build package "%s" in branch "%s: %s".' % (
build_conf.name, build_conf.pkg_branch, e)
}
return HttpResponse(
json.dumps(message),
content_type="application/json"
)
@login_required
def autobuild_on_off(request, build_conf_id):
build_conf = get_object_or_404(BuildConfiguration, pk=build_conf_id)
build_conf.auto_build = not build_conf.auto_build
build_conf.save()
return HttpResponse(
json.dumps({"autobuild_status": build_conf.auto_build}),
content_type="application/json"
)
@login_required
def global_autobuild_on(request):
BuildConfiguration.objects.update(auto_build=True)
return HttpResponse(
json.dumps({"gobal_autobuild_status": True}),
content_type="application/json"
)
@login_required
def global_autobuild_off(request):
BuildConfiguration.objects.update(auto_build=False)
return HttpResponse(
json.dumps({"gobal_autobuild_status": False}),
content_type="application/json"
)
@login_required
def remove_build_conf(request, build_conf_id):
build_conf = get_object_or_404(BuildConfiguration, pk=build_conf_id)
message = {
"level": "success",
"text": "Package '%s' from branch '%s' removed!" % (build_conf.name, build_conf.pkg_branch)
}
build_conf.delete()
return HttpResponse(
json.dumps(message),
content_type="application/json"
)
@login_required
def remove_branch(request, branch_id):
branch = get_object_or_404(Branch, pk=branch_id)
message = {
"level": "success",
"text": "Branch '%s' removed!" % (branch.name)
}
branch.delete()
return HttpResponse(
json.dumps(message),
content_type="application/json"
)
@login_required
def remove_mapping(request, mapping_id):
mapping = get_object_or_404(PackageNameMapping, pk=mapping_id)
message = {
"level": "success",
"text": "Mapping '%s -> %s' removed!" % (mapping.orig_name, mapping.to_name)
}
mapping.delete()
return HttpResponse(
json.dumps(message),
content_type="application/json"
)
@login_required
def add_build_conf(request, conf_type="deb"):
print conf_type
if conf_type == "remote":
build_conf_form = RemoteBuildConfigurationForm
else:
build_conf_form = BuildConfigurationForm
if request.method == "POST":
form = build_conf_form(request.POST, initial={"conf_type": conf_type})
else:
form = build_conf_form(initial={"conf_type": conf_type})
if form.is_valid():
build_conf = form.save()
if conf_type == "remote":
# Set Remote Build virtual branch
build_conf.pkg_branch = Branch.objects.get(pk=1)
build_conf.save()
return HttpResponseRedirect("/")
return render(request, 'edit_form.html', {
'form': form,
'title': "Add Build Configuration",
'current_page': 'build_conf',
}, content_type="text/html")
@login_required
def edit_build_conf(request, build_conf_id):
build_conf = get_object_or_404(BuildConfiguration, pk=build_conf_id)
if build_conf.pkg_branch_id == 1:
build_conf_form = RemoteBuildConfigurationForm
else:
build_conf_form = BuildConfigurationForm
if request.method == "POST":
form = build_conf_form(request.POST, instance=build_conf)
else:
form = build_conf_form(instance=build_conf)
del form.fields["conf_type"]
if form.is_valid():
build_conf = form.save()
return HttpResponseRedirect("/")
return render(request, 'edit_form.html', {
'form': form,
'current_page': 'build_conf',
'title': "Change Build Configuration: %s" % build_conf.name,
}, content_type="text/html")
@login_required
def view_log(request, build_conf_id):
build_conf = get_object_or_404(BuildConfiguration, pk=build_conf_id)
return render(request, 'view_log.html', {
'current_page': 'build_conf',
'build_conf': build_conf,
}, content_type="text/html")
@login_required
def rebuild_all_packages(request):
try:
for build_conf in BuildConfiguration.objects.all():
start_build.delay(build_conf.pk)
except:
pass
return HttpResponseRedirect("/")
# Branches views
@login_required
def add_branch(request):
if request.method == "POST":
form = BranchForm(request.POST)
else:
form = BranchForm()
if form.is_valid():
form.save()
return HttpResponseRedirect('/#/branches')
return render(request, 'edit_form.html', {
'form': form,
'title': "Add Branch",
'current_page': 'branch',
}, content_type="text/html")
@login_required
def edit_branch(request, branch_id):
branch = get_object_or_404(Branch, pk=branch_id)
if request.method == "POST":
form = BranchForm(request.POST, instance=branch)
else:
form = BranchForm(instance=branch)
if form.is_valid():
form.save()
return HttpResponseRedirect('/#/branches')
return render(request, 'edit_form.html', {
'form': form,
'current_page': 'branch',
'title': "Change Branch Configuration: %s" % branch.name,
}, content_type="text/html")
@login_required
def add_mapping(request):
if request.method == "POST":
form = PackageNameMappingForm(request.POST)
else:
form = PackageNameMappingForm()
if form.is_valid():
mapping = form.save()
return HttpResponseRedirect("/#/mapping")
return render(request, 'edit_form.html', {
'form': form,
'title': "Add Mapping",
'current_page': 'mapping',
}, content_type="text/html")
@login_required
def edit_mapping(request, mapping_id):
mapping = get_object_or_404(PackageNameMapping, pk=mapping_id)
if request.method == "POST":
form = PackageNameMappingForm(request.POST, instance=mapping)
else:
form = PackageNameMappingForm(instance=mapping)
if form.is_valid():
form.save()
return HttpResponseRedirect("/#/mapping")
return render(request, 'edit_form.html', {
'form': form,
'current_page': 'mapping',
'title': "Change Mapping Configuration: %s => %s" % (mapping.orig_name, mapping.to_name),
}, content_type="text/html")
def get_pkg_version(request, branch_name, pkg_name):
branch = get_object_or_404(Branch, name=branch_name)
pkg = get_object_or_404(BuildConfiguration, name=pkg_name, pkg_branch=branch)
return HttpResponse(str(pkg.version))
|
{
"content_hash": "bc0765c258178676987e77fc29befb7c",
"timestamp": "",
"source": "github",
"line_count": 311,
"max_line_length": 116,
"avg_line_length": 28.418006430868168,
"alnum_prop": 0.6280832767594479,
"repo_name": "daniel-yavorovich/cpan2repo",
"id": "f4ee096a060c16642a0448d2daf3a35238677e2a",
"size": "8862",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webui/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "98"
},
{
"name": "JavaScript",
"bytes": "9402"
},
{
"name": "Perl",
"bytes": "558"
},
{
"name": "Python",
"bytes": "70320"
},
{
"name": "Shell",
"bytes": "1320"
}
],
"symlink_target": ""
}
|
import os
import sys
import socket
import signal
import struct
import thread
from bisect import insort
from cStringIO import StringIO
from traceback import print_exc
from errno import EWOULDBLOCK, ENOBUFS, EINTR
from BitTorrent.platform import bttime
from BitTorrent import WARNING, CRITICAL, FAQ_URL
from BitTorrent.defer import Deferred
try:
from select import poll, error, POLLIN, POLLOUT, POLLERR, POLLHUP
timemult = 1000
except ImportError:
from BitTorrent.selectpoll import poll, error, POLLIN, POLLOUT, POLLERR, POLLHUP
timemult = 1
NOLINGER = struct.pack('ii', 1, 0)
class Handler(object):
# there is only a semantic difference between "made" and "started".
# I prefer "started"
def connection_started(self, s):
self.connection_made(s)
def connection_made(self, s):
pass
def connection_lost(self, s):
pass
# Maybe connection_lost should just have a default 'None' exception parameter
def connection_failed(self, addr, exception):
pass
def connection_flushed(self, s):
pass
def data_came_in(self, addr, datagram):
pass
class SingleSocket(object):
def __init__(self, rawserver, sock, handler, context, addr=None):
self.rawserver = rawserver
self.socket = sock
self.handler = handler
self.buffer = []
self.last_hit = bttime()
self.fileno = sock.fileno()
self.connected = False
self.context = context
self.ip = None
self.port = None
if isinstance(addr, basestring):
# UNIX socket, not really ip
self.ip = addr
else:
peername = (None, None)
try:
peername = self.socket.getpeername()
except socket.error, e:
# UDP raises (107, 'Transport endpoint is not connected')
# but so can a TCP socket we just got from start_connection,
# in which case addr is set and we use it later.
if (e[0] == 107) and (addr == None):
# lies.
# the peer endpoint should be gathered from the
# tuple passed to data_came_in
try:
peername = self.socket.getsockname()
except socket.error, e:
pass
# this is awesome!
# max prefers a value over None, so this is a common case:
# max(('ip', None), ('ip', 1234)) => ('ip', 1234)
# or the default case:
# max(('ip', None), None) => ('ip', None)
self.ip, self.port = max(peername, addr)
def close(self):
sock = self.socket
self.socket = None
self.buffer = []
del self.rawserver.single_sockets[self.fileno]
self.rawserver.poll.unregister(sock)
self.handler = None
if self.rawserver.config['close_with_rst']:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, NOLINGER)
sock.close()
def shutdown(self, val):
self.socket.shutdown(val)
def is_flushed(self):
return len(self.buffer) == 0
def write(self, s):
assert self.socket is not None
self.buffer.append(s)
if len(self.buffer) == 1:
self.try_write()
def try_write(self):
if self.connected:
try:
while self.buffer != []:
amount = self.socket.send(self.buffer[0])
if amount != len(self.buffer[0]):
if amount != 0:
self.buffer[0] = self.buffer[0][amount:]
break
del self.buffer[0]
except socket.error, e:
code, msg = e
if code != EWOULDBLOCK:
self.rawserver.dead_from_write.append(self)
return
if self.buffer == []:
self.rawserver.poll.register(self.socket, POLLIN)
else:
self.rawserver.poll.register(self.socket, POLLIN | POLLOUT)
def default_error_handler(level, message):
print message
class RawServer(object):
def __init__(self, doneflag, config, noisy=True,
errorfunc=default_error_handler, tos=0):
self.config = config
self.tos = tos
self.poll = poll()
# {socket: SingleSocket}
self.single_sockets = {}
self.udp_sockets = {}
self.dead_from_write = []
self.doneflag = doneflag
self.noisy = noisy
self.errorfunc = errorfunc
self.funcs = []
self.externally_added_tasks = []
self.listening_handlers = {}
self.serversockets = {}
self.live_contexts = {None : True}
self.ident = thread.get_ident()
self.to_start = []
self.add_task(self.scan_for_timeouts, config['timeout_check_interval'])
if sys.platform.startswith('win'):
# Windows doesn't support pipes with select(). Just prevent sleeps
# longer than a second instead of proper wakeup for now.
self.wakeupfds = (None, None)
self._wakeup()
else:
self.wakeupfds = os.pipe()
self.poll.register(self.wakeupfds[0], POLLIN)
def _wakeup(self):
self.add_task(self._wakeup, 1)
def add_context(self, context):
self.live_contexts[context] = True
def remove_context(self, context):
del self.live_contexts[context]
self.funcs = [x for x in self.funcs if x[3] != context]
def add_task(self, func, delay, args=(), context=None):
assert thread.get_ident() == self.ident
assert type(args) == list or type(args) == tuple
if context in self.live_contexts:
insort(self.funcs, (bttime() + delay, func, args, context))
def external_add_task(self, func, delay, args=(), context=None):
assert type(args) == list or type(args) == tuple
self.externally_added_tasks.append((func, delay, args, context))
# Wake up the RawServer thread in case it's sleeping in poll()
if self.wakeupfds[1] is not None:
os.write(self.wakeupfds[1], 'X')
def scan_for_timeouts(self):
self.add_task(self.scan_for_timeouts,
self.config['timeout_check_interval'])
t = bttime() - self.config['socket_timeout']
tokill = []
for s in [s for s in self.single_sockets.values() if s not in self.udp_sockets.keys()]:
if s.last_hit < t:
tokill.append(s)
for k in tokill:
if k.socket is not None:
self._close_socket(k)
def create_unixserversocket(filename):
server = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
server.setblocking(0)
server.bind(filename)
server.listen(5)
return server
create_unixserversocket = staticmethod(create_unixserversocket)
def create_serversocket(port, bind='', reuse=False, tos=0):
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if reuse and os.name != 'nt':
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.setblocking(0)
if tos != 0:
try:
server.setsockopt(socket.IPPROTO_IP, socket.IP_TOS, tos)
except:
pass
server.bind((bind, port))
server.listen(5)
return server
create_serversocket = staticmethod(create_serversocket)
def create_udpsocket(port, bind='', reuse=False, tos=0):
server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if reuse and os.name != 'nt':
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.setblocking(0)
if tos != 0:
try:
server.setsockopt(socket.IPPROTO_IP, socket.IP_TOS, tos)
except:
pass
server.bind((bind, port))
return server
create_udpsocket = staticmethod(create_udpsocket)
def start_listening(self, serversocket, handler, context=None):
self.listening_handlers[serversocket.fileno()] = (handler, context)
self.serversockets[serversocket.fileno()] = serversocket
self.poll.register(serversocket, POLLIN)
def start_listening_udp(self, serversocket, handler, context=None):
self.listening_handlers[serversocket.fileno()] = (handler, context)
nss = SingleSocket(self, serversocket, handler, context)
self.single_sockets[serversocket.fileno()] = nss
self.udp_sockets[nss] = 1
self.poll.register(serversocket, POLLIN)
def stop_listening(self, serversocket):
del self.listening_handlers[serversocket.fileno()]
del self.serversockets[serversocket.fileno()]
self.poll.unregister(serversocket)
def stop_listening_udp(self, serversocket):
del self.listening_handlers[serversocket.fileno()]
del self.single_sockets[serversocket.fileno()]
l = [s for s in self.udp_sockets.keys() if s.socket == serversocket]
del self.udp_sockets[l[0]]
self.poll.unregister(serversocket)
def start_connection(self, dns, handler=None, context=None, do_bind=True):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(0)
bindaddr = do_bind and self.config['bind']
if bindaddr:
sock.bind((bindaddr, 0))
if self.tos != 0:
try:
sock.setsockopt(socket.IPPROTO_IP, socket.IP_TOS, self.tos)
except:
pass
try:
sock.connect_ex(dns)
except socket.error:
sock.close()
raise
except Exception, e:
sock.close()
raise socket.error(str(e))
self.poll.register(sock, POLLIN)
s = SingleSocket(self, sock, handler, context, dns)
self.single_sockets[sock.fileno()] = s
return s
def _add_pending_connection(self, addr):
pass
def _remove_pending_connection(self, addr):
pass
def async_start_connection(self, dns, handler=None, context=None, do_bind=True):
self.to_start.insert(0, (dns, handler, context, do_bind))
self._start_connection()
return True
def _start_connection(self):
dns, handler, context, do_bind = self.to_start.pop()
try:
s = self.start_connection(dns, handler, context, do_bind)
except Exception, e:
handler.connection_failed(dns, e)
else:
handler.connection_started(s)
def wrap_socket(self, sock, handler, context=None, ip=None, port=None):
sock.setblocking(0)
self.poll.register(sock, POLLIN)
s = SingleSocket(self, sock, handler, context, (ip, port))
self.single_sockets[sock.fileno()] = s
return s
# must be called from the main thread
def install_sigint_handler(self):
signal.signal(signal.SIGINT, self._handler)
def _handler(self, signum, frame):
self.external_add_task(self.doneflag.set, 0)
# Allow pressing ctrl-c multiple times to raise KeyboardInterrupt,
# in case the program is in an infinite loop
signal.signal(signal.SIGINT, signal.default_int_handler)
def _handle_events(self, events):
for sock, event in events:
if sock in self.serversockets:
s = self.serversockets[sock]
if event & (POLLHUP | POLLERR) != 0:
try:
self.poll.unregister(s)
s.close()
except socket.error, e:
self.errorfunc(WARNING, _("failed to unregister or close server socket: %s") % str(e))
self.errorfunc(CRITICAL, _("lost server socket"))
else:
handler, context = self.listening_handlers[sock]
try:
newsock, addr = s.accept()
except socket.error, e:
continue
try:
newsock.setblocking(0)
nss = SingleSocket(self, newsock, handler, context, addr)
self.single_sockets[newsock.fileno()] = nss
self.poll.register(newsock, POLLIN)
self._make_wrapped_call(handler. \
connection_made, (nss,), context=context)
except socket.error, e:
self.errorfunc(WARNING,
_("Error handling accepted connection: ") +
str(e))
else:
s = self.single_sockets.get(sock)
if s is None:
if sock == self.wakeupfds[0]:
# Another thread wrote this just to wake us up.
os.read(sock, 1)
continue
s.connected = True
if event & POLLERR:
self._close_socket(s)
continue
if event & (POLLIN | POLLHUP):
s.last_hit = bttime()
try:
data, addr = s.socket.recvfrom(100000)
except socket.error, e:
code, msg = e
if code != EWOULDBLOCK:
self._close_socket(s)
continue
if data == '' and not self.udp_sockets.has_key(s):
self._close_socket(s)
else:
if not self.udp_sockets.has_key(s):
self._make_wrapped_call(s.handler.data_came_in,
(s, data), s)
else:
self._make_wrapped_call(s.handler.data_came_in,
(addr, data), s)
# data_came_in could have closed the socket (s.socket = None)
if event & POLLOUT and s.socket is not None:
s.try_write()
if s.is_flushed():
self._make_wrapped_call(s.handler.connection_flushed,
(s,), s)
def _pop_externally_added(self):
while self.externally_added_tasks:
task = self.externally_added_tasks.pop(0)
self.add_task(*task)
def listen_forever(self):
ret = 0
self.ident = thread.get_ident()
while not self.doneflag.isSet() and not ret:
ret = self.listen_once()
def listen_once(self, period=1e9):
try:
self._pop_externally_added()
if self.funcs:
period = self.funcs[0][0] - bttime()
if period < 0:
period = 0
events = self.poll.poll(period * timemult)
if self.doneflag.isSet():
return 0
while self.funcs and self.funcs[0][0] <= bttime():
garbage, func, args, context = self.funcs.pop(0)
self._make_wrapped_call(func, args, context=context)
self._close_dead()
self._handle_events(events)
if self.doneflag.isSet():
return 0
self._close_dead()
except error, e:
if self.doneflag.isSet():
return 0
# I can't find a coherent explanation for what the behavior
# should be here, and people report conflicting behavior,
# so I'll just try all the possibilities
code = None
if hasattr(e, '__getitem__'):
code = e[0]
else:
code = e
if code == ENOBUFS:
# log the traceback so we can see where the exception is coming from
print_exc(file = sys.stderr)
self.errorfunc(CRITICAL,
_("Have to exit due to the TCP stack flaking "
"out. Please see the FAQ at %s") % FAQ_URL)
return -1
elif code in (EINTR,):
# add other ignorable error codes here
pass
else:
self.errorfunc(CRITICAL, str(e))
return 0
except KeyboardInterrupt:
print_exc()
return -1
except:
data = StringIO()
print_exc(file=data)
self.errorfunc(CRITICAL, data.getvalue())
return 0
def _make_wrapped_call(self, function, args, socket=None, context=None):
try:
function(*args)
except KeyboardInterrupt:
raise
except Exception, e: # hopefully nothing raises strings
# Incoming sockets can be assigned to a particular torrent during
# a data_came_in call, and it's possible (though not likely) that
# there could be a torrent-specific exception during the same call.
# Therefore read the context after the call.
if socket is not None:
context = socket.context
if self.noisy and context is None:
data = StringIO()
print_exc(file=data)
self.errorfunc(CRITICAL, data.getvalue())
if context is not None:
context.got_exception(e)
def _close_dead(self):
while len(self.dead_from_write) > 0:  # closing a socket can add more entries, so keep draining
old = self.dead_from_write
self.dead_from_write = []
for s in old:
if s.socket is not None:
self._close_socket(s)
def _close_socket(self, s):
sock = s.socket.fileno()
if self.config['close_with_rst']:
s.socket.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, NOLINGER)
s.socket.close()
self.poll.unregister(sock)
del self.single_sockets[sock]
s.socket = None
self._make_wrapped_call(s.handler.connection_lost, (s,), s)
s.handler = None
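# --- Illustrative sketch, not part of the original BitTorrent source ---
# The poll loop above is woken from other threads by writing a byte to a pipe
# that is registered with poll() (the self.wakeupfds check in _handle_events).
# A minimal, self-contained version of that technique is sketched below
# (Unix-only; every name in this sketch is introduced purely for illustration).
import os, select, threading

def _wakeup_sketch():
    r, w = os.pipe()
    poller = select.poll()
    poller.register(r, select.POLLIN)
    # another thread writes a single byte, which makes poll() return early
    threading.Timer(0.1, os.write, (w, 'x')).start()
    events = poller.poll(10 * 1000)   # would otherwise block for 10 seconds
    os.read(r, 1)                     # drain the wake-up byte
    return events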
|
{
"content_hash": "c70a2ef8908fc813de403dc2a5707818",
"timestamp": "",
"source": "github",
"line_count": 489,
"max_line_length": 110,
"avg_line_length": 37.646216768916155,
"alnum_prop": 0.5374545059481776,
"repo_name": "galaxy001/libtorrent",
"id": "efa067d330a1bd13802d1de88f47cb64643598d6",
"size": "18988",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "BitTorrent-4.4.0/BitTorrent/RawServer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4346"
},
{
"name": "C",
"bytes": "97132"
},
{
"name": "C++",
"bytes": "27238"
},
{
"name": "Gnuplot",
"bytes": "1814"
},
{
"name": "HTML",
"bytes": "83937"
},
{
"name": "Makefile",
"bytes": "3536"
},
{
"name": "NSIS",
"bytes": "23483"
},
{
"name": "Python",
"bytes": "5888665"
},
{
"name": "Roff",
"bytes": "68461"
},
{
"name": "Shell",
"bytes": "9245"
},
{
"name": "TeX",
"bytes": "744"
}
],
"symlink_target": ""
}
|
# program067:
# The Fibonacci Sequence is computed based on the following formula:
# f(n)=0 if n=0
# f(n)=1 if n=1
# f(n)=f(n-1)+f(n-2) if n>1
# Please write a program using list comprehension to
# print the Fibonacci Sequence in comma separated form with a given n
# input by console.
# Example:
# If the following n is given as input to the program:
# 7
# Then, the output of the program should be:
# 0,1,1,2,3,5,8,13
# Hints:
# We can define recursive function in Python.
# Use list comprehension to generate a list from an existing list.
# Use string.join() to join a list of strings.
# In case of input data being supplied to the question,
# it should be assumed to be a console input.
# Python has no tail call optimization, so there is no advantage to using recursion here ;)
# (an illustrative memoized variant is sketched after the solution below)
i = 0
def fibonacci(number):
global i
i += 1
print i  # running count of fibonacci() calls -- shows how many calls naive recursion makes
if number == 0:
return 0
elif number == 1:
return 1
else:
return fibonacci(number - 1) + fibonacci(number - 2)
def main(number):
result = [str(fibonacci(num)) for num in range(0, number + 1)]
print ",".join(result)
if __name__ == '__main__':
main(int(raw_input("Input Number: ")))
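# --- Illustrative addition, not part of the original exercise ---
# The naive recursion above recomputes subproblems, so fibonacci(n) makes an
# exponential number of calls (which is what the printed counter shows); caching
# results keeps it linear in n.  fib_memo and main_memo are names introduced
# here purely for this sketch.
def fib_memo(number, _cache={0: 0, 1: 1}):
    # the shared default dict persists across calls and acts as the memo table
    if number not in _cache:
        _cache[number] = fib_memo(number - 1) + fib_memo(number - 2)
    return _cache[number]

def main_memo(number):
    print ",".join([str(fib_memo(num)) for num in range(0, number + 1)])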
|
{
"content_hash": "4b93a3a54c1342f81170dba8631b89e7",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 74,
"avg_line_length": 24.93877551020408,
"alnum_prop": 0.6350245499181669,
"repo_name": "dek-odoo/python-samples",
"id": "98e34d7dae59cf0e49526ed7f3dc7eacc7d9509f",
"size": "1304",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python exercises/dek_program067.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "95972"
}
],
"symlink_target": ""
}
|
from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.dashboards.fogbow import dashboard
class Status(horizon.Panel):
name = _("Status")
slug = "status"
dashboard.Fogbow.register(Status)
|
{
"content_hash": "24ea77c5f13f092653d70816c0cf543c",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 59,
"avg_line_length": 18.692307692307693,
"alnum_prop": 0.757201646090535,
"repo_name": "fogbow/fogbow-dashboard",
"id": "4438cd23e31577d76f3f8ad7b7e32e09b66310d3",
"size": "243",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/fogbow/status/panel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2153611"
},
{
"name": "HTML",
"bytes": "95921"
},
{
"name": "JavaScript",
"bytes": "375181"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "1359088"
},
{
"name": "Shell",
"bytes": "13040"
}
],
"symlink_target": ""
}
|
import unittest
from sqlalchemy import Table, MetaData, Column, String, create_engine
from sqlalchemy.engine.reflection import Inspector
from alembic.util import CommandError
version_table = Table('version_table', MetaData(),
Column('version_num', String(32), nullable=False))
class TestMigrationContext(unittest.TestCase):
_bind = []  # class-level cache so a single engine is created lazily and shared by all tests
@property
def bind(self):
if not self._bind:
engine = create_engine('sqlite:///', echo=True)
self._bind.append(engine)
return self._bind[0]
def setUp(self):
self.connection = self.bind.connect()
self.transaction = self.connection.begin()
def tearDown(self):
version_table.drop(self.connection, checkfirst=True)
self.transaction.rollback()
def make_one(self, **kwargs):
from alembic.migration import MigrationContext
return MigrationContext.configure(**kwargs)
def get_revision(self):
result = self.connection.execute(version_table.select())
rows = result.fetchall()
if len(rows) == 0:
return None
self.assertEqual(len(rows), 1)
return rows[0]['version_num']
def test_config_default_version_table_name(self):
context = self.make_one(dialect_name='sqlite')
self.assertEqual(context._version.name, 'alembic_version')
def test_config_explicit_version_table_name(self):
context = self.make_one(dialect_name='sqlite',
opts={'version_table': 'explicit'})
self.assertEqual(context._version.name, 'explicit')
def test_get_current_revision_creates_version_table(self):
context = self.make_one(connection=self.connection,
opts={'version_table': 'version_table'})
self.assertEqual(context.get_current_revision(), None)
insp = Inspector(self.connection)
self.assertTrue('version_table' in insp.get_table_names())
def test_get_current_revision(self):
context = self.make_one(connection=self.connection,
opts={'version_table': 'version_table'})
version_table.create(self.connection)
self.assertEqual(context.get_current_revision(), None)
self.connection.execute(
version_table.insert().values(version_num='revid'))
self.assertEqual(context.get_current_revision(), 'revid')
def test_get_current_revision_error_if_starting_rev_given_online(self):
context = self.make_one(connection=self.connection,
opts={'starting_rev': 'boo'})
self.assertRaises(CommandError, context.get_current_revision)
def test_get_current_revision_offline(self):
context = self.make_one(dialect_name='sqlite',
opts={'starting_rev': 'startrev',
'as_sql': True})
self.assertEqual(context.get_current_revision(), 'startrev')
def test__update_current_rev(self):
version_table.create(self.connection)
context = self.make_one(connection=self.connection,
opts={'version_table': 'version_table'})
context._update_current_rev(None, 'a')
self.assertEqual(self.get_revision(), 'a')
context._update_current_rev('a', 'b')
self.assertEqual(self.get_revision(), 'b')
context._update_current_rev('b', None)
self.assertEqual(self.get_revision(), None)
|
{
"content_hash": "7b197e32a205ecb4e64d43e0b422e162",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 75,
"avg_line_length": 40.264367816091955,
"alnum_prop": 0.6203254353411362,
"repo_name": "briandailey/alembic",
"id": "0343cb1c6b0491593999e89023edfe9aece34ee6",
"size": "3503",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_version_table.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "285753"
}
],
"symlink_target": ""
}
|
import os
import datetime
from slugify import slugify
from contextlib import contextmanager
from flask import Flask, render_template, make_response, request, jsonify, redirect, send_from_directory
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import Column, Integer, Sequence, String, DateTime, create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
APP_NAME = 'python-cuturl'
# Paths
PROJECT_PATH = os.path.join(os.path.abspath(os.path.dirname(__file__)))
TEMPLATE_PATH = os.path.join(PROJECT_PATH, 'templates')
STATIC_PATH = os.path.join(PROJECT_PATH, 'assets')
#print('PP '+PROJECT_PATH)
#print('TP '+TEMPLATE_PATH)
#print('SP '+PROJECT_PATH)
# Create the app
app = Flask(__name__,
template_folder=TEMPLATE_PATH,
static_folder=STATIC_PATH)
# SQL Alchemy setup
dburi = os.environ["DATABASE_URL"]
dburi = dburi.replace("postgres://", "postgresql://", 1)
# to ensure compatibility between recent SQLAlchemy and Heroku:
# https://stackoverflow.com/questions/62688256/sqlalchemy-exc-nosuchmoduleerror-cant-load-plugin-sqlalchemy-dialectspostgre
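# Illustrative example (the URL below is made up, not from this project): a
# Heroku-style URL such as
#   "postgres://user:pw@host.example:5432/dbname"
# becomes, after the replace above,
#   "postgresql://user:pw@host.example:5432/dbname"
# which is the dialect prefix recent SQLAlchemy releases expect.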
app.config.update(
SQA_ECHO = True,
SQLALCHEMY_DATABASE_URI = dburi,
SQLALCHEMY_TRACK_MODIFICATIONS = False
)
if ("postgresql+psycopg2" in app.config['SQLALCHEMY_DATABASE_URI']):
try:
import psycopg2
# For windows, add to PATH: C:\Program Files\PostgreSQL\...\bin
# Note, DATABASE_URL is an environment variable used by Heroku;
# Therefore, override in the dev environment if using a local setup
except (OSError, ImportError, KeyError):
    raise Exception('PostgreSQL cannot be used, psycopg2 import failed!')
if app.config['SQA_ECHO'] == True:
print("SQLALCHEMY_DATABASE_URI is " + app.config['SQLALCHEMY_DATABASE_URI'])
Base = declarative_base()
class Link(Base):
__tablename__ = 'link'
link_id = Column(Integer, Sequence('link_id_seq'), primary_key=True)
url = Column(String(1000))
slug = Column(String(1000))
description = Column(String(1000))
create_time = Column(DateTime)
def __init__(self, slug, url, description, create_time):
self.url = url
self.slug = slug
self.description = description
self.create_time = create_time
def __repr__(self):
rpr = "<Link('{0}', '{1}', '{2}', '{3}')>".format(self.url, self.slug, self.description, self.create_time)
return rpr
engine = create_engine(app.config['SQLALCHEMY_DATABASE_URI'], echo=app.config['SQA_ECHO'])
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
@contextmanager
def session_scope():
"""Provide a transactional scope around a series of operations."""
session = Session()
try:
yield session
session.commit()
except:
session.rollback()
raise
finally:
session.close()
# Route methods
def index():
    return render_template("index.html", title="My best site")
def static(filepath):
    # serve files from the assets directory (STATIC_PATH defined above)
    return send_from_directory(STATIC_PATH, filepath)
def list():
links = None
with session_scope() as session:
links = session.query(Link).all()
print(links)
return render_template("list.html", jlinks=links)
def link(slug):
with session_scope() as session:
links = session.query(Link)
this_link = session.query(Link).filter_by(slug=slug).first()
return render_template("single.html", link=this_link)
@app.route('/add', methods=['POST'])
def add():
url = request.form.get('url-input')
comment = request.form.get('comment-input')
if (url and comment):
with session_scope() as session:
session.add(Link(url=url,
description=comment,
slug=slugify(url),
create_time=datetime.datetime.now()))
return redirect("/list", code=302)
# Route bindings
app.add_url_rule('/', 'index', index)
app.add_url_rule('/list', 'list', list)
app.add_url_rule('/assets/<path:filepath>', 'assets', static)  # endpoint name ('assets', chosen here) and the view function
app.add_url_rule('/list/<string:slug>', 'link', link)
|
{
"content_hash": "18de0691fd4b9b0b6fd3dd6584d9c5bd",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 123,
"avg_line_length": 28.81118881118881,
"alnum_prop": 0.6660194174757281,
"repo_name": "BartGo/bottle-cuturl",
"id": "30421618d1251bef57cd8955396e83c76a82480f",
"size": "4144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/routes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "341"
},
{
"name": "Gherkin",
"bytes": "201"
},
{
"name": "HTML",
"bytes": "3672"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "21484"
},
{
"name": "Shell",
"bytes": "1570"
}
],
"symlink_target": ""
}
|
"""Handles EC2 communication via boto"""
__author__ = 'ml483'
|
{
"content_hash": "a8e21cbe07c2d47b6680c215575818af",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 40,
"avg_line_length": 31,
"alnum_prop": 0.6612903225806451,
"repo_name": "optiminimalist/movingdata",
"id": "a806b4ef2b17b6bad339e9b3e87421b5a7929f3f",
"size": "62",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "deploy/ec2/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37772"
},
{
"name": "Shell",
"bytes": "1315"
}
],
"symlink_target": ""
}
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "real_estate_agency.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
{
"content_hash": "8c932a48afbc5ebc477e2771d6fb246e",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 82,
"avg_line_length": 37.80952380952381,
"alnum_prop": 0.6234256926952141,
"repo_name": "Dybov/real_estate_agency",
"id": "621729fe3d1999c1a7bfd8aadb7ba39f0751b6e4",
"size": "816",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "real_estate_agency/manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "102329"
},
{
"name": "HTML",
"bytes": "104357"
},
{
"name": "JavaScript",
"bytes": "86459"
},
{
"name": "Python",
"bytes": "259967"
}
],
"symlink_target": ""
}
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from django.db.models import F
from sentry.utils.query import RangeQuerySetWrapperWithProgressBar
class Migration(DataMigration):
def forwards(self, orm):
Project = orm['sentry.Project']
ReleaseProject = orm['sentry.ReleaseProject']
db.commit_transaction()  # run the per-project updates outside of South's wrapping transaction
queryset = Project.objects.all()
for item in RangeQuerySetWrapperWithProgressBar(queryset):
if item.flags == item.flags | 1:  # lowest flag bit already set; nothing to do
continue
if not ReleaseProject.objects.filter(project=item).exists():  # only flag projects that have releases
continue
db.execute("UPDATE sentry_project SET flags = flags | 1 WHERE id = %s", [item.id])
db.start_transaction()
def backwards(self, orm):
pass
models = {
'sentry.activity': {
'Meta': {
'object_name': 'Activity'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.apikey': {
'Meta': {
'object_name': 'ApiKey'
},
'allowed_origins':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32'
}),
'label': (
'django.db.models.fields.CharField', [], {
'default': "'Default'",
'max_length': '64',
'blank': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Organization']"
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.apitoken': {
'Meta': {
'object_name': 'ApiToken'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiKey']",
'null': 'True'
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'token':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.auditlogentry': {
'Meta': {
'object_name': 'AuditLogEntry'
},
'actor': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_actors'",
'null': 'True',
'to': "orm['sentry.User']"
}
),
'actor_key': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiKey']",
'null': 'True',
'blank': 'True'
}
),
'actor_label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'target_object':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'target_user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_targets'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.authenticator': {
'Meta': {
'unique_together': "(('user', 'type'),)",
'object_name': 'Authenticator',
'db_table': "'auth_authenticator'"
},
'config': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}),
'created_at':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authidentity': {
'Meta': {
'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))",
'object_name': 'AuthIdentity'
},
'auth_provider': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.AuthProvider']"
}
),
'data': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'last_synced':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_verified':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authprovider': {
'Meta': {
'object_name': 'AuthProvider'
},
'config': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_global_access':
('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'default_role':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'default_teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']",
'unique': 'True'
}
),
'provider': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'sync_time':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
})
},
'sentry.broadcast': {
'Meta': {
'object_name': 'Broadcast'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_expires': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime(2017, 2, 22, 0, 0)',
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active':
('django.db.models.fields.BooleanField', [], {
'default': 'True',
'db_index': 'True'
}),
'link': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.CharField', [], {
'max_length': '256'
}),
'title': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'upstream_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'blank': 'True'
}
)
},
'sentry.broadcastseen': {
'Meta': {
'unique_together': "(('broadcast', 'user'),)",
'object_name': 'BroadcastSeen'
},
'broadcast': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Broadcast']"
}
),
'date_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.commit': {
'Meta': {
'unique_together': "(('repository_id', 'key'),)",
'object_name': 'Commit',
'index_together': "(('repository_id', 'date_added'),)"
},
'author': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.CommitAuthor']",
'null': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'message': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'repository_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.commitauthor': {
'Meta': {
'unique_together': "(('organization_id', 'email'),)",
'object_name': 'CommitAuthor'
},
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.commitfilechange': {
'Meta': {
'unique_together': "(('commit', 'filename'),)",
'object_name': 'CommitFileChange'
},
'commit': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Commit']"
}
),
'filename': ('django.db.models.fields.CharField', [], {
'max_length': '255'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '1'
})
},
'sentry.counter': {
'Meta': {
'object_name': 'Counter',
'db_table': "'sentry_projectcounter'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'unique': 'True'
}
),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.dsymbundle': {
'Meta': {
'object_name': 'DSymBundle'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymObject']"
}
),
'sdk': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymSDK']"
}
)
},
'sentry.dsymobject': {
'Meta': {
'object_name': 'DSymObject'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_path': ('django.db.models.fields.TextField', [], {
'db_index': 'True'
}),
'uuid':
('django.db.models.fields.CharField', [], {
'max_length': '36',
'db_index': 'True'
}),
'vmaddr':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'vmsize':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
})
},
'sentry.dsymsdk': {
'Meta': {
'object_name':
'DSymSDK',
'index_together':
"[('version_major', 'version_minor', 'version_patchlevel', 'version_build')]"
},
'dsym_type':
('django.db.models.fields.CharField', [], {
'max_length': '20',
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'sdk_name': ('django.db.models.fields.CharField', [], {
'max_length': '20'
}),
'version_build': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'version_major': ('django.db.models.fields.IntegerField', [], {}),
'version_minor': ('django.db.models.fields.IntegerField', [], {}),
'version_patchlevel': ('django.db.models.fields.IntegerField', [], {})
},
'sentry.dsymsymbol': {
'Meta': {
'unique_together': "[('object', 'address')]",
'object_name': 'DSymSymbol'
},
'address':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymObject']"
}
),
'symbol': ('django.db.models.fields.TextField', [], {})
},
'sentry.environment': {
'Meta': {
'unique_together': "(('project_id', 'name'),)",
'object_name': 'Environment'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'projects': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Project']",
'through': "orm['sentry.EnvironmentProject']",
'symmetrical': 'False'
}
)
},
'sentry.environmentproject': {
'Meta': {
'unique_together': "(('project', 'environment'),)",
'object_name': 'EnvironmentProject'
},
'environment': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Environment']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.event': {
'Meta': {
'unique_together': "(('project_id', 'event_id'),)",
'object_name': 'Event',
'db_table': "'sentry_message'",
'index_together': "(('group_id', 'datetime'),)"
},
'data':
('sentry.db.models.fields.node.NodeField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'db_column': "'message_id'"
}
),
'group_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'time_spent':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'null': 'True'
})
},
'sentry.eventmapping': {
'Meta': {
'unique_together': "(('project_id', 'event_id'),)",
'object_name': 'EventMapping'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventtag': {
'Meta': {
'unique_together':
"(('event_id', 'key_id', 'value_id'),)",
'object_name':
'EventTag',
'index_together':
"(('project_id', 'key_id', 'value_id'), ('group_id', 'key_id', 'value_id'))"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {
'unique_together':
"(('project', 'ident'), ('project', 'hash'))",
'object_name':
'EventUser',
'index_together':
"(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'username':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
})
},
'sentry.file': {
'Meta': {
'object_name': 'File'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'legacy_blob'",
'null': 'True',
'to': "orm['sentry.FileBlob']"
}
),
'blobs': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.FileBlob']",
'through': "orm['sentry.FileBlobIndex']",
'symmetrical': 'False'
}
),
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '40',
'null': 'True'
}),
'headers': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.fileblob': {
'Meta': {
'object_name': 'FileBlob'
},
'checksum':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '40'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
)
},
'sentry.fileblobindex': {
'Meta': {
'unique_together': "(('file', 'blob', 'offset'),)",
'object_name': 'FileBlobIndex'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.FileBlob']"
}
),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.globaldsymfile': {
'Meta': {
'object_name': 'GlobalDSymFile'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'uuid':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '36'
})
},
'sentry.group': {
'Meta': {
'unique_together': "(('project', 'short_id'),)",
'object_name': 'Group',
'db_table': "'sentry_groupedmessage'",
'index_together': "(('project', 'first_release'),)"
},
'active_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']",
'null': 'True',
'on_delete': 'models.PROTECT'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_public': (
'django.db.models.fields.NullBooleanField', [], {
'default': 'False',
'null': 'True',
'blank': 'True'
}
),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'resolved_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'short_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'time_spent_count':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'time_spent_total':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'times_seen': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
)
},
'sentry.groupassignee': {
'Meta': {
'object_name': 'GroupAssignee',
'db_table': "'sentry_groupasignee'"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'unique': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_assignee_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupbookmark': {
'Meta': {
'unique_together': "(('project', 'user', 'group'),)",
'object_name': 'GroupBookmark'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_bookmark_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupcommitresolution': {
'Meta': {
'unique_together': "(('group_id', 'commit_id'),)",
'object_name': 'GroupCommitResolution'
},
'commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
})
},
'sentry.groupemailthread': {
'Meta': {
'unique_together': "(('email', 'group'), ('email', 'msgid'))",
'object_name': 'GroupEmailThread'
},
'date': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'msgid': ('django.db.models.fields.CharField', [], {
'max_length': '100'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Project']"
}
)
},
'sentry.grouphash': {
'Meta': {
'unique_together': "(('project', 'hash'),)",
'object_name': 'GroupHash'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
)
},
'sentry.groupmeta': {
'Meta': {
'unique_together': "(('group', 'key'),)",
'object_name': 'GroupMeta'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {
'object_name': 'GroupRedirect'
},
'group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'previous_group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'unique': 'True'
})
},
'sentry.grouprelease': {
'Meta': {
'unique_together': "(('group_id', 'release_id', 'environment'),)",
'object_name': 'GroupRelease'
},
'environment':
('django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64'
}),
'first_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'release_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.groupresolution': {
'Meta': {
'object_name': 'GroupResolution'
},
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouprulestatus': {
'Meta': {
'unique_together': "(('rule', 'group'),)",
'object_name': 'GroupRuleStatus'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_active': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'rule': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Rule']"
}
),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
})
},
'sentry.groupseen': {
'Meta': {
'unique_together': "(('user', 'group'),)",
'object_name': 'GroupSeen'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'db_index': 'False'
}
)
},
'sentry.groupsnooze': {
'Meta': {
'object_name': 'GroupSnooze'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'until': ('django.db.models.fields.DateTimeField', [], {})
},
'sentry.groupsubscription': {
'Meta': {
'unique_together': "(('group', 'user'),)",
'object_name': 'GroupSubscription'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'subscription_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'subscription_set'",
'to': "orm['sentry.Project']"
}
),
'reason':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.grouptagkey': {
'Meta': {
'unique_together': "(('project', 'group', 'key'),)",
'object_name': 'GroupTagKey'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouptagvalue': {
'Meta': {
'unique_together': "(('group', 'key', 'value'),)",
'object_name': 'GroupTagValue',
'db_table': "'sentry_messagefiltervalue'",
'index_together': "(('project', 'key', 'value', 'last_seen'),)"
},
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'null': 'True',
'to': "orm['sentry.Project']"
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.lostpasswordhash': {
'Meta': {
'object_name': 'LostPasswordHash'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'unique': 'True'
}
)
},
'sentry.option': {
'Meta': {
'object_name': 'Option'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'last_updated':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {
'object_name': 'Organization'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'org_memberships'",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMember']",
'to': "orm['sentry.User']"
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'slug':
('django.db.models.fields.SlugField', [], {
'unique': 'True',
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.organizationaccessrequest': {
'Meta': {
'unique_together': "(('team', 'member'),)",
'object_name': 'OrganizationAccessRequest'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'member': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationavatar': {
'Meta': {
'object_name': 'OrganizationAvatar'
},
'avatar_type':
('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']",
'unique': 'True',
'null': 'True',
'on_delete': 'models.SET_NULL'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': (
'django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32',
'db_index': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'avatar'",
'unique': 'True',
'to': "orm['sentry.Organization']"
}
)
},
'sentry.organizationmember': {
'Meta': {
'unique_together': "(('organization', 'user'), ('organization', 'email'))",
'object_name': 'OrganizationMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': (
'django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'member_set'",
'to': "orm['sentry.Organization']"
}
),
'role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMemberTeam']",
'blank': 'True'
}
),
'token': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'unique': 'True',
'null': 'True',
'blank': 'True'
}
),
'type': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'sentry_orgmember_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.organizationmemberteam': {
'Meta': {
'unique_together': "(('team', 'organizationmember'),)",
'object_name': 'OrganizationMemberTeam',
'db_table': "'sentry_organizationmember_teams'"
},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'organizationmember': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationonboardingtask': {
'Meta': {
'unique_together': "(('organization', 'task'),)",
'object_name': 'OrganizationOnboardingTask'
},
'data': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'date_completed':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.organizationoption': {
'Meta': {
'unique_together': "(('organization', 'key'),)",
'object_name': 'OrganizationOption',
'db_table': "'sentry_organizationoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.project': {
'Meta': {
'unique_together': "(('team', 'slug'), ('organization', 'slug'))",
'object_name': 'Project'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'first_event': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'flags':
('django.db.models.fields.BigIntegerField', [], {
'default': '0',
'null': 'True'
}),
'forced_color': (
'django.db.models.fields.CharField', [], {
'max_length': '6',
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'public': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.projectbookmark': {
'Meta': {
'unique_together': "(('project_id', 'user'),)",
'object_name': 'ProjectBookmark'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.projectdsymfile': {
'Meta': {
'unique_together': "(('project', 'uuid'),)",
'object_name': 'ProjectDSymFile'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'uuid': ('django.db.models.fields.CharField', [], {
'max_length': '36'
})
},
'sentry.projectkey': {
'Meta': {
'object_name': 'ProjectKey'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Project']"
}
),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'roles': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.projectoption': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'ProjectOption',
'db_table': "'sentry_projectoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.projectplatform': {
'Meta': {
'unique_together': "(('project_id', 'platform'),)",
'object_name': 'ProjectPlatform'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'platform': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.release': {
'Meta': {
'unique_together': "(('organization', 'version'),)",
'object_name': 'Release'
},
'data': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_released':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'date_started':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'new_groups':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'owner': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True',
'blank': 'True'
}
),
'project_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'projects': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'releases'",
'symmetrical': 'False',
'through': "orm['sentry.ReleaseProject']",
'to': "orm['sentry.Project']"
}
),
'ref': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'url': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'version': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.releasecommit': {
'Meta': {
'unique_together': "(('release', 'commit'), ('release', 'order'))",
'object_name': 'ReleaseCommit'
},
'commit': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Commit']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True',
'db_index': 'True'
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.releaseenvironment': {
'Meta': {
'unique_together': "(('project_id', 'release_id', 'environment_id'),)",
'object_name': 'ReleaseEnvironment',
'db_table': "'sentry_environmentrelease'"
},
'environment_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'first_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'release_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.releasefile': {
'Meta': {
'unique_together': "(('release', 'ident'),)",
'object_name': 'ReleaseFile'
},
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'name': ('django.db.models.fields.TextField', [], {}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'project_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.releaseproject': {
'Meta': {
'unique_together': "(('project', 'release'),)",
'object_name': 'ReleaseProject',
'db_table': "'sentry_release_project'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'new_groups': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.repository': {
'Meta': {
'unique_together':
"(('organization_id', 'name'), ('organization_id', 'provider', 'external_id'))",
'object_name':
'Repository'
},
'config': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'external_id':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'provider':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'url': ('django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True'
})
},
'sentry.rule': {
'Meta': {
'object_name': 'Rule'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.savedsearch': {
'Meta': {
'unique_together': "(('project', 'name'),)",
'object_name': 'SavedSearch'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_default': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {
'unique_together': "(('project', 'user'),)",
'object_name': 'SavedSearchUserDefault',
'db_table': "'sentry_savedsearch_userdefault'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'savedsearch': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.SavedSearch']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.tagkey': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'TagKey',
'db_table': "'sentry_filterkey'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'label':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.tagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'TagValue',
'db_table': "'sentry_filtervalue'"
},
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.team': {
'Meta': {
'unique_together': "(('organization', 'slug'),)",
'object_name': 'Team'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.user': {
'Meta': {
'object_name': 'User',
'db_table': "'auth_user'"
},
'date_joined':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'blank': 'True'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'is_managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_password_expired':
('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_staff': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'last_login':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'name': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'db_column': "'first_name'",
'blank': 'True'
}
),
'password': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'session_nonce':
('django.db.models.fields.CharField', [], {
'max_length': '12',
'null': 'True'
}),
'username':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '128'
})
},
'sentry.useravatar': {
'Meta': {
'object_name': 'UserAvatar'
},
'avatar_type':
('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']",
'unique': 'True',
'null': 'True',
'on_delete': 'models.SET_NULL'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': (
'django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32',
'db_index': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'avatar'",
'unique': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.useremail': {
'Meta': {
'unique_together': "(('user', 'email'),)",
'object_name': 'UserEmail'
},
'date_hash_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_verified': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'emails'",
'to': "orm['sentry.User']"
}
),
'validation_hash': (
'django.db.models.fields.CharField', [], {
'default': "u'K2xV32fIsyQicTw4UuswECAm3m6ZSPdm'",
'max_length': '32'
}
)
},
'sentry.useroption': {
'Meta': {
'unique_together': "(('user', 'project', 'key'),)",
'object_name': 'UserOption'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'UserReport',
'index_together': "(('project', 'event_id'), ('project', 'date_added'))"
},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
}
}
complete_apps = ['sentry']
symmetrical = True
|
{
"content_hash": "2e489029e8d7dc7bd72112fc1f2ea734",
"timestamp": "",
"source": "github",
"line_count": 2581,
"max_line_length": 98,
"avg_line_length": 36.80317706315382,
"alnum_prop": 0.39643537672783163,
"repo_name": "looker/sentry",
"id": "34121be9e01f1ed9746f5f41731c96b5a84e90bd",
"size": "95013",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/sentry/south_migrations/0298_backfill_project_has_releases.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "289931"
},
{
"name": "HTML",
"bytes": "241322"
},
{
"name": "JavaScript",
"bytes": "3112298"
},
{
"name": "Lua",
"bytes": "65795"
},
{
"name": "Makefile",
"bytes": "7048"
},
{
"name": "Python",
"bytes": "36341504"
},
{
"name": "Ruby",
"bytes": "204"
},
{
"name": "Shell",
"bytes": "5701"
}
],
"symlink_target": ""
}
|
import unittest
import bootstrap
import nark
class LogTests(unittest.TestCase):
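    # Smoke tests for nark's logging helpers: logger creation and emitting a
    # message at every severity level.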
def test_can_create_logger(self):
a = nark.Assert()
i = nark.Logging.get()
a.not_null(i, "Unable to create log instance")
def test_can_log_message(self):
a = nark.Assert()
i = nark.Logging.get()
i.debug("Hello %s", "world")
i.info("Hello %s", "world")
i.warning("Hello %s", "world")
i.error("Hello %s", "world")
i.critical("Hello %s", "world")
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "067a08375d568fc684cb40515e709a66",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 50,
"avg_line_length": 21.625,
"alnum_prop": 0.6069364161849711,
"repo_name": "shadowmint/python-nark",
"id": "1c45441e2bef433e828fef7485bbc809e1defc92",
"size": "1097",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/nark/log_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "86811"
},
{
"name": "Shell",
"bytes": "278"
}
],
"symlink_target": ""
}
|
import pytest
import unittest
from mock import Mock
from calvin.runtime.north import scheduler
from calvin.runtime.north.plugins.port import queue
from calvin.runtime.north.plugins.port.endpoint import LocalOutEndpoint, LocalInEndpoint
from calvin.csparser.codegen import calvin_codegen
def create_actor(kind, args):
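    # Instantiate a single actor of the given type through an ActorManager
    # bound to a mocked node, and return the actor instance.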
# Must go after fiddling with calvinsys
from calvin.runtime.north.actormanager import ActorManager
node = Mock()
actor_manager = ActorManager(node)
actor_id = actor_manager.new(kind, args)
actor = actor_manager.actors[actor_id]
return actor
def app_from_script(script, script_name):
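    # Compile the script with calvin_codegen, instantiate every actor in the
    # deployable, and wire the declared connections using local endpoints and
    # FanoutFIFO queues. Returns (actors-by-name, errors, warnings).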
deployable, issuetracker = calvin_codegen(script, script_name, verify=False)
errors = issuetracker.errors(sort_key='reason')
warnings = issuetracker.warnings(sort_key='reason')
if errors:
return {}, errors, warnings
actors = {}
for name, setup in deployable['actors'].iteritems():
a_type = setup['actor_type']
a_args = setup['args']
a_args.update({"name":name}) # Optional, for human readability only
a = create_actor(a_type, a_args)
actors[name] = a
for src, dests in deployable['connections'].iteritems():
for dest in dests:
a_name, p_name = src.split('.')
outport = actors[a_name].outports[p_name]
# FIXME: setup port properties (queue) from deployable info
outport.set_queue(queue.fanout_fifo.FanoutFIFO({'queue_length': 4, 'direction': "out"}, {}))
a_name, p_name = dest.split('.')
inport = actors[a_name].inports[p_name]
# FIXME: setup port properties (queue) from deployable info
inport.set_queue(queue.fanout_fifo.FanoutFIFO({'queue_length': 4, 'direction': "in"}, {}))
outport.attach_endpoint(LocalOutEndpoint(outport, inport))
inport.attach_endpoint(LocalInEndpoint(inport, outport))
return actors, errors, warnings
def setup_calvinsys():
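    # Put calvinsys into test mode and register a mocked 'sys.schedule'
    # capability backed by mock.MockInputOutput.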
import calvin.runtime.north.calvinsys as calvinsys
calvinsys.TESTING = True
from calvin.runtime.north.calvinsys import get_calvinsys
sys = get_calvinsys()
sys.init(capabilities={
"sys.schedule": {
"module": "mock.MockInputOutput",
"attributes": {'data': ["dummy"]}
}
})
return sys
def teardown_calvinsys():
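    # Leave test mode and drop the calvinsys singleton so the next test
    # starts from a clean state.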
import calvin.runtime.north.calvinsys as calvinsys
calvinsys.TESTING = False
del calvinsys._calvinsys
calvinsys._calvinsys = None
class TestBase(unittest.TestCase):
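    # Shared fixture: a SimpleScheduler built around a mocked node and a mocked
    # actor manager whose enabled_actors() returns a fixed list.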
def setUp(self):
node = Mock()
cs = setup_calvinsys()
cs._node = node
actor_manager = Mock()
actor_manager.enabled_actors = Mock(return_value=[1, 3, 7])
self.scheduler = scheduler.SimpleScheduler(node, actor_manager)
node.sched = self.scheduler
def tearDown(self):
teardown_calvinsys()
@pytest.mark.xfail(reason="Fix mocking of calvinsys")
class SchedulerSanityCheck(TestBase):
def test_sanity(self):
assert self.scheduler.actor_mgr.enabled_actors() == [1, 3, 7]
@pytest.mark.xfail(reason="Fix mocking of calvinsys")
class SchedulerCheckStrategy(TestBase):
def test_simple(self):
# Create actors
src = create_actor('std.Constant', {"data":42, "name":"src"})
filter = create_actor('std.Identity', {"name":"filter"})
snk = create_actor('flow.Terminator', {"name":"snk"})
        # Get the ports
src_outport = src.outports['token']
filter_inport = filter.inports['token']
filter_outport = filter.outports['token']
snk_inport = snk.inports['void']
# Set the queue types and length for each port
src_outport.set_queue(queue.fanout_fifo.FanoutFIFO({'queue_length': 4, 'direction': "out"}, {}))
filter_inport.set_queue(queue.fanout_fifo.FanoutFIFO({'queue_length': 4, 'direction': "in"}, {}))
filter_outport.set_queue(queue.fanout_fifo.FanoutFIFO({'queue_length': 4, 'direction': "out"}, {}))
snk_inport.set_queue(queue.fanout_fifo.FanoutFIFO({'queue_length': 4, 'direction': "in"}, {}))
# Create endpoints
src_out_ep = LocalOutEndpoint(src_outport, filter_inport)
filter_in_ep = LocalInEndpoint(filter_inport, src_outport)
filter_out_ep = LocalOutEndpoint(filter_outport, snk_inport)
snk_in_ep = LocalInEndpoint(snk_inport, filter_outport)
        # Attach the endpoints to the ports
src_outport.attach_endpoint(src_out_ep)
filter_inport.attach_endpoint(filter_in_ep)
filter_outport.attach_endpoint(filter_out_ep)
snk_inport.attach_endpoint(snk_in_ep)
assert src.name == "src"
assert filter.name == "filter"
# Verify that src.token is connected to filter.token
assert len(src.outports['token'].endpoints) == 1
assert src.outports['token'].endpoints[0].peer_port.owner.name == filter.name
assert src.outports['token'].endpoints[0].peer_port.owner == filter
assert src.outports['token'].endpoints[0].peer_port.name == "token"
assert src.outports['token'].endpoints[0].peer_port == filter.inports['token']
# This is the same test as test_simple above, but using app_from_script to set up the application from a script. Much better in the long run.
def test_simple_script(self):
script = """
src : std.Constant(data=42)
filter : std.Identity()
snk : flow.Terminator()
src.token > filter.token
filter.token > snk.void
"""
actors, errors, warnings = app_from_script(script, "test")
assert not errors
assert actors
src = actors['test:src']
filter = actors['test:filter']
        snk = actors['test:snk']
assert src.name == "test:src"
assert filter.name == "test:filter"
# Verify that src.token is connected to filter.token
assert len(src.outports['token'].endpoints) == 1
assert src.outports['token'].endpoints[0].peer_port.owner.name == filter.name
assert src.outports['token'].endpoints[0].peer_port.owner == filter
assert src.outports['token'].endpoints[0].peer_port.name == "token"
assert src.outports['token'].endpoints[0].peer_port == filter.inports['token']
|
{
"content_hash": "a7e23a7700a58ad41f67139b86f205af",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 149,
"avg_line_length": 38.82208588957055,
"alnum_prop": 0.6460176991150443,
"repo_name": "EricssonResearch/calvin-base",
"id": "ae3f942659ca4c408d115fe0c261182d8a3f28a3",
"size": "6328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "calvin/tests/test_scheduler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "769"
},
{
"name": "Dockerfile",
"bytes": "612"
},
{
"name": "HTML",
"bytes": "24571"
},
{
"name": "JavaScript",
"bytes": "78325"
},
{
"name": "Makefile",
"bytes": "816"
},
{
"name": "Python",
"bytes": "3291484"
},
{
"name": "Shell",
"bytes": "37140"
}
],
"symlink_target": ""
}
|