| repo_name (string, length 5-100) | path (string, length 4-231) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, length 0-8.16k) | middle (string, length 3-512) | suffix (string, length 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
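The header describes a fill-in-the-middle (FIM) style corpus: each row below pairs repository metadata with a source file split into prefix, middle, and suffix columns. As a minimal sketch of how one such row fits together (the helper and the sample values are illustrative placeholders, not taken from the rows below):

```python
# Minimal sketch: reassemble one FIM-style row into the original file text.
# The dict keys mirror the column names above; the values are made-up placeholders.

def reassemble(row):
    """Concatenate the prefix, middle and suffix columns of a single row."""
    return row["prefix"] + row["middle"] + row["suffix"]

example_row = {
    "repo_name": "example/repo",   # placeholder, not a row from this table
    "path": "pkg/module.py",
    "language": "Python",
    "license": "mit",
    "size": 40,
    "score": 0.0,
    "prefix": "def add(a, b):\n    ",
    "middle": "return a + b",
    "suffix": "\n",
}

print(reassemble(example_row))  # prints the reconstructed source file
```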
gzamboni/sdnResilience
|
loxi/of10/message.py
|
Python
|
gpl-2.0
| 248,179
| 0.006455
|
# Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University
# Copyright (c) 2011, 2012 Open Networking Foundation
# Copyright (c) 2012, 2013 Big Switch Networks, Inc.
# See the file LICENSE.pyloxi which should have been included in the source distribution
# Automatically generated by LOXI from template module.py
# Do not modify
import struct
import loxi
import util
import loxi.generic_util
import sys
ofp = sys.modules['loxi.of10']
class message(loxi.OFObject):
subtypes = {}
version = 1
def __init__(self, type=None, xid=None):
if type != None:
self.type = type
else:
self.type = 0
if xid != None:
self.xid = xid
else:
self.xid = None
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('B', 1)
subclass = message.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = message()
_version = reader.read("!B")[0]
assert(_version == 1)
obj.type = reader.read("!B")[0]
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if self.xid != other.xid: return False
return True
def pretty_print(self, q):
q.text("message {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.breakable()
q.text('}')
class stats_reply(message):
subtypes = {}
version = 1
type = 17
def __init__(self, xid=None, stats_type=None, flags=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if stats_type != None:
self.stats_type = stats_type
else:
self.stats_type = 0
if flags != None:
self.flags = flags
else:
self.flags = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!H", self.stats_type))
packed.append(struct.pack("!H", self.flags))
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!H', 8)
subclass = stats_reply.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = stats_reply()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 17)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
obj.stats_type = reader.read("!H")[0]
obj.flags = reader.read("!H")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.stats_type != other.stats_type: return False
if self.flags != other.flags: return False
return True
def pretty_print(self, q):
q.text("stats_reply {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("flags = ");
q.text("%#x" % self.flags)
q.breakable()
q.text('}')
message.subtypes[17] = stats_reply
class aggregate_stats_reply(stats_reply):
version = 1
type = 17
stats_type = 2
def __init__(self, xid=None, flags=None, packet_count=None, byte_count=None, flow_count=None):
if xid != None:
self.xid = xid
else:
self.xid = None
if flags != None:
self.flags = flags
else:
self.flags = 0
if packet_count != None:
self.packet_count = packet_count
else:
self.packet_count = 0
if byte_count != None:
self.byte_count = byte_count
else:
self.byte_count = 0
if flow_count != None:
self.flow_count = flow_count
else:
self.flow_count = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!B", self.version))
packed.append(struct.pack("!B", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
packed.append(struct.pack("!L", self.xid))
packed.append(struct.pack("!H", self.stats_type))
packed.append(struct.pack("!H", self.flags))
packed.append(struct.pack("!Q", self.packet_count))
packed.append(struct.pack("!Q", self.byte_count))
packed.append(struct.pack("!L", self.flow_count))
packed.append('\x00' * 4)
length = sum([len(x) for x in packed])
packed[2] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = aggregate_stats_reply()
_version = reader.read("!B")[0]
assert(_version == 1)
_type = reader.read("!B")[0]
assert(_type == 17)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read("!L")[0]
_stats_type = reader.read("!H")[0]
assert(_stats_type == 2)
obj.flags = reader.read("!H")[0]
obj.packet_count = reader.read("!Q")[0]
obj.byte_count = reader.read("!Q")[0]
obj.flow_count = reader.read("!L")[0]
reader.skip(4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.xid != other.xid: return False
if self.flags != other.flags: return False
if self.packet_count != other.packet_count: return False
if self.byte_count != other.byte_count: return False
if self.flow_count != other.flow_count: return False
return True
def pretty_print(self, q):
q.text("aggregate_stats_reply {")
with q.group():
with q.indent(2):
q.breakable()
q.text("xid = ");
if self.xid != None:
q.text("%#x" % self.xid)
else:
q.text('None')
q.text(","); q.breakable()
q.text("flags = ");
q.text("%#x" % self.flags)
q.text(","); q.breakable()
q.text("packet_count = ");
q.text("%#x" % self.packet_count)
q.text(","); q.breakable()
q.text("byte_count = ");
q.text("%#x" % self.byte_count)
q.text(","); q.breakable()
q.text("flow_count = ");
q.text("%#x" % self.flow_count)
q.breakable()
q.text('}')
stats_reply.subtypes[2] = aggregate_stats_reply
class stats_request(message):
subtypes = {}
version
|
stack-of-tasks/rbdlpy
|
tutorial/lib/python2.7/site-packages/OpenGL/raw/GL/NV/texture_expand_normal.py
|
Python
|
lgpl-3.0
| 591
| 0.021997
|
'''
Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_NV_texture_expand_normal'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_NV_texture_expand_normal',error_checker=_errors._error_checker)
GL_TEXTURE_UNSIGNED_REMAP_MODE_NV=_C('GL_TEXTURE_UNSIGNED_REMAP_MODE_NV',0x888F)
|
the0forge/sp
|
frontend/migrations/0038_auto__add_unique_spuser_username.py
|
Python
|
gpl-3.0
| 22,106
| 0.008097
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding unique constraint on 'SPUser', fields ['username']
db.create_unique(u'frontend_spuser', ['username'])
def backwards(self, orm):
# Removing unique constraint on 'SPUser', fields ['username']
db.delete_unique(u'frontend_spuser', ['username'])
models = {
u'frontend.backorder': {
'Meta': {'ordering': "('-timestamp',)", 'object_name': 'BackOrder'},
'amount': ('django.db.models.fields.IntegerField', [], {}),
'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'back_orders'", 'to': u"orm['frontend.OrderProduct']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'frontend.catalog': {
'Meta': {'ordering': "('name',)", 'object_name': 'Catalog'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'frontend.catalogissue': {
'Meta': {'object_name': 'CatalogIssue'},
'catalog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'issues'", 'to': u"orm['frontend.Catalog']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'products': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'catalog_issues'", 'symmetrical': 'False', 'through': u"orm['frontend.CatalogIssueProduct']", 'to': u"orm['frontend.Product']"})
},
u'frontend.catalogissueproduct': {
'Meta': {'object_name': 'CatalogIssueProduct'},
'catalog_issue': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['frontend.CatalogIssue']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_ref': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'page_ref': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'catalog_links'", 'to': u"orm['frontend.Product']"}),
'sub_ref': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'})
},
u'frontend.company': {
'Meta': {'object_name': 'Company'},
'fax': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo_height': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'logo_img': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True'}),
'logo_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'registration': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'frontend.customer': {
'Meta': {'object_name': 'Customer'},
'address_line_1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'address_line_2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'country': ('django.db.models.fields.CharField', [], {'default': "'Australia'", 'max_length': '100'}),
'customer_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'delivery_address_line_1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'delivery_address_line_2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'delivery_attn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'delivery_country': ('django.db.models.fields.CharField', [], {'default': "'Australia'", 'max_length': '100'}),
'delivery_postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'delivery_state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'delivery_suburb': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '255'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'from_src_company_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'from_src_membadd_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_read': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'c_notes'", 'blank': 'True', 'to': u"orm['frontend.Note']"}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'registration': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '150'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'suburb': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'telephone': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'telephone_clean': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
u'frontend.customercontact': {
'Meta': {'object_name': 'CustomerContact'},
'customer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contacts'", 'to': u"orm['frontend.Customer']"}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'frontend.importnote': {
'Meta': {'object_name': 'ImportNote'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'model_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'src_model': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'src_model_id_field': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'src_model_id_text': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'type': ('django.db.models.fields.CharField', [], {'default':
|
EvanK/ansible
|
lib/ansible/modules/cloud/azure/azure_rm_managed_disk.py
|
Python
|
gpl-3.0
| 14,499
| 0.002
|
#!/usr/bin/python
#
# Copyright (c) 2017 Bruno Medina Bolanos Cacho <bruno.medina@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_managed_disk
version_added: "2.4"
short_description: Manage Azure Managed Disks
description:
- Create, update and delete an Azure Managed Disk
options:
resource_group:
description:
- Name of a resource group where the managed disk exists or will be created.
required: true
name:
description:
- Name of the managed disk.
required: true
state:
description:
- Assert the state of the managed disk. Use C(present) to create or update a managed disk and C(absent) to delete a managed disk.
default: present
choices:
- absent
- present
location:
description:
- Valid Azure location. Defaults to location of the resource group.
storage_account_type:
description:
- "Type of storage for the managed disk: C(Standard_LRS) or C(Premium_LRS). If not specified the disk is created C(Standard_LRS)."
choices:
- Standard_LRS
- Premium_LRS
create_option:
description:
- "Allowed values: empty, import, copy.
- C(import) from a VHD file in I(source_uri) and C(copy) from previous managed disk I(source_uri)."
choices:
- empty
- import
- copy
source_uri:
description:
- URI to a valid VHD file to be used or the resource ID of the managed disk to copy.
aliases:
- source_resource_uri
os_type:
description:
- "Type of Operating System: C(linux) or C(windows)."
- "Used when I(create_option) is either C(copy) or C(import) and the source is an OS disk."
- "If omitted during creation, no value is set."
- "If omitted during an update, no change is made."
- "Once set, this value cannot be cleared."
choices:
- linux
- windows
disk_size_gb:
description:
- "Size in GB of the managed disk to be created."
- "If I(create_option) is C(copy) then the value must be greater than or equal to the source's size."
managed_by:
description:
- Name of an existing virtual machine with which the disk is or will be associated, this VM should be in the same resource group.
- To detach a disk from a vm, explicitly set to ''.
- If this option is unset, the value will not be changed.
version_added: 2.5
tags:
description:
- Tags to assign to the managed disk.
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Bruno Medina (@brusMX)"
'''
EXAMPLES = '''
- name: Create managed disk
azure_rm_managed_disk:
name: mymanageddisk
location: eastus
resource_group: myResourceGroup
disk_size_gb: 4
- name: Create managed operating system disk from page blob
azure_rm_managed_disk:
name: mymanageddisk
location: eastus2
resource_group: myResourceGroup
create_option: import
source_uri: https://storageaccountname.blob.core.windows.net/containername/blob-name.vhd
os_type: windows
storage_account_type: Premium_LRS
- name: Mount the managed disk to VM
azure_rm_managed_disk:
name: mymanageddisk
location: eastus
resource_group: myResourceGroup
disk_size_gb: 4
managed_by: testvm001
- name: Unmount the managed disk from VM
azure_rm_managed_disk:
name: mymanageddisk
location: eastus
resource_group: myResourceGroup
disk_size_gb: 4
- name: Delete managed disk
azure_rm_managed_disk:
name: mymanageddisk
location: eastus
resource_group: myResourceGroup
state: absent
'''
RETURN = '''
id:
description: The managed disk resource ID.
returned: always
type: dict
state:
description: Current state of the managed disk
returned: always
type: dict
changed:
description: Whether or not the resource has changed
returned: always
type: bool
'''
import re
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.tools import parse_resource_id
from msrestazure.azure_exceptions import CloudError
except ImportError:
# This is handled in azure_rm_common
pass
# duplicated in azure_rm_managed_disk_facts
def managed_disk_to_dict(managed_disk):
create_data = managed_disk.creation_data
return dict(
id=managed_disk.id,
name=managed_disk.name,
location=managed_disk.location,
tags=managed_disk.tags,
create_option=create_data.create_option.lower(),
source_uri=create_data.source_uri or create_data.source_resource_id,
disk_size_gb=managed_disk.disk_size_gb,
os_type=managed_disk.os_type.lower() if managed_disk.os_type else None,
storage_account_type=managed_disk.sku.name if managed_disk.sku else None,
managed_by=managed_disk.managed_by
)
class AzureRMManagedDisk(AzureRMModuleBase):
"""Configuration class for an Azure RM Managed Disk resource"""
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
name=dict(
type='str',
required=True
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
),
location=dict(
type='str'
),
storage_account_type=dict(
type='str',
choices=['Standard_LRS', 'Premium_LRS']
),
create_option=dict(
type='str',
choices=['empty', 'import', 'copy']
),
source_uri=dict(
type='str',
aliases=['source_resource_uri']
),
os_type=dict(
type='str',
choices=['linux', 'windows']
),
disk_size_gb=dict(
type='int'
),
managed_by=dict(
type='str'
)
)
required_if = [
('create_option', 'import', ['source_uri']),
('create_option', 'copy', ['source_uri']),
('create_option', 'empty', ['disk_size_gb'])
]
self.results = dict(
changed=False,
state=dict())
self.resource_group = None
self.name = None
self.location = None
self.storage_account_type = None
self.create_option = None
self.source_uri = None
self.os_type = None
self.disk_size_gb = None
self.tags = None
self.managed_by = None
super(AzureRMManagedDisk, self).__init__(
derived_arg_spec=self.module_arg_spec,
required_if=required_if,
supports_check_mode=True,
supports_tags=True)
def exec_module(self, **kwargs):
"""Main module execution method"""
for key in list(self.module_arg_spec.keys()) + ['tags']:
setattr(self, key, kwargs[key])
result = None
changed = False
resource_group = self.get_resource_group(self.resource_group)
if not self.location:
self.location = resource_group.location
disk_instance = self.get_managed_disk()
result = disk_instance
# need create or update
if self.state == 'present':
parameter = self.ge
|
pombredanne/pushmanager
|
tests/test_servlet_newpush.py
|
Python
|
apache-2.0
| 6,300
| 0.00381
|
from contextlib import nested
from contextlib import contextmanager
import mock
import testing as T
import types
from core import db
from core.settings import Settings
from core.mail import MailQueue
from core.util import get_servlet_urlspec
from core.xmppclient import XMPPQueue
import servlets.newpush
from servlets.newpush import NewPushServlet
from servlets.newpush import send_notifications
class NewPushServletTest(T.TestCase, T.ServletTestMixin):
def get_handlers(self):
return [get_servlet_urlspec(NewPushServlet)]
def test_newpush(self):
pushes = []
def on_db_return(success, db_results):
assert success
pushes.extend(db_results.fetchall())
with nested(
mock.patch.dict(db.Settings, T.MockedSettings),
mock.patch.object(NewPushServlet, "get_current_user", return_value = "jblack"),
mock.patch.object(NewPushServlet, "redirect"),
mock.patch.object(MailQueue, "enqueue_user_email"),
):
with mock.patch("%s.servlets.newpush.subprocess.call" % __name__) as mocked_call:
title = "BestPushInTheWorld"
branch = "jblack"
push_type = "regular"
uri = "/newpush?push-title=%s&branch=%s&push-type=%s" % (
title, branch, push_type
)
pushes = []
db.execute_cb(db.push_pushes.select(), on_db_return)
num_pushes_before = len(pushes)
response = self.fetch(uri)
assert response.error == None
pushes = []
db.execute_cb(db.push_pushes.select(), on_db_return)
num_pushes_after = len(pushes)
T.assert_equal(num_pushes_before + 1, num_pushes_after)
# There should be one call to nodebot after a push is created
T.assert_equal(servlets.newpush.subprocess.call.call_count, 1)
# Verify that we have a valid call to
# subprocess.call. Getting the arguments involves ugly
# mock magic
mocked_call.assert_called_once_with([
'/nail/sys/bin/nodebot',
'-i',
mock.ANY, # nickname
mock.ANY, # channel
mock.ANY, # msg
])
def call_on_db_complete(self, urgent=False):
mocked_self = mock.Mock()
mocked_self.check_db_results = mock.Mock(return_value=None)
mocked_self.redirect = mock.Mock(return_value=None)
mocked_self.pushtype = 'normal'
mocked_self.on_db_complete = types.MethodType(NewPushServlet.on_db_complete.im_func, mocked_self)
push = mock.Mock()
push.lastrowid = 0
no_watcher_req = {
'user': 'testuser',
'watchers': None,
}
watched_req = {
'user': 'testuser',
'watchers': 'testuser1,testuser2',
}
if urgent:
no_watcher_req['tags'] = 'urgent'
watched_req['tags'] = 'urgent'
mocked_self.pushtype = 'urgent'
reqs = [no_watcher_req, watched_req]
mocked_self.on_db_complete('success', [push, reqs])
@mock.patch('servlets.newpush.send_notifications')
def test_normal_people_on_db_complete(self, notify):
self.call_on_db_complete()
notify.called_once_with(set(['testuser', 'testuser1', 'testuser2']), mock.ANY, mock.ANY)
@mock.patch('servlets.newpush.send_notifications')
def test_urgent_people_on_db_complete(self, notify):
self.call_on_db_complete(urgent=True)
notify.called_once_with(set(['testuser', 'testuser1', 'testuser2']), mock.ANY, mock.ANY)
class NotificationsTestCase(T.TestCase):
@contextmanager
def mocked_notifications(self):
with mock.patch("%s.servlets.newpush.subprocess.call" % __name__) as mocked_call:
with mock.patch.object(MailQueue, "enqueue_user_email") as mocked_mail:
with mock.patch.object(XMPPQueue, "enqueue_user_xmpp") as mocked_xmpp:
yield mocked_call, mocked_mail, mocked_xmpp
def test_send_notifications(self):
"""New push sends notifications via IRC, XMPP and emails."""
self.people = ["fake_user1", "fake_user2"]
self.pushurl = "/fake_push_url?id=123"
self.pushtype = "fake_puth_type"
with self.mocked_notifications() as (mocked_call, mocked_mail, mocked_xmpp):
send_notifications(self.people, self.pushtype, self.pushurl)
url = "https://%s%s" % (Settings['main_app']['servername'], self.pushurl)
msg = "%s: %s push starting! %s" % (', '.join(self.people), self.pushtype, url)
mocked_call.assert_called_once_with([
'/nail/sys/bin/nodebot',
'-i',
Settings['irc']['nickname'],
Settings['irc']['channel'],
msg
])
mocked_mail.assert_called_once_with(
Settings['mail']['notifyall'],
msg,
mock.ANY, # subject
)
mocked_xmpp.assert_called_once_with(
self.people,
"Push starting! %s" % url
)
def test_send_notifications_empty_user_list(self):
"""If there is no pending push request we'll only send IRC and
email notifications, but not XMPP messages."""
self.people = []
self.pushurl = "fake_push_url"
self.pushtype = "fake_puth_type"
with self.mocked_notifications() as (mocked_call, mocked_mail, mocked_xmpp):
send_notifications(self.people, self.pushtype, self.pushurl)
mocked_call.assert_called_once_with([
'/nail/sys/bin/nodebot',
'-i',
Settings['irc']['nickname'],
Settings['irc']['channel'],
mock.ANY, # msg
])
mocked_mail.assert_called_once_with(
Settings['mail']['notifyall'],
mock.ANY, # msg
mock.ANY, # subject
)
T.assert_is(mocked_xmpp.called, False)
if __name__ == '__main__':
T.run()
|
Kaezon/allianceauth
|
allianceauth/services/modules/mumble/tasks.py
|
Python
|
gpl-2.0
| 1,857
| 0.002154
|
import logging
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from celery import shared_task
from allianceauth.services.tasks import QueueOnce
from .models import MumbleUser
logger = logging.getLogger(__name__)
class MumbleTasks:
def __init__(self):
pass
@staticmethod
def has_account(user):
try:
return user.mumble.username != ''
except ObjectDoesNotExist:
return False
@staticmethod
def disable_mumble():
logger.info("Deleting all MumbleUser models")
MumbleUser.objects.all().delete()
@staticmethod
@shared_task(bind=True, name="mumble.update_groups", base=QueueOnce)
def update_groups(self, pk):
user = User.objects.get(pk=pk)
logger.debug("Updating mumble groups for user %s" % user)
if MumbleTasks.has_account(user):
try:
if not user.mumble.update_groups():
raise Exception("Group sync failed")
logger.debug("Updated user %s mumble groups." % user)
return True
except MumbleUser.DoesNotExist:
logger.info("Mumble group sync failed for {}, user does not have a mumble account".format(user))
except:
logger.exception("Mumble group sync failed for %s, retrying in 10 mins" % user)
raise self.retry(countdown=60 * 10)
else:
logger.debug("User %s does not have a mumble account, skipping" % user)
return False
@staticmethod
@shared_task(name="mumble.update_all_groups")
def update_all_groups():
logger.debug("Updating ALL mumble groups")
for mumble_user in MumbleUser.objects.exclude(username__exact=''):
MumbleTasks.update_groups.delay(mumble_user.user.pk)
|
kevintom/django-bloom
|
bloom/image/templatetags/__init__.py
|
Python
|
gpl-3.0
| 89
| 0
|
# Bloom Framework
#
# John Boxall
# Copyright 2008 Handi Mobility
# www.handimobility.ca
|
nwjs/chromium.src
|
third_party/crashpad/crashpad/build/install_linux_sysroot.py
|
Python
|
bsd-3-clause
| 2,174
| 0
|
#!/usr/bin/env python3
# Copyright 2018 The Crashpad Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Various code adapted from:
# https://cs.chromium.org/chromium/src/build/linux/sysroot_scripts/install-sysroot.py
import os
import shutil
import subprocess
import sys
import urllib.request
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
# Sysroot revision from:
# https://cs.chromium.org/chromium/src/build/linux/sysroot_scripts/sysroots.json
SERVER = 'https://commondatastorage.googleapis.com'
PATH = 'chrome-linux-sysroot/toolchain'
REVISION = '43a87bbebccad99325fdcf34166295b121ee15c7'
FILENAME = 'debian_sid_amd64_sysroot.tar.xz'
def main():
url = '%s/%s/%s/%s' % (SERVER, PATH, REVISION, FILENAME)
sysroot = os.path.join(SCRIPT_DIR, os.pardir, 'third_party', 'linux',
'sysroot')
stamp = os.path.join(sysroot, '.stamp')
if os.path.exists(stamp):
with open(stamp) as s:
if s.read() == url:
return
print('Installing Debian root image from %s' % url)
if os.path.isdir(sysroot):
shutil.rmtree(sysroot)
os.mkdir(sysroot)
tarball = os.path.join(sysroot, FILENAME)
print('Downloading %s' % url)
for _ in range(3):
response = urllib.request.urlopen(url)
with open(tarball, 'wb') as f:
f.write(response.read())
break
else:
raise Exception('Failed to download %s' % url)
subprocess.check_call(['tar', 'xf', tarball, '-C', sysroot])
os.remove(tarball)
with open(stamp, 'w') as s:
s.write(url)
if __name__ == '__main__':
main()
sys.exit(0)
|
mliudev/sassafras
|
sassafras/settings.py
|
Python
|
mit
| 3,364
| 0
|
"""
Django settings for sassafras project.
Generated by 'django-admin startproject' using Django 1.8.13.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_NAME = 'sassafras'
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*(^jxc&^d46%8bi)dzq3!kezs=bnnh&lbgalj0%zy5y9w!^voi'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'debug_toolbar',
'bootstrap3',
'sass_processor',
'trello_cards',
'storages'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'sassafras.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'sassafras.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, PROJECT_NAME, STATIC_URL)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'sass_processor.finders.CssFinder',
)
STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
# Django storages
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
AWS_STORAGE_BUCKET_NAME = 'django-sassafras-test'
AWS_ACCESS_KEY = os.getenv('AWS_S3_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.getenv('AWS_S3_SECRET_ACCESS_KEY')
|
NarlikarLab/DIVERSITY
|
weblogoMod/corebio/utils/deoptparse.py
|
Python
|
gpl-3.0
| 9,010
| 0.016426
|
# Copyright (c) 2004 Gavin E. Crooks <gec@compbio.berkeley.edu>
#
# This software is distributed under the MIT Open Source License.
# <http://www.opensource.org/licenses/mit-license.html>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
"""Custom extensions to OptionParse for parsing command line options."""
# FIXME: Docstring
# TODO: Add profiling option
# DeOptionParser :
#
# http://docs.python.org/lib/module-optparse.html
#
# Random_options :
# Set random generator and seed. Use options.random as
# source of random numbers
# Copyright :
# print copyright information
# Documentation :
# print extended document information
#
# Additional file_in and file_out types
import sys
from copy import copy
from optparse import Option
from optparse import OptionParser
from optparse import IndentedHelpFormatter
from optparse import OptionValueError
import random
def _copyright_callback(option, opt, value, parser):
if option or opt or value or parser: pass # Shut up lint checker
print parser.copyright
sys.exit()
def _doc_callback(option, opt, value, parser):
if option or opt or value or parser: pass # Shut up lint checker
print parser.long_description
sys.exit()
class DeHelpFormatter(IndentedHelpFormatter) :
def __init__ (self,
indent_increment=2,
max_help_position=32,
width=78,
short_first=1):
IndentedHelpFormatter.__init__(
self, indent_increment, max_help_position,
width, short_first)
def format_option_strings (self, option):
"""Return a comma-separated list of option strings & metavariables."""
if option.takes_value():
metavar = option.metavar or option.dest.upper()
short_opts = option._short_opts
long_opts = [lopt + " " + metavar for lopt in option._long_opts]
else:
short_opts = option._short_opts
long_opts = option._long_opts
if not short_opts : short_opts = [" ",]
if self.short_first:
opts = short_opts + long_opts
else:
opts = long_opts + short_opts
return " ".join(opts)
def _check_file_in(option, opt, value):
if option or opt or value : pass # Shut up lint checker
try:
return file(value, "r")
except IOError:
raise OptionValueError(
"option %s: cannot open file: %s" % (opt, value) )
def _check_file_out(option, opt, value):
if option or opt or value : pass # Shut up lint checker
try:
return file(value, "w+")
except IOError:
raise OptionValueError(
"option %s: cannot open file: %s" % (opt, value) )
def _check_boolean(option, opt, value) :
if option or opt or value : pass # Shut up lint checker
v = value.lower()
choices = {'no': False, 'false':False, '0': False,
'yes': True, 'true': True, '1':True }
try:
return choices[v]
except KeyError:
raise OptionValueError(
"option %s: invalid choice: '%s' " \
"(choose from 'yes' or 'no', 'true' or 'false')" % (opt, value))
def _check_dict(option, opt, value) :
if option or opt or value : pass # Shut up lint checker
v = value.lower()
choices = option.choices
try:
return choices[v]
except KeyError:
raise OptionValueError(
"option %s: invalid choice: '%s' " \
"(choose from '%s')" % (opt, value, "', '".join(choices)))
class DeOption(Option):
TYPES = Option.TYPES + ("file_in","file_out", "boolean", "dict")
TYPE_CHECKER = copy(Option.TYPE_CHECKER)
TYPE_CHECKER["file_in"] = _check_file_in
TYPE_CHECKER["file_out"] = _check_file_out
TYPE_CHECKER["boolean"] = _check_boolean
TYPE_CHECKER["dict"] = _check_dict
choices = None
def _new_check_choice(self):
if self.type == "dict":
if self.choices is None:
raise OptionValueError(
"must supply a dictionary of choices for type 'dict'")
elif not isinstance(self.choices, dict):
raise OptionValueError(
"choices must be a dictionary ('%s' supplied)"
% str(type(self.choices)).split("'")[1])
return
self._check_choice()
# Have to override _check_choices so that we can parse
# a dict through to check_dict
CHECK_METHODS = Option.CHECK_METHODS
CHECK_METHODS[2] = _new_check_choice
class DeOptionParser(OptionParser) :
def __init__(self,
usage=None,
option_list=None,
option_class=DeOption,
version=None,
conflict_handler="error",
description=None,
long_description = None,
formatter=DeHelpFormatter(),
add_help_option=True,
prog=None,
copyright=None,
add_verbose_options=True,
add_random_options=False
):
OptionParser.__init__(self,
usage,
option_list,
option_class,
version,
conflict_handler,
description,
formatter,
add_help_option,
prog )
if long_description :
self.long_description = long_description
self.add_option("--doc",
action="callback",
callback=_doc_callback,
help="Detailed documentation")
if copyright :
self.copyright = copyright
self.add_option("--copyright",
action="callback",
callback=_copyright_callback,
help="")
if add_verbose_options :
self.add_option("-q", "--quite",
action="store_false",
dest="verbose",
default=False,
help="Run quietly (default)")
self.add_option("-v", "--verbose",
action="store_true",
dest="verbose",
default=False,
help="Verbose output (Not quite)")
self.random_options = False
if add_random_options :
self.random_options = True
self.add_option("--seed",
action="store",
type = "int",
dest="random_seed",
help="Initial seed for pseudo-random number generator. (default: System time)",
metavar="INTEGER" )
self.add_option("--generator",
action="store",
dest="random_generator",
default="MersenneTwister",
help="Select
|
katakumpo/niceredis
|
niceredis/client/zset.py
|
Python
|
mit
| 11,593
| 0.000086
|
# -*- coding: utf-8 *-*
from redis._compat import iteritems, iterkeys, itervalues
from redis.connection import Token
from redis.exceptions import RedisError
from .base import RedisBase
class ZsetCommands(RedisBase):
# SORTED SET COMMANDS
def zadd(self, name, *args, **kwargs):
"""
Set any number of score, element-name pairs to the key ``name``. Pairs
can be specified in two ways:
As *args, in the form of: score1, name1, score2, name2, ...
or as **kwargs, in the form of: name1=score1, name2=score2, ...
The following example would add four values to the 'my-key' key:
redis.zadd('my-key', 1.1, 'name1', 2.2, 'name2', name3=3.3, name4=4.4)
If using non-strict Redis (strict_redis=False), args are expected in swapped form:
redis.zadd('my-key', 'name1', 1.1, 'name2', 2.2, name3=3.3, name4=4.4)
"""
pieces = []
if args:
if len(args) % 2 != 0:
raise RedisError("ZADD requires an equal number of "
"values and scores")
pieces.extend(self.strict_redis and args or reversed(args))
for pair in iteritems(kwargs):
pieces.append(pair[1])
pieces.append(pair[0])
return self.execute_command('ZADD', name, *pieces)
def zcard(self, name):
"Return the number of elements in the sorted set ``name``"
return self.execute_command('ZCARD', name)
def zcount(self, name, min, max):
"""
Returns the number of elements in the sorted set at key ``name`` with
a score between ``min`` and ``max``.
"""
return self.execute_command('ZCOUNT', name, min, max)
def zincrby(self, name, value, amount=1):
"Increment the score of ``value`` in sorted set ``name`` by ``amount``"
return self.execute_command('ZINCRBY', name, amount, value)
def zinterstore(self, dest, keys, aggregate=None):
"""
Intersect multiple sorted sets specified by ``keys`` into
a new sorted set, ``dest``. Scores in the destination will be
aggregated based on the ``aggregate``, or SUM if none is provided.
"""
return self._zaggregate('ZINTERSTORE', dest, keys, aggregate)
def zlexcount(self, name, min, max):
"""
Return the number of items in the sorted set ``name`` between the
lexicographical range ``min`` and ``max``.
"""
return self.execute_command('ZLEXCOUNT', name, min, max)
def zrange(self, name, start, end, desc=False, withscores=False,
score_cast_func=float):
"""
Return a range of values from sorted set ``name`` between
``start`` and ``end`` sorted in ascending order.
``start`` and ``end`` can be negative, indicating the end of the range.
``desc`` a boolean indicating whether to sort the results descendingly
``withscores`` indicates to return the scores along with the values.
The return type is a list of (value, score) pairs
``score_cast_func`` a callable used to cast the score return value
"""
if desc:
return self.zrevrange(name, start, end, withscores,
score_cast_func)
pieces = ['ZRANGE', name, start, end]
if withscores:
pieces.append(Token('WITHSCORES'))
options = {
'withscores': withscores,
'score_cast_func': score_cast_func
}
return self.execute_command(*pieces, **options)
def zrangebylex(self, name, min, max, start=None, num=None):
"""
Return the lexicographical range of values from sorted set ``name``
between ``min`` and ``max``.
If ``start`` and ``num`` are specified, then return a slice of the
range.
"""
if (start is not None and num is None) or \
(num is not None and start is None):
raise RedisError("``start`` and ``num`` must both be specified")
pieces = ['ZRANGEBYLEX', name, min, max]
if start is not None and num is not None:
pieces.extend([Token('LIMIT'), start, num])
return self.execute_command(*pieces)
def zrangebyscore(self, name, min, max, start=None, num=None,
withscores=False, score_cast_func=float):
"""
Return a range of values from the sorted set ``name`` with scores
between ``min`` and ``max``.
If ``start`` and ``num`` are specified, then return a slice
of the range.
``withscores`` indicates to return the scores along with the values.
The return type is a list of (value, score) pairs
``score_cast_func`` a callable used to cast the score return value
"""
if (start is not None and num is None) or \
(num is not None and start is None):
raise RedisError("``start`` and ``num`` must both be specified")
pieces = ['ZRANGEBYSCORE', name, min, max]
if start is not None and num is not None:
pieces.extend([Token('LIMIT'), start, num])
if withscores:
pieces.append(Token('WITHSCORES'))
options = {
'withscores': withscores,
'score_cast_func': score_cast_func
}
return self.execute_command(*pieces, **options)
def zrank(self, name, value):
"""
Returns a 0-based value indicating the rank of ``value`` in sorted set
``name``
"""
return self.execute_command('ZRANK', name, value)
def zrem(self, name, *values):
"Remove member ``values`` from sorted set ``name``"
return self.execute_command('ZREM', name, *values)
def zremrangebylex(self, name, min, max):
"""
Remove all elements in the sorted set ``name`` between the
lexicographical range specified by ``min`` and ``max``.
Returns the number of elements removed.
"""
return self.execute_command('ZREMRANGEBYLEX', name, min, max)
def zremrangebyrank(self, name, min, max):
"""
Remove all elements in the sorted set ``name`` with ranks between
``min`` and ``max``. Values are 0-based, ordered from smallest score
to largest. Values can be negative indicating the highest scores.
Returns the number of elements removed
"""
return self.execute_command('ZREMRANGEBYRANK', name, min, max)
def zremrangebyscore(self, name, min, max):
"""
Remove all elements in the sorted set ``name`` with scores
between ``min`` and ``max``. Returns the number of elements removed.
"""
return self.execute_command('ZREMRANGEBYSCORE', name, min, max)
def zrevrange(self, name, start, end, withscores=False,
score_cast_func=float):
"""
Return a range of values from sorted set ``name`` between
``start`` and ``end`` sorted in descending order.
``start`` and ``end`` can be negative, indicating the end of the range.
``withscores`` indicates to return the scores along with the values
The return type is a list of (value, score) pairs
``score_cast_func`` a callable used to cast the score return value
"""
pieces = ['ZREVRANGE', name, start, end]
if withscores:
pieces.append(Token('WITHSCORES'))
options = {
'withscores': withscores,
'score_cast_func': score_cast_func
}
return self.execute_command(*pieces, **options)
def zrevrangebyscore(self, name, max, min, start=None, num=None,
withscores=False, score_cast_func=float):
"""
Return a range of values from the sorted set ``name`` with scores
between ``min`` and ``max`` in descending order.
If ``start`` and ``num`` are specified, then return a slice
of the range.
``withscores`` indicates to return the scores along with the values.
The return type is a list of (value, score) pairs
``score_cast_func`` a callable
|
Onirik79/aaritmud
|
src/connection.py
|
Python
|
gpl-2.0
| 8,524
| 0.005397
|
# -*- coding: utf-8 -*-
"""
Module for the generic handling of a connection to the site.
"""
#= IMPORT ======================================================================
import datetime
from twisted.internet import protocol
from src.config import config
from src.enums import LOG, OPTION, TRUST
from src.log import log
#= VARIABLES ===================================================================
# (TD) instead of using connections, use sessions, with the attributes
# inherited in this class; that would probably solve the connection problem
connections = {} # Connections to the site
#= CLASSES =====================================================================
# (TD) Wrong! These attributes should go into the site's Twisted Session
class Connection(protocol.Protocol):
"""
This class manages the connections, and the related sessions, of the
different clients to the Mud.
"""
def __init__(self):
self.account = None # Account in use
self.new_player = None # Character being created
self.player = None # Character in game
self.session = None # Twisted Session
self.request = None # Twisted Request
self.ip = "None" # Client IP
self.buffer = "" # Buffer with all the game output to send to the client on each ajax request
self.stop_buffering = False # If set to a truthy value, sending the buffer ends with the session being closed
self.already_closed = False # Whether the connection has already been closed
self.defer_exit_from_game = None # Deferred fired when the player exits the game
self.logged_on = datetime.datetime.now() # Date and time of login
#- End of initialization -
def reinit(self):
self.stop_buffering = False
self.buffer = ""
#- End of method -
def get_id(self, conn_looker=None):
"""
Returns one or all of the following pieces of information: the IP of the
connection, the account name and the character name.
Very useful in log messages.
This method pairs with the one in the Account class.
"""
account_name = "None"
player_name = "None"
if self.account:
account_name = self.account.name
if self.player:
player_name = self.player.code
# Show the full IP only to those who have enough TRUST
if not conn_looker or (conn_looker and conn_looker.account and conn_looker.account.trust >= TRUST.IMPLEMENTOR):
return "%s %s.%s" % (self.ip, account_name, player_name)
else:
ip_number, port = self.ip.split(":")
return "*.*.*.%s:%s %s.%s" % (ip_number.split(".")[3], port, account_name, player_name)
#- End of method -
# (TD) maybe consider converting these identifying strings into
# elements of an enumeration
def get_browser(self):
"""
Returns an identifying code for the browser the client is using.
Useful when xhtml code has to be generated specifically for a browser.
"""
if (not self.request or not self.request.received_headers
or not "user-agent" in self.request.received_headers):
return ""
user_agent = self.request.received_headers["user-agent"]
if not user_agent:
return ""
browser = get_browser_from_ua(user_agent)
if browser == "???":
log.user_agent(self.request)
return browser
#- End of method -
def get_os(self):
"""
Returns an identifying code for the operating system the client
is using.
"""
if (not self.request or not self.request.received_headers
or not "user-agent" in self.request.received_headers):
return ""
user_agent = self.request.received_headers["user-agent"]
if not user_agent:
return ""
operating_system = get_os_from_ua(user_agent)
# Not only unknown user agents are logged but also generic ones,
# to see whether better information can be gleaned from them,
# or simply out of curiosity
if operating_system in ("???", "WINDOWS", "LINUX", "MAC", "MOBILE"):
log.user_agent(self.request)
return operating_system
#- End of method -
def get_user_agent(self):
if not self.request:
return ""
if not self.request.received_headers:
return ""
if not "user-agent" in self.request.received_headers:
return ""
if not self.request.received_headers["user-agent"]:
return ""
return self.request.received_headers["user-agent"]
#- End of method -
def close_game_request(self):
"""
Callback that closes any connection to the game page still open
when the web session associated with the account expires.
"""
if not self.player:
return
if not self.account or OPTION.COMET not in self.account.options:
self.player.exit_from_game(True)
if not self.player or not self.player.game_request:
return
log.conn("Chiusura della connessione al gioco: %s" % self.get_id())
try:
self.player.game_request.finish()
except UserWarning:
pass
if self.player:
self.player.game_request = None
self.player = None
#- End of method -
#= FUNCTIONS ===================================================================
def close_all_connections():
for conn in reversed(connections.values()):
conn.close_game_request()
#- End of method -
def get_browser_from_ua(user_agent):
if not user_agent:
log.bug("user_agent non è un parametro valido: %r" % user_agent)
return ""
# -------------------------------------------------------------------------
if "MSIE " in user_agent:
version = user_agent.split("MSIE")[1].split(".")[0]
return "IE_" + version.strip()
elif "Firefox/" in user_agent:
version = user_agent.split("Firefox/")[1].split(".")[0]
return "FIREFOX_" + version.strip()
elif "Chrome/" in user_agent:
version = user_agent.split("Chrome/")[1].split(".")[0]
return "CHROME_" + version.strip()
elif "Safari/" in user_agent:
version = user_agent.split("Version/")[1].split(".")[0]
return "SAFARI_" + version.strip()
elif "Opera/" in user_agent:
version = user_agent.split("Version/")[1].split(".")[0]
return "OPERA_" + version.strip()
elif "Iceweasel/" in user_agent:
version = user_agent.split("Iceweasel/")[1].split(".")[0]
return "FIREFOX_" + version.strip()
elif "Kindle" in user_agent:
version = user_agent.split("Kindle/")[1].split(".")[0]
return "KINDLE_" + version.strip()
elif "Links (2" in user_agent:
return "LINKS_2"
elif "ELinks/0" in user_agent:
return "ELINKS_0"
return "???"
#- End of function -
def get_os_from_ua(user_agent):
if not user_agent:
log.bug("user_agent non è un parametro valido: %r" % user_agent)
return ""
# -------------------------------------------------------------------------
if "Windows NT 6.1" in user_agent:
return "WINDOWS_SEVEN"
elif "Windows NT 6.0" in user_agent:
return "WINDOWS_VISTA"
elif "Windows NT 5.2" in user_agent:
return "WINDOWS_2003"
elif "Windows NT 5.1" in user_agent:
return "WINDOWS_XP"
elif "Windows NT 5.0" in user_agent:
return "WINDOWS_2000"
elif "Windows" in user_agent:
return "WINDOWS"
elif "Ubuntu" in user_agent:
return "LINUX_UBUNTU"
elif "Sabayon" in user_agent:
return "LINUX_SABAYON"
elif "CentOS" in user_agent:
return "LINUX_CENTOS"
elif
|
paul99/clank
|
tools/grit/grit/gather/tr_html_unittest.py
|
Python
|
bsd-3-clause
| 17,175
| 0.004134
|
#!/usr/bin/python2.4
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.gather.tr_html'''
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
import types
import unittest
from grit.gather import tr_html
from grit import clique
from grit import util
class ParserUnittest(unittest.TestCase):
def testChunking(self):
p = tr_html.HtmlChunks()
chunks = p.Parse('<p>Hello <b>dear</b> how <i>are</i>you?<p>Fine!')
self.failUnless(chunks == [
(False, '<p>', ''), (True, 'Hello <b>dear</b> how <i>are</i>you?', ''),
(False, '<p>', ''), (True, 'Fine!', '')])
chunks = p.Parse('<p> Hello <b>dear</b> how <i>are</i>you? <p>Fine!')
self.failUnless(chunks == [
(False, '<p> ', ''), (True, 'Hello <b>dear</b> how <i>are</i>you?', ''),
(False, ' <p>', ''), (True, 'Fine!', '')])
chunks = p.Parse('<p> Hello <b>dear how <i>are you? <p> Fine!')
self.failUnless(chunks == [
(False, '<p> ', ''), (True, 'Hello <b>dear how <i>are you?', ''),
(False, ' <p> ', ''), (True, 'Fine!', '')])
# Ensure translateable sections that start with inline tags contain
# the starting inline tag.
chunks = p.Parse('<b>Hello!</b> how are you?<p><i>I am fine.</i>')
self.failUnless(chunks == [
(True, '<b>Hello!</b> how are you?', ''), (False, '<p>', ''),
(True, '<i>I am fine.</i>', '')])
# Ensure translateable sections that end with inline tags contain
# the ending inline tag.
chunks = p.Parse("Hello! How are <b>you?</b><p><i>I'm fine!</i>")
self.failUnless(chunks == [
(True, 'Hello! How are <b>you?</b>', ''), (False, '<p>', ''),
(True, "<i>I'm fine!</i>", '')])
# Check capitals and explicit descriptions
chunks = p.Parse('<!-- desc=bingo! --><B>Hello!</B> how are you?<P><I>I am fine.</I>')
self.failUnless(chunks == [
(True, '<B>Hello!</B> how are you?', 'bingo!'), (False, '<P>', ''),
(True, '<I>I am fine.</I>', '')])
chunks = p.Parse('<B><!-- desc=bingo! -->Hello!</B> how are you?<P><I>I am fine.</I>')
self.failUnless(chunks == [
(True, '<B>Hello!</B> how are you?', 'bingo!'), (False, '<P>', ''),
(True, '<I>I am fine.</I>', '')])
# Linebreaks get changed to spaces just like any other HTML content
chunks = p.Parse('<B>Hello!</B> <!-- desc=bi\nngo\n! -->how are you?<P><I>I am fine.</I>')
self.failUnless(chunks == [
(True, '<B>Hello!</B> how are you?', 'bi ngo !'), (False, '<P>', ''),
(True, '<I>I am fine.</I>', '')])
# In this case, because the explicit description appears after the first
# translateable, it will actually apply to the second translateable.
chunks = p.Parse('<B>Hello!</B> how are you?<!-- desc=bingo! --><P><I>I am fine.</I>')
self.failUnless(chunks == [
(True, '<B>Hello!</B> how are you?', ''), (False, '<P>', ''),
(True, '<I>I am fine.</I>', 'bingo!')])
# Check that replaceables within block tags (where attributes would go) are
# handled correctly.
chunks = p.Parse('<b>Hello!</b> how are you?<p [BINGO] [$~BONGO~$]>'
'<i>I am fine.</i>')
self.failUnless(chunks == [
(True, '<b>Hello!</b> how are you?', ''),
(False, '<p [BINGO] [$~BONGO~$]>', ''),
(True, '<i>I am fine.</i>', '')])
# Check that the contents of preformatted tags preserve line breaks.
chunks = p.Parse('<textarea>Hello\nthere\nhow\nare\nyou?</textarea>')
self.failUnless(chunks == [(False, '<textarea>', ''),
(True, 'Hello\nthere\nhow\nare\nyou?', ''), (False, '</textarea>', '')])
# ...and that other tags' line breaks are converted to spaces
chunks = p.Parse('<p>Hello\nthere\nhow\nare\nyou?</p>')
self.failUnless(chunks == [(False, '<p>', ''),
(True, 'Hello there how are you?', ''), (False, '</p>', '')])
def testTranslateableAttributes(self):
p = tr_html.HtmlChunks()
# Check that the translateable attributes in <img>, <submit>, <button> and
# <text> elements buttons are handled correctly.
chunks = p.Parse('<img src=bingo.jpg alt="hello there">'
'<input type=submit value="hello">'
'<input type="button" value="hello">'
'<input type=\'text\' value=\'Howdie\'>')
self.failUnless(chunks == [
(False, '<img src=bingo.jpg alt="', ''), (True, 'hello there', ''),
(False, '"><input type=submit value="', ''), (True, 'hello', ''),
(False, '"><input type="button" value="', ''), (True, 'hello', ''),
(False, '"><input type=\'text\' value=\'', ''), (True, 'Howdie', ''),
(False, '\'>', '')])
def testTranslateableHtmlToMessage(self):
msg = tr_html.HtmlToMessage(
'Hello <b>[USERNAME]</b>, <how> <i>are</i> you?')
pres = msg.GetPresentableContent()
self.failUnless(pres ==
'Hello BEGIN_BOLDX_USERNAME_XEND_BOLD, '
'<how> BEGIN_ITALICareEND_ITALIC you?')
msg = tr_html.HtmlToMessage('<b>Hello</b><I>Hello</I><b>Hello</b>')
pres = msg.GetPresentableContent()
self.failUnless(pres ==
'BEGIN_BOLD_1HelloEND_BOLD_1BEGIN_ITALICHelloEND_ITALIC'
'BEGIN_BOLD_2HelloEND_BOLD_2')
# Check that nesting (of the <font> tags) is handled correctly - i.e. that
# the closing placeholder numbers match the opening placeholders.
msg = tr_html.HtmlToMessage(
'''<font size=-1><font color=#FF0000>Update!</font> '''
'''<a href='http://desktop.google.com/whatsnew.html?hl=[$~LANG~$]'>'''
'''New Features</a>: Now search PDFs, MP3s, Firefox web history, and '''
'''more</font>''')
pres = msg.GetPresentableContent()
self.failUnless(pres ==
'BEGIN_FONT_1BEGIN_FONT_2Update!END_FONT_2 BEGIN_LINK'
'New FeaturesEND_LINK: Now search PDFs, MP3s, Firefox '
'web history, and moreEND_FONT_1')
msg = tr_html.HtmlToMessage('''<a href='[$~URL~$]'><b>[NUM][CAT]</b></a>''')
pres = msg.GetPresentableContent()
self.failUnless(pres == 'BEGIN_LINKBEGIN_BOLDX_NUM_XX_CAT_XEND_BOLDEND_LINK')
msg = tr_html.HtmlToMessage(
'''<font size=-1><a class=q onClick='return window.qs?qs(this):1' '''
'''href='http://[WEBSERVER][SEARCH_URI]'>Desktop</a></font> '''
''' ''')
pres = msg.GetPresentableContent()
self.failUnless(pres ==
'''BEGIN_FONTBEGIN_LINKDesktopEND_LINKEND_FONTSPACE''')
msg = tr_html.HtmlToMessage(
'''<br><br><center><font size=-2>©2005 Google </font></center>''', 1)
pres = msg.GetPresentableContent()
self.failUnless(pres ==
u'BEGIN_BREAK_1BEGIN_BREAK_2BEGIN_CENTERBEGIN_FONT\xa92005'
u' Google END_FONTEND_CENTER')
msg = tr_html.HtmlToMessage(
''' - <a class=c href=[$~CACHE~$]>Cached</a>''')
pres = msg.GetPresentableContent()
self.failUnless(pres ==
' - BEGIN_LINKCachedEND_LINK')
# Check that upper-case tags are handled correctly.
msg = tr_html.HtmlToMessage(
'''You can read the <A HREF='http://desktop.google.com/privacypolicy.'''
'''html?hl=[LANG_CODE]'>Privacy Policy</A> and <A HREF='http://desktop'''
'''.google.com/privacyfaq.html?hl=[LANG_CODE]'>Privacy FAQ</A> online.''')
pres = msg.GetPresentableContent()
self.failUnless(pres ==
'You can read the BEGIN_LINK_1Privacy PolicyEND_LINK_1 and '
'BEGIN_LINK_2Privacy FAQEND_LINK_2 online.')
# Check that tags with linebreaks immediately preceding them are handled
# correctly.
msg = tr_html.HtmlToMessage(
'''You can read the
      <A HREF='http://desktop.google.com/privacypolicy.html?hl=[LANG_CODE]'>Privacy Policy</A>
and <A HREF='http://desktop.google.com/privacyfaq.html?hl=[LANG_CODE]'>Privacy FAQ</A> online.''')
pres = msg.GetPresentableContent()
self.failUnless(pres == '''You c
|
snakeleon/YouCompleteMe-x64
|
third_party/ycmd/third_party/jedi_deps/parso/parso/python/parser.py
|
Python
|
gpl-3.0
| 8,227
| 0.000851
|
from parso.python import tree
from parso.python.token import PythonTokenTypes
from parso.parser import BaseParser
NAME = PythonTokenTypes.NAME
INDENT = PythonTokenTypes.INDENT
DEDENT = PythonTokenTypes.DEDENT
class Parser(BaseParser):
"""
    This class is used to parse a Python file; it then divides it into a
    class structure of different scopes.
:param pgen_grammar: The grammar object of pgen2. Loaded by load_grammar.
"""
node_map = {
'expr_stmt': tree.ExprStmt,
'classdef': tree.Class,
'funcdef': tree.Function,
'file_input': tree.Module,
'import_name': tree.ImportName,
'import_from': tree.ImportFrom,
'break_stmt': tree.KeywordStatement,
'continue_stmt': tree.KeywordStatement,
'return_stmt': tree.ReturnStmt,
'raise_stmt': tree.KeywordStatement,
'yield_expr': tree.YieldExpr,
'del_stmt': tree.KeywordStatement,
'pass_stmt': tree.KeywordStatement,
'global_stmt': tree.GlobalStmt,
'nonlocal_stmt': tree.KeywordStatement,
'print_stmt': tree.KeywordStatement,
'assert_stmt': tree.AssertStmt,
'if_stmt': tree.IfStmt,
'with_stmt': tree.WithStmt,
'for_stmt': tree.ForStmt,
'while_stmt': tree.WhileStmt,
'try_stmt': tree.TryStmt,
'sync_comp_for': tree.SyncCompFor,
# Not sure if this is the best idea, but IMO it's the easiest way to
# avoid extreme amounts of work around the subtle difference of 2/3
        # grammar in list comprehensions.
'decorator': tree.Decorator,
'lambdef': tree.Lambda,
'lambdef_nocond': tree.Lambda,
'namedexpr_test': tree.NamedExpr,
}
default_node = tree.PythonNode
# Names/Keywords are handled separately
_leaf_map = {
PythonTokenTypes.STRING: tree.String,
PythonTokenTypes.NUMBER: tree.Number,
PythonTokenTypes.NEWLINE: tree.Newline,
PythonTokenTypes.ENDMARKER: tree.EndMarker,
PythonTokenTypes.FSTRING_STRING: tree.FStringString,
PythonTokenTypes.FSTRING_START: tree.FStringStart,
PythonTokenTypes.FSTRING_END: tree.FStringEnd,
}
def __init__(self, pgen_grammar, error_recovery=True, start_nonterminal='file_input'):
super().__init__(pgen_grammar, start_nonterminal,
error_recovery=error_recovery)
self.syntax_errors = []
self._omit_dedent_list = []
self._indent_counter = 0
def parse(self, tokens):
if self._error_recovery:
if self._start_nonterminal != 'file_input':
raise NotImplementedError
tokens = self._recovery_tokenize(tokens)
return super().parse(tokens)
def convert_node(self, nonterminal, children):
"""
Convert raw node information to a PythonBaseNode instance.
This is passed to the parser driver which calls it whenever a reduction of a
        grammar rule produces a new complete node, so that the tree is built
strictly bottom-up.
"""
try:
node = self.node_map[nonterminal](children)
except KeyError:
if nonterminal == 'suite':
# We don't want the INDENT/DEDENT in our parser tree. Those
# leaves are just cancer. They are virtual leaves and not real
# ones and therefore have pseudo start/end positions and no
# prefixes. Just ignore them.
children = [children[0]] + children[2:-1]
node = self.default_node(nonterminal, children)
for c in children:
c.parent = node
return node
def convert_leaf(self, type, value, prefix, start_pos):
# print('leaf', repr(value), token.tok_name[type])
if type == NAME:
if value in self._pgen_grammar.reserved_syntax_strings:
return tree.Keyword(value, start_pos, prefix)
else:
return tree.Name(value, start_pos, prefix)
return self._leaf_map.get(type, tree.Operator)(value, start_pos, prefix)
def error_recovery(self, token):
tos_nodes = self.stack[-1].nodes
if tos_nodes:
last_leaf = tos_nodes[-1].get_last_leaf()
else:
last_leaf = None
if self._start_nonterminal == 'file_input' and \
(token.type == PythonTokenTypes.ENDMARKER
or token.type == DEDENT and not last_leaf.value.endswith('\n')
and not last_leaf.value.endswith('\r')):
# In Python statements need to end with a newline. But since it's
# possible (and valid in Python) that there's no newline at the
# end of a file, we have to recover even if the user doesn't want
# error recovery.
if self.stack[-1].dfa.from_rule == 'simple_stmt':
try:
plan = self.stack[-1].dfa.transitions[PythonTokenTypes.NEWLINE]
except KeyError:
pass
else:
if plan.next_dfa.is_final and not plan.dfa_pushes:
# We are ignoring here that the newline would be
# required for a simple_stmt.
self.stack[-1].dfa = plan.next_dfa
self._add_token(token)
return
if not self._error_recovery:
return super().error_recovery(token)
def current_suite(stack):
# For now just discard everything that is not a suite or
# file_input, if we detect an error.
for until_index, stack_node in reversed(list(enumerate(stack))):
# `suite` can sometimes be only simple_stmt, not stmt.
if stack_node.nonterminal == 'file_input':
break
elif stack_node.nonterminal == 'suite':
# In the case where we just have a newline we don't want to
# do error recovery here. In all other cases, we want to do
# error recovery.
if len(stack_node.nodes) != 1:
break
return until_index
until_index = current_suite(self.stack)
if self._stack_removal(until_index + 1):
self._add_token(token)
else:
typ, value, start_pos, prefix = token
if typ == INDENT:
# For every deleted INDENT we have to delete a DEDENT as well.
# Otherwise the parser will get into trouble and DEDENT too early.
self._omit_dedent_list.append(self._indent_counter)
error_leaf = tree.PythonErrorLeaf(typ.name, value, start_pos, prefix)
self.stack[-1].nodes.append(error_leaf)
tos = self.stack[-1]
if tos.nonterminal == 'suite':
            # Need at least one statement in the suite. This happened with the
# error recovery above.
try:
tos.dfa = tos.dfa.arcs['stmt']
except KeyError:
# We're already in a final state.
pass
def _stack_removal(self, start_index):
all_nodes = [node for stack_node in self.stack[start_index:] for node in stack_node.nodes]
if all_nodes:
node = tree.PythonErrorNode(all_nodes)
for n in all_nodes:
n.parent = node
self.stack[start_index - 1].nodes.append(node)
self.stack[start_index:] = []
return bool(all_nodes)
def _recovery_tokenize(self, tokens):
for token in tokens:
typ = token[0]
if typ == DEDENT:
# We need to count indents, because if we just omit any DEDENT,
# we might omit them in the wrong place.
o = self._omit_dedent_list
if o and o[-1] == self._indent_counter:
o.pop()
self._indent_counter -= 1
continue
self._indent_counter -= 1
elif typ == INDENT:
                self._indent_counter += 1
            yield token
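# A minimal usage sketch, assuming parso's public parse() helper (which loads a
# grammar and drives this Parser internally):
if __name__ == '__main__':
    import parso
    source = "def f():\n    return 1\n"
    module = parso.parse(source)           # error recovery is enabled by default
    assert module.get_code() == source     # the parse tree round-trips the source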
|
cr0hn/openvas_to_report
|
openvas_to_report/examples/__init__.py
|
Python
|
bsd-3-clause
| 1,761
| 0.007382
|
# -*- coding: utf-8 -*-
#
#
# Project name: OpenVAS2Report: A set of tools to manager OpenVAS XML report files.
# Project URL: https://github.com/cr0hn/openvas_to_report
#
# Copyright (c) 2015, cr0hn<-AT->cr0hn.com
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
# following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
__author__ = 'cr0hn - cr0hn<-at->cr0hn.com (@ggdaniel)'
| |
soulfx/gmusic-playlist
|
ExportLists.py
|
Python
|
mit
| 3,890
| 0.004627
|
# Author: John Elkins <john.elkins@yahoo.com>
# License: MIT <LICENSE>
from common import *
if len(sys.argv) < 2:
log('ERROR output directory is required')
time.sleep(3)
exit()
# setup the output directory, create it if needed
output_dir = sys.argv[1]
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# log in and load personal library
api = open_api()
library = load_personal_library()
def playlist_handler(playlist_name, playlist_description, playlist_tracks):
# skip empty and no-name playlists
if not playlist_name: return
if len(playlist_tracks) == 0: return
# setup output files
playlist_name = playlist_name.replace('/', '')
    open_log(os.path.join(output_dir,playlist_name+u'.log'))
outfile = codecs.open(os.path.join(output_dir,playlist_name+u'.csv'),
encoding='utf-8',mode='w')
# keep track of stats
stats = create_stats()
export_skipped = 0
    # keep track of songids in case we need to skip duplicates
    song_ids = []
log('')
log('============================================================')
log(u'Exporting '+ unicode(len(playlist_tracks)) +u' tracks from '
+playlist_name)
log('============================================================')
# add the playlist description as a "comment"
if playlist_description:
outfile.write(tsep)
outfile.write(playlist_description)
outfile.write(os.linesep)
for tnum, pl_track in enumerate(playlist_tracks):
track = pl_track.get('track')
        # we need to look up these tracks in the library
if not track:
library_track = [
item for item in library if item.get('id')
in pl_track.get('trackId')]
if len(library_track) == 0:
log(u'!! '+str(tnum+1)+repr(pl_track))
export_skipped += 1
continue
track = library_track[0]
result_details = create_result_details(track)
if not allow_duplicates and result_details['songid'] in song_ids:
log('{D} '+str(tnum+1)+'. '+create_details_string(result_details,True))
export_skipped += 1
continue
# update the stats
update_stats(track,stats)
# export the track
song_ids.append(result_details['songid'])
outfile.write(create_details_string(result_details))
outfile.write(os.linesep)
# calculate the stats
stats_results = calculate_stats_results(stats,len(playlist_tracks))
# output the stats to the log
log('')
log_stats(stats_results)
log(u'export skipped: '+unicode(export_skipped))
# close the files
close_log()
outfile.close()
# the personal library is used so we can lookup tracks that fail to return
# info from the ...playlist_contents() call
playlist_contents = api.get_all_user_playlist_contents()
for playlist in playlist_contents:
playlist_name = playlist.get('name')
playlist_description = playlist.get('description')
playlist_tracks = playlist.get('tracks')
playlist_handler(playlist_name, playlist_description, playlist_tracks)
if export_thumbs_up:
# get thumbs up playlist
thumbs_up_tracks = []
for track in library:
if track.get('rating') is not None and int(track.get('rating')) > 1:
thumbs_up_tracks.append(track)
# modify format of each dictionary to match the data type
# of the other playlists
thumbs_up_tracks_formatted = []
for t in thumbs_up_tracks:
thumbs_up_tracks_formatted.append({'track': t})
playlist_handler('Thumbs up', 'Thumbs up tracks', thumbs_up_tracks_formatted)
if export_all:
all_tracks_formatted = []
for t in library:
all_tracks_formatted.append({'track': t})
playlist_handler('All', 'All tracks', all_tracks_formatted)
close_api()
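# Invocation sketch: the output directory is the only required argument, e.g.
#   python ExportLists.py /path/to/export_dir
# one .csv (and .log) file is then written per playlist into that directory.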
|
eri-trabiccolo/exaile
|
plugins/playlistanalyzer/__init__.py
|
Python
|
gpl-2.0
| 6,185
| 0.008892
|
# Copyright (C) 2014 Dustin Spicuzza
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# The developers of the Exaile media player hereby grant permission
# for non-GPL compatible GStreamer and Exaile plugins to be used and
# distributed together with GStreamer and Exaile. This permission is
# above and beyond the permissions granted by the GPL license by which
# Exaile is covered. If you modify this code, you may extend this
# exception to your version of the code, but you are not obligated to
# do so. If you do not wish to do so, delete this exception statement
# from your version.
import gio
import cgi
import json
from os.path import dirname, join
from contextlib import closing
from xl.nls import gettext as _
from xl import (
providers,
settings
)
from xl.metadata.tags import tag_data
from xlgui.widgets import menu
from analyzer_dialog import AnalyzerDialog
class PlaylistAnalyzerPlugin(object):
def __init__(self):
self.menu_items = []
self.dialog = None
self._get_track_groups = None
self.d3_loc = join(dirname(__file__), 'ext', 'd3.min.js')
def enable(self, exaile):
self.exaile = exaile
def on_gui_loaded(self):
# register menu items
item = menu.simple_menu_item('pz-run', [], _('Analyze playlists'),
callback=self.on_analyze_playlists)
item.register('menubar-tools-menu')
self.menu_items.append(item)
item = menu.simple_menu_item('pz-run', ['export-files'], _('Analyze playlist'),
callback=self.on_analyze_playlist)
item.register('playlist-panel-context-menu')
self.menu_items.append(item)
# -> this could have a submenu that gets filled in with all
# of the presets
def on_exaile_loaded(self):
pass
def disable(self, exaile):
if self.dialog is not None:
self.dialog.destroy()
self.dialog = None
for menu_item in self.menu_items:
menu_item.unregister()
#
# Misc
#
def get_track_groups(self, track):
if self._get_track_groups is None:
if 'grouptagger' not in self.exaile.plugins.enabled_plugins:
raise ValueError("GroupTagger plugin must be loaded to use the GroupTagger tag")
self._get_track_groups = self.exaile.plugins.enabled_plugins['grouptagger'].get_track_groups
return self._get_track_groups(track)
#
# Menu functions
#
def on_analyze_playlist(self, widget, name, parent, context):
if self.dialog is None:
self.dialog = AnalyzerDialog(self, context['selected-playlist'])
def on_analyze_playlists(self, widget, name, parent, context):
if self.dialog is None:
self.dialog = AnalyzerDialog(self)
#
# Functions to generate the analysis
#
def get_tag(self, track, tagname, extra):
data = tag_data.get(tagname)
if data is not None:
if data.type == 'int':
ret = track.get_tag_raw(tagname, join=True)
if ret is not None:
if extra == 0:
return int(ret)
else:
return int(ret) - (int(ret) % extra)
return
if data.use_disk:
return track.get_tag_disk(tagname)
if tagname == '__grouptagger':
return list(self.get_track_groups(track))
return track.get_tag_raw(tagname, join=True)
def generate_data(self, tracks, tagdata):
data = []
for track in tracks:
if track is None:
data.append(None)
else:
data.append([self.get_tag(track, tag, extra) for tag, extra in tagdata])
return data
    def write_to_file(self, tmpl, uri, **kwargs):
'''
Opens a template file, performs substitution, writes it to the
output URI, and also writes d3.min.js to the output directory.
:param tmpl: Local pathname to template file
:param uri: URI of output file suitable for passing to gio.File
:param kwargs: Named parameters to substitute in template
'''
# read the template file
with open(tmpl, 'rb') as fp:
contents = fp.read()
try:
contents = contents % kwargs
except:
raise RuntimeError("Format string error in template (probably has unescaped % in it)")
outfile = gio.File(uri)
parent_dir = outfile.get_parent()
if parent_dir:
parent_dir = gio.File(parent_dir.get_uri() + "/d3.min.js")
with closing(outfile.replace('', False)) as fp:
fp.write(contents)
# copy d3 to the destination
# -> TODO: add checkbox to indicate whether it should write d3 there or not
if parent_dir:
with open(self.d3_loc, 'rb') as d3fp:
with closing(parent_dir.replace('', False)) as pfp:
pfp.write(d3fp.read())
# New plugin API; requires exaile 3.4.0 or later
plugin_class = PlaylistAnalyzerPlugin
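# A small sketch of the '%'-style template substitution that write_to_file()
# performs (the template string below is hypothetical; literal '%' characters
# in a real template must be escaped as '%%'):
if __name__ == '__main__':
    tmpl = "<title>%(title)s</title><script>var data = %(data)s;</script>"
    print(tmpl % {'title': 'My playlists', 'data': '[1, 2, 3]'})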
|
pneerincx/easybuild-framework
|
easybuild/tools/parallelbuild.py
|
Python
|
gpl-2.0
| 8,870
| 0.003157
|
# #
# Copyright 2012-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Module for doing parallel builds. This uses a PBS-like cluster. You should be able to submit jobs (which can have
dependencies)
Support for PBS is provided via the PbsJob class. If you want you could create other job classes and use them here.
@author: Toon Willems (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Stijn De Weirdt (Ghent University)
"""
import math
import os
import subprocess
import easybuild.tools.config as config
from easybuild.framework.easyblock import get_easyblock_instance
from easybuild.framework.easyconfig.easyconfig import ActiveMNS
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.config import get_repository, get_repositorypath
from easybuild.tools.module_naming_scheme.utilities import det_full_ec_version
from easybuild.tools.pbs_job import PbsJob, connect_to_server, disconnect_from_server, get_ppn
from easybuild.tools.repository.repository import init_repository
from vsc.utils import fancylogger
_log = fancylogger.getLogger('parallelbuild', fname=False)
def build_easyconfigs_in_parallel(build_command, easyconfigs, output_dir=None, prepare_first=True):
"""
easyconfigs is a list of easyconfigs which can be built (e.g. they have no unresolved dependencies)
this function will build them in parallel by submitting jobs
@param build_command: build command to use
@param easyconfigs: list of easyconfig files
@param output_dir: output directory
returns the jobs
"""
_log.info("going to build these easyconfigs in parallel: %s", easyconfigs)
job_ids = {}
# dependencies have already been resolved,
# so one can linearly walk over the list and use previous job id's
jobs = []
# create a single connection, and reuse it
conn = connect_to_server()
if conn is None:
_log.error("connect_to_server returned %s, can't submit jobs." % (conn))
    # determine ppn once, and pass it to each job being created
# this avoids having to figure out ppn over and over again, every time creating a temp connection to the server
ppn = get_ppn()
def tokey(dep):
"""Determine key for specified dependency."""
return ActiveMNS().det_full_module_name(dep)
for ec in easyconfigs:
# this is very important, otherwise we might have race conditions
# e.g. GCC-4.5.3 finds cloog.tar.gz but it was incorrectly downloaded by GCC-4.6.3
# running this step here, prevents this
if prepare_first:
prepare_easyconfig(ec)
# the new job will only depend on already submitted jobs
_log.info("creating job for ec: %s" % str(ec))
new_job = create_job(build_command, ec, output_dir=output_dir, conn=conn, ppn=ppn)
        # sometimes unresolved_deps will contain things, not needed to be built
job_deps = [job_ids[dep] for dep in map(tokey, ec['unresolved_deps']) if dep in job_ids]
new_job.add_dependencies(job_deps)
# place user hold on job to prevent it from starting too quickly,
# we might still need it in the queue to set it as a dependency for another job;
# only set hold for job without dependencies, other jobs have a dependency hold set anyway
with_hold = False
if not job_deps:
with_hold = True
# actually (try to) submit job
new_job.submit(with_hold)
_log.info("job for module %s has been submitted (job id: %s)" % (new_job.module, new_job.jobid))
# update dictionary
job_ids[new_job.module] = new_job.jobid
new_job.cleanup()
jobs.append(new_job)
# release all user holds on jobs after submission is completed
for job in jobs:
if job.has_holds():
_log.info("releasing hold on job %s" % job.jobid)
job.release_hold()
disconnect_from_server(conn)
return jobs
def submit_jobs(ordered_ecs, cmd_line_opts, testing=False):
"""
Submit jobs.
@param ordered_ecs: list of easyconfigs, in the order they should be processed
    @param cmd_line_opts: list of command line options (in 'longopt=value' form)
"""
curdir = os.getcwd()
# the options to ignore (help options can't reach here)
ignore_opts = ['robot', 'job']
# generate_cmd_line returns the options in form --longopt=value
opts = [x for x in cmd_line_opts if not x.split('=')[0] in ['--%s' % y for y in ignore_opts]]
# compose string with command line options, properly quoted and with '%' characters escaped
opts_str = subprocess.list2cmdline(opts).replace('%', '%%')
command = "unset TMPDIR && cd %s && eb %%(spec)s %s --testoutput=%%(output_dir)s" % (curdir, opts_str)
_log.info("Command template for jobs: %s" % command)
job_info_lines = []
if testing:
_log.debug("Skipping actual submission of jobs since testing mode is enabled")
else:
jobs = build_easyconfigs_in_parallel(command, ordered_ecs)
job_info_lines = ["List of submitted jobs:"]
job_info_lines.extend(["%s (%s): %s" % (job.name, job.module, job.jobid) for job in jobs])
job_info_lines.append("(%d jobs submitted)" % len(jobs))
return '\n'.join(job_info_lines)
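# A short illustration of how the job command template above is interpolated for a
# single easyconfig (the paths and easyconfig name are hypothetical); this is why
# '%' characters in the user's options must be escaped to '%%' before templating:
#
#   build_command = "unset TMPDIR && cd /home/user && eb %(spec)s --testoutput=%(output_dir)s"
#   build_command % {'spec': 'GCC-4.9.2.eb', 'output_dir': '/tmp/easybuild-build/GCC-4.9.2'}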
def create_job(build_command, easyconfig, output_dir=None, conn=None, ppn=None):
"""
Creates a job, to build a *single* easyconfig
@param build_command: format string for command, full path to an easyconfig file will be substituted in it
@param easyconfig: easyconfig as processed by process_easyconfig
@param output_dir: optional output path; --regtest-output-dir will be used inside the job with this variable
@param conn: open connection to PBS server
@param ppn: ppn setting to use (# 'processors' (cores) per node to use)
returns the job
"""
if output_dir is None:
output_dir = 'easybuild-build'
# capture PYTHONPATH, MODULEPATH and all variables starting with EASYBUILD
easybuild_vars = {}
for name in os.environ:
if name.startswith("EASYBUILD"):
easybuild_vars[name] = os.environ[name]
others = ["PYTHONPATH", "MODULEPATH"]
for env_var in others:
if env_var in os.environ:
easybuild_vars[env_var] = os.environ[env_var]
_log.info("Dictionary of environment variables passed to job: %s" % easybuild_vars)
# obtain unique name based on name/easyconfig version tuple
ec_tuple = (easyconfig['ec']['name'], det_full_ec_version(easyconfig['ec']))
name = '-'.join(ec_tuple)
# create command based on build_command template
command = build_command % {
'spec': easyconfig['spec'],
'output_dir': os.path.join(os.path.abspath(output_dir), name),
}
# just use latest build stats
repo = init_repository(get_repository(), get_repositorypath())
buildstats = repo.get_buildstats(*ec_tuple)
resources = {}
if buildstats:
previous_time = buildstats[-1]['build_time']
resources['hours'] = int(math.ceil(previous_time * 2 / 60))
job = PbsJob(command, name, easybuild_vars, resources=resources, conn=conn, ppn=ppn)
|
drewhutchison/coolerofdoom
|
data.py
|
Python
|
mit
| 3,415
| 0.009663
|
import sqlite3
TWITTER_CONSUMER_KEY = 'twitter_consumer_key'
TWITTER_CONSUMER_SECRET = 'twitter_consumer_secret'
TWITTER_ACCESS_TOKEN = 'twitter_access_token'
TWITTER_ACCESS_TOKEN_SECRET = 'twitter_access_token_secret'
LAST_LATITUDE = 'last_latitude'
LAST_LONGITUDE = 'last_longitude'
UPC_DATABASE_KEY = 'upc_database_key'
USER_TIMEOUT = 500
class Data(object):
conn = sqlite3.connect('CoD.db')
c = conn.cursor()
def __del__(self):
self.conn.commit()
self.c.close()
self.conn.close()
def _getter(self, key):
self.c.execute('SELECT value FROM kvs WHERE key=?', (key,))
out = self.c.fetchone()[0]
return out
@property
def last_latitude(self): return self._getter(LAST_LATITUDE)
@property
def last_longitude(self): return self._getter(LAST_LONGITUDE)
@property
def twitter_access_token(self): return self._getter(TWITTER_ACCESS_TOKEN)
@property
def twitter_access_token_secret(self):
return self._getter(TWITTER_ACCESS_TOKEN_SECRET)
@property
def twitter_consumer_key(self):
return self._getter(TWITTER_CONSUMER_KEY)
@property
def twitter_consumer_secret(self):
return self._getter(TWITTER_CONSUMER_SECRET)
@property
def upc_database_key(self):
return self._getter(UPC_DATABASE_KEY)
def get_beverage(self, upc):
self.c.execute('SELECT upc, description, untappd_id FROM beverages WHERE upc = ?',
(upc,))
ret = self.c.fetchone()
return Beverage(ret) if ret else None
def new_beverage(self, upc, description):
self.c.execute('''INSERT
INTO beverages (upc, description, untappd_id)
VALUES (?, ?, ?)''',
(upc, description, ''))
self.conn.commit()
def update_user(self, user_id):
print 'updating user ', user_id
self.c.execute('''UPDATE users
SET last_seen=datetime('now')
WHERE user_id=?''',
(user_id,))
def log(self, upc):
self.c.execute('''INSERT INTO log(upc, timestamp)
VALUES (?, datetime('now'))''',
(upc,))
def get_current_user(self):
self.c.execute('''SELECT *
FROM users
WHERE last_seen BETWEEN datetime('now','-500 seconds')
AND datetime('now')
ORDER BY last_seen DESC
LIMIT 1
''')
ret = self.c.fetchone()
if ret is not None: return User(ret)
def log_beverage(self, user, beverage):
self.c.execute('''INSERT
INTO drinks(user_id, beverage_id, timestamp)
VALUES (?, ?, datetime('now'))''',
                       (user.user_id, beverage.upc))
class Beverage(object):
def __init__(self, tup):
if type(tup) is tuple:
self.upc, self.description, self.untapped_id = tup
class User(object):
def __init__(self, tup):
if type(tup) is tuple:
self.user_id, self.name, self.email, self.last_seen, self.twitter_handle = tup
data = Data()
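# A brief usage sketch of the module-level `data` object above (the UPC value and
# description are hypothetical; requires the CoD.db schema to exist):
#
#   if data.get_beverage('012345678905') is None:
#       data.new_beverage('012345678905', 'Hypothetical IPA')
#   data.log('012345678905')
#   user = data.get_current_user()   # whoever was seen in the last 500 seconds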
|
alriddoch/cyphesis
|
rulesets/mason/world/tasks/Repairing.py
|
Python
|
gpl-2.0
| 3,147
| 0.013028
|
#This file is distributed under the terms of the GNU General Public license.
#Copyright (C) 2011 Jekin Trivedi <jekintrivedi@gmail.com> (See the file COPYING for details).
from atlas import *
from physics import *
from physics import Quaternion
from physics import Vector3D
import sys
import server
class Repairing(server.Task):
"""A very simple Repair system for Repairing structures."""
materials = ["wood"]
def consume_materials (self) :
""" A method which gets the material to be consumed from the inventory & returns the consume operation """
for item in self.character.contains:
if item.type[0] == str(self.materials[0]):
set = Operation("set", Entity(item.id, status = -1), to = item)
return set
else :
print "No Wood in inventory"
return 0
def repair_operation(self, op):
""" The repair op is FROM the the character,
TO the structure that is getting Repaired which we
term the target. """
if len(op) < 1:
sys.stderr.write("Repair task has no target in repair op")
# FIXME Use weak references, once we have them
self.target = server.world.get_object_ref(op[0].id)
self.tool = op.to
def tick_operation(self, op):
""" This method is called repeatedly, each time a Repair turn occurs.
In this example the interval is fixed, but it can be varied. """
# print "Repair.tick"
res=Oplist()
current_status = 0
if self.target() is None:
# print "Target is no more"
self.irrelevant()
return
if self.character.stamina <= 0:
# print "I am exhausted"
self.irrelevant()
return
        if square_distance(self.character.location, self.target().location) > self.target().location.bbox.square_bounding_radius():
self.progress = current_status
self.rate = 0
return self.next_tick(1.75)
        # Some entities do not have status defined. If not present we assume that the entity is unharmed & stop the task
if hasattr ( self.target(), 'status' ) :
current_status = self.target().status
else:
set = Operation("set", Entity(self.self.target(), status = 1),
to = self.target)
res.append(set)
current_status = 1.0
self.irrelevant()
if current_status < 0.9:
set=Operation("set", Entity(self.target().id, status=current_status+0.1), to=self.target())
res.append(set)
consume = self.consume_materials ()
if consume :
res.append(consume)
else :
self.irrelevant()
else:
set = Operation("set", Entity(self.target().id, status = 1),
to = self.target())
res.append(set)
self.irrelevant()
self.progress = current_status
self.rate = 0.1 / 1.75
res.append(self.next_tick(1.75))
return res
|
wakatime/komodo-wakatime
|
components/wakatime/compat.py
|
Python
|
bsd-3-clause
| 3,553
| 0.001689
|
# -*- coding: utf-8 -*-
"""
wakatime.compat
~~~~~~~~~~~~~~~
For working with Python2 and Python3.
:copyright: (c) 2014 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
import codecs
import os
import platform
import subprocess
import sys
is_py2 = (sys.version_info[0] == 2)
is_py3 = (sys.version_info[0] == 3)
is_win = platform.system() == 'Windows'
if is_py2: # pragma: nocover
def u(text):
if text is None:
return None
try:
return text.decode('utf-8')
except:
try:
return text.decode(sys.getdefaultencoding())
except:
try:
return unicode(text)
except:
return text.decode('utf-8', 'replace')
open = codecs.open
basestring = basestring
elif is_py3: # pragma: nocover
def u(text):
if text is None:
return None
if isinstance(text, bytes):
try:
return text.decode('utf-8')
except:
try:
return text.decode(sys.getdefaultencoding())
except:
pass
try:
return str(text)
except:
return text.decode('utf-8', 'replace')
open = open
basestring = (str, bytes)
try:
from importlib import import_module
except ImportError: # pragma: nocover
def _resolve_name(name, package, level):
"""Return the absolute name of the module to be imported."""
if not hasattr(package, 'rindex'):
raise ValueError("'package' not set to a string")
dot = len(package)
for x in xrange(level, 1, -1):
try:
dot = package.rindex('.', 0, dot)
except ValueError:
raise ValueError("attempted relative import beyond top-level "
"package")
return "%s.%s" % (package[:dot], name)
def import_module(name, package=None):
"""Import a module.
The 'package' argument is required when performing a relative import.
It specifies the package to use as the anchor point from which to
resolve the relative import to an absolute import.
"""
if name.startswith('.'):
if not package:
raise TypeError("relative imports require the 'package' "
"argument")
level = 0
for character in name:
if character != '.':
break
level += 1
name = _resolve_name(name[level:], package, level)
__import__(name)
return sys.modules[name]
try:
from .packages import simplejson as json
except (ImportError, SyntaxError): # pragma: nocover
import json
class Popen(subprocess.Popen):
"""Patched Popen to prevent opening cmd window on Windows platform."""
def __init__(self, *args, **kwargs):
startupinfo = kwargs.get('startupinfo')
if is_win or True:
try:
startupinfo = startupinfo or subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
except AttributeError:
pass
kwargs['startupinfo'] = startupinfo
if 'env' not in kwargs:
kwargs['env'] = os.environ.copy()
kwargs['env']['LANG'] = 'en-US' if is_win else 'en_US.UTF-8'
subprocess.Popen.__init__(self, *args, **kwargs)
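# A small runnable sketch of the u() helper above; it behaves the same on
# Python 2 and Python 3 (bytes are decoded as UTF-8 where possible):
if __name__ == '__main__':
    assert u(None) is None
    assert u(b'caf\xc3\xa9') == b'caf\xc3\xa9'.decode('utf-8')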
|
swastvedt/dailygraphics
|
fabfile/utils.py
|
Python
|
mit
| 813
| 0.00369
|
#!/usr/bin/env python
import boto
from boto.s3.connection import OrdinaryCallingFormat
from fabric.api import prompt
def confirm(message):
"""
    Verify a user's intentions.
"""
answer = prompt(message, default="Not at all")
if answer.lower() not in ('y', 'yes', 'buzz off', 'screw you'):
exit()
def replace_in_file(filename, find, replace):
with open(filename, 'r') as f:
contents = f.read()
contents = contents.replace(find, replace)
with open(filename, 'w') as f:
f.write(contents)
def get_bucket(bucket_name):
"""
    Establishes a connection and gets the S3 bucket
"""
if '.' in bucket_name:
s3 = boto.connect_s3(calling_format=OrdinaryCallingFormat())
else:
s3 = boto.connect_s3()
return s3.get_bucket(bucket_name)
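# A brief usage sketch (the bucket name, file name and strings are hypothetical):
#
#   confirm('Deploy to production?')                    # exits unless the user answers yes
#   replace_in_file('app_config.js', 'localhost:8000', 'example.com')
#   bucket = get_bucket('apps.example.com')             # dotted names use OrdinaryCallingFormat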
|
82Flex/DCRM
|
WEIPDCRM/views/admin/help/about.py
|
Python
|
agpl-3.0
| 1,308
| 0
|
# coding=utf-8
"""
DCRM - Darwin Cydia Repository Manager
Copyright (C) 2017 WU Zheng <i.82@me.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from django.contrib import admin
from django.contrib.admin.views.decorators import staff_member_required
from django.shortcuts import render
from django.utils.translation import ugettext as _
@staff_member_required
def about_view(request):
"""
:param request: Django Request
:return: Django HttpResponse
:rtype: HttpResponse
"""
context = admin.site.each_context(request)
context.update({
'title': _('About'),
'version': "4.1",
})
template = 'admin/help/about.html'
return render(request, template, context)
|
google/active-qa
|
px/nmt/model_test.py
|
Python
|
apache-2.0
| 70,985
| 0.005776
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for model.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pprint
import shutil
import sys
import tempfile
import numpy as np
import tensorflow as tf
from px.nmt import attention_model
from px.nmt import gnmt_model
from px.nmt import model
from px.nmt.utils import common_test_utils
from px.nmt.utils import nmt_utils
from px.nmt.utils import trie_decoder_utils
float32 = np.float32
int32 = np.int32
array = np.array
class ModelTest(tf.test.TestCase):
@classmethod
def setUpClass(cls):
cls.actual_vars_values = {}
cls.expected_vars_values = {
'AttentionMechanismBahdanau/att_layer_weight/shape': (10, 5),
'AttentionMechanismBahdanau/att_layer_weight/sum':
-0.64981574,
'AttentionMechanismBahdanau/last_dec_weight/shape': (10, 20),
'AttentionMechanismBahdanau/last_dec_weight/sum':
0.058069646,
'AttentionMechanismBahdanau/last_enc_weight/shape': (10, 20),
'AttentionMechanismBahdanau/last_enc_weight/sum':
0.058028102,
'AttentionMechanismLuong/att_layer_weight/shape': (10, 5),
'AttentionMechanismLuong/att_layer_weight/sum':
-0.64981574,
'AttentionMechanismLuong/last_dec_weight/shape': (10, 20),
'AttentionMechanismLuong/last_dec_weight/sum':
0.058069646,
'AttentionMechanismLuong/last_enc_weight/shape': (10, 20),
'AttentionMechanismLuong/last_enc_weight/sum':
0.058028102,
'AttentionMechanismNormedBahdanau/att_layer_weight/shape': (10, 5),
'AttentionMechanismNormedBahdanau/att_layer_weight/sum':
-0.64981973,
'AttentionMechanismNormedBahdanau/last_dec_weight/shape': (10, 20),
'AttentionMechanismNormedBahdanau/last_dec_weight/sum':
0.058067322,
'AttentionMechanismNormedBahdanau/last_enc_weight/shape': (10, 20),
'AttentionMechanismNormedBahdanau/last_enc_weight/sum':
0.058022559,
'AttentionMechanismScaledLuong/att_layer_weight/shape': (10, 5),
'AttentionMechanismScaledLuong/att_layer_weight/sum':
-0.64981574,
'AttentionMechanismScaledLuong/last_dec_weight/shape': (10, 20),
'AttentionMechanismScaledLuong/last_dec_weight/sum':
0.058069646,
'AttentionMechanismScaledLuong/last_enc_weight/shape': (10, 20),
'AttentionMechanismScaledLuong/last_enc_weight/sum':
0.058028102,
'ContextModel_context_bilstm_last/ctx_resizer/shape': (10, 5),
'ContextModel_context_bilstm_last/ctx_resizer/sum':
-0.64984089,
'ContextModel_context_bilstm_pool/ctx_resizer/shape': (10, 5),
        'ContextModel_context_bilstm_pool/ctx_resizer/sum':
-0.64984130,
'ContextModel_bilstm_last_decoder_hidden_state/last_dec_weight/shape':
(10, 20),
'ContextModel_bilstm_last_decoder_hidden_state/last_dec_weight/sum':
0.058056116,
'ContextModel_bilstm_last_decoder_hidden_state/last_enc_weight/shape':
(10, 20),
'ContextModel_bilstm_last_decoder_hidden_state/last_enc_weight/sum':
0.058025479,
        'ContextModel_bilstm_pool_encoder_output/last_dec_weight/shape': (10,
20),
'ContextModel_bilstm_pool_encoder_output/last_dec_weight/sum':
0.058035135,
'ContextModel_bilstm_pool_encoder_output/last_enc_weight/shape': (10,
20),
'ContextModel_bilstm_pool_encoder_output/last_enc_weight/sum':
0.058024108,
'GNMTModel_gnmt/last_dec_weight/shape': (15, 20),
'GNMTModel_gnmt/last_dec_weight/sum':
-0.48634407,
'GNMTModel_gnmt/last_enc_weight/shape': (10, 20),
'GNMTModel_gnmt/last_enc_weight/sum':
0.058025002,
'GNMTModel_gnmt/mem_layer_weight/shape': (5, 5),
'GNMTModel_gnmt/mem_layer_weight/sum':
-0.44815454,
'GNMTModel_gnmt_v2/last_dec_weight/shape': (15, 20),
'GNMTModel_gnmt_v2/last_dec_weight/sum':
-0.48634392,
'GNMTModel_gnmt_v2/last_enc_weight/shape': (10, 20),
'GNMTModel_gnmt_v2/last_enc_weight/sum':
0.058024824,
'GNMTModel_gnmt_v2/mem_layer_weight/shape': (5, 5),
'GNMTModel_gnmt_v2/mem_layer_weight/sum':
-0.44815454,
'NoAttentionNoResidualUniEncoder/last_dec_weight/shape': (10, 20),
'NoAttentionNoResidualUniEncoder/last_dec_weight/sum':
0.057424068,
'NoAttentionNoResidualUniEncoder/last_enc_weight/shape': (10, 20),
'NoAttentionNoResidualUniEncoder/last_enc_weight/sum':
0.058453858,
'NoAttentionResidualBiEncoder/last_dec_weight/shape': (10, 20),
'NoAttentionResidualBiEncoder/last_dec_weight/sum':
0.058025062,
'NoAttentionResidualBiEncoder/last_enc_weight/shape': (10, 20),
'NoAttentionResidualBiEncoder/last_enc_weight/sum':
0.058053195,
'UniEncoderBottomAttentionArchitecture/last_dec_weight/shape': (10, 20),
'UniEncoderBottomAttentionArchitecture/last_dec_weight/sum':
0.058024943,
'UniEncoderBottomAttentionArchitecture/last_enc_weight/shape': (10, 20),
'UniEncoderBottomAttentionArchitecture/last_enc_weight/sum':
0.058025122,
'UniEncoderBottomAttentionArchitecture/mem_layer_weight/shape': (5, 5),
'UniEncoderBottomAttentionArchitecture/mem_layer_weight/sum':
-0.44815454,
'UniEncoderStandardAttentionArchitecture/last_dec_weight/shape': (10,
20),
'UniEncoderStandardAttentionArchitecture/last_dec_weight/sum':
0.058025002,
'UniEncoderStandardAttentionArchitecture/last_enc_weight/shape': (10,
20),
'UniEncoderStandardAttentionArchitecture/last_enc_weight/sum':
0.058024883,
'UniEncoderStandardAttentionArchitecture/mem_layer_weight/shape': (5,
5),
'UniEncoderStandardAttentionArchitecture/mem_layer_weight/sum':
-0.44815454,
}
cls.actual_train_values = {}
cls.expected_train_values = {
'AttentionMechanismBahdanau/loss': 8.8519039,
'AttentionMechanismLuong/loss': 8.8519039,
'AttentionMechanismNormedBahdanau/loss': 8.851902,
'AttentionMechanismScaledLuong/loss': 8.8519039,
'ContextModel_bilstm_last_decoder_hidden_state/loss': 8.8519096,
'ContextModel_bilstm_pool_encoder_output/loss': 8.8519124,
'GNMTModel_gnmt/loss': 8.8519087,
'GNMTModel_gnmt_v2/loss': 8.8519087,
'NoAttentionNoResidualUniEncoder/loss': 8.8516064,
'NoAttentionResidualBiEncoder/loss': 8.851984,
'UniEncoderStandardAttentionArchitecture/loss': 8.8519087,
'InitializerGlorotNormal/loss': 8.9779415,
'InitializerGlorotUniform/loss': 8.7643699,
}
cls.actual_eval_values = {}
cls.expected_eval_values = {
'AttentionMechanismBahdanau/loss': 8.8517132,
'AttentionMechanismBahdanau/predict_count': 11.0,
'AttentionMechanismLuong/loss': 8.8517132,
|
dbentley/pants
|
src/python/pants/goal/goal.py
|
Python
|
apache-2.0
| 8,127
| 0.010336
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.goal.error import GoalError
class Goal(object):
"""Factory for objects representing goals.
Ensures that we have exactly one instance per goal name.
:API: public
"""
_goal_by_name = dict()
def __new__(cls, *args, **kwargs):
raise TypeError('Do not instantiate {0}. Call by_name() instead.'.format(cls))
@classmethod
def register(cls, name, description):
"""Register a goal description.
Otherwise the description must be set when registering some task on the goal,
which is clunky, and dependent on things like registration order of tasks in the goal.
A goal that isn't explicitly registered with a description will fall back to the description
of the task in that goal with the same name (if any). So singleton goals (e.g., 'clean-all')
need not be registered explicitly. This method is primarily useful for setting a
description on a generic goal like 'compile' or 'test', that multiple backends will
register tasks on.
:API: public
"""
cls.by_name(name)._description = description
@classmethod
def by_name(cls, name):
"""Returns the unique object representing the goal of the specified name.
:API: public
"""
if name not in cls._goal_by_name:
cls._goal_by_name[name] = _Goal(name)
return cls._goal_by_name[name]
@classmethod
def clear(cls):
"""Remove all goals and tasks.
This method is EXCLUSIVELY for use in tests and during pantsd startup.
:API: public
"""
cls._goal_by_name.clear()
@staticmethod
def scope(goal_name, task_name):
"""Returns options scope for specified task in specified goal.
:API: public
"""
return goal_name if goal_name == task_name else '{0}.{1}'.format(goal_name, task_name)
@staticmethod
def all():
"""Returns all registered goals, sorted alphabetically by name.
:API: public
"""
return [pair[1] for pair in sorted(Goal._goal_by_name.items())]
@classmethod
def subsystems(cls):
"""Returns all subsystem types used by all tasks, in no particular order.
:API: public
"""
ret = set()
for goal in cls.all():
ret.update(goal.subsystems())
return ret
class _Goal(object):
def __init__(self, name):
"""Don't call this directly.
Create goals only through the Goal.by_name() factory.
"""
self.name = name
self._description = ''
self.serialize = False
self._task_type_by_name = {} # name -> Task subclass.
self._ordered_task_names = [] # The task names, in the order imposed by registration.
@property
def description(self):
if self._description:
return self._description
# Return the docstring for the Task registered under the same name as this goal, if any.
# This is a very common case, and therefore a useful idiom.
namesake_task = self._task_type_by_name.get(self.name)
if namesake_task and namesake_task.__doc__:
# First line of docstring.
# TODO: This is repetitive of Optionable.get_description(). We should probably just
# make Goal an Optionable, for uniformity.
return namesake_task.__doc__.partition('\n')[0].strip()
return ''
def register_options(self, options):
for task_type in sorted(self.task_types(), key=lambda cls: cls.options_scope):
task_type.register_options_on_scope(options)
def install(self, task_registrar, first=False, replace=False, before=None, after=None):
"""Installs the given task in this goal.
The placement of the task in this goal's execution list defaults to the end but its position
can be influenced by specifying exactly one of the following arguments:
first: Places the task 1st in the execution list.
replace: Removes all existing tasks in this goal and installs this task.
before: Places the task before the named task in the execution list.
after: Places the task after the named task in the execution list.
"""
if [bool(place) for place in [first, replace, before, after]].count(True) > 1:
raise GoalError('Can only specify one of first, replace, before or after')
task_name = task_registrar.name
options_scope = Goal.scope(self.name, task_name)
# Currently we need to support registering the same task type multiple times in different
# scopes. However we still want to have each task class know the options scope it was
# registered in. So we create a synthetic subclass here.
# TODO(benjy): Revisit this when we revisit the task lifecycle. We probably want to have
# a task *instance* know its scope, but this means converting option registration from
# a class method to an instance method, and instantiating the task much sooner in the
# lifecycle.
superclass = task_registrar.task_type
subclass_name = b'{0}_{1}'.format(superclass.__name__,
options_scope.replace('.', '_').replace('-', '_'))
task_type = type(subclass_name, (superclass,), {
'__doc__': superclass.__doc__,
'__module__': superclass.__module__,
'options_scope': options_scope,
'_stable_name': superclass.stable_name()
})
otn = self._ordered_task_names
if replace:
for tt in self.task_types():
tt.options_scope = None
del otn[:]
self._task_type_by_name = {}
if first:
otn.insert(0, task_name)
elif before in otn:
otn.insert(otn.index(before), task_name)
elif after in otn:
otn.insert(otn.index(after) + 1, task_name)
else:
otn.append(task_name)
self._task_type_by_name[task_name] = task_type
if task_registrar.serialize:
self.serialize = True
return self
def uninstall_task(self, name):
"""Removes the named task from this goal.
Allows external plugins to modify the execution plan. Use with caution.
Note: Does not relax a serialization requirement that originated
from the uninstalled task's install() call.
"""
if name in self._task_type_by_name:
self._task_type_by_name[name].options_scope = None
del self._task_type_by_name[name]
self._ordered_task_names = [x for x in self._ordered_task_names if x != name]
else:
raise GoalError('Cannot uninstall unknown task: {0}'.format(name))
def known_scope_infos(self):
"""Yields ScopeInfos for all known scopes under this goal."""
# Note that we don't yield the goal's own scope. We don't need it (as we don't register
# options on it), and it's needlessly confusing when a task has the same name as its goal,
# in which case we shorten its scope to the goal's scope (e.g., idea.idea -> idea).
for task_type in self.task_types():
for scope_info in task_type.known_scope_infos():
yield scope_info
def subsystems(self):
"""Returns all subsystem
|
types used by tasks in this goal, in no particular order."""
ret = set()
for task_type in self.task_types():
ret.update([dep.subsystem_cls for dep in task_type.subsystem_dependencies_iter()])
return ret
def ordered_task_names(self):
"""The task names in this goal, in registration
|
order."""
return self._ordered_task_names
def task_type_by_name(self, name):
"""The task type registered under the given name."""
return self._task_type_by_name[name]
def task_types(self):
"""Returns the task types in this goal, unordered."""
return self._task_type_by_name.values()
def task_items(self):
for name, task_type in self._task_type_by_name.items():
yield name, task_type
def has_task_of_type(self, typ):
"""Returns True if this goal has a task of the given type (or a subtype of it)."""
for task_type in self.task_types():
if issubclass(task_type, typ):
return True
return False
def __repr__(self):
return self.name
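# A minimal registration sketch (the goal name and description are illustrative):
if __name__ == '__main__':
  Goal.register('compile', 'Compile source code.')
  assert Goal.by_name('compile') is Goal.by_name('compile')   # one instance per name
  assert Goal.by_name('compile').description == 'Compile source code.'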
|
UFCGProjects/sig
|
src/tests/FugaRotaTest.py
|
Python
|
mit
| 1,680
| 0.008333
|
import psycopg2
import unittest
import sys
import os
class GFugaRotaTest(unittest.TestCase):
def setUp(self):
self.table = open(os.path.abspath('../') + '/sql/createsTable/FugaRota.sql', 'r')
self.constraints = open(os.path.abspath('../') + '/sql/createsTable/FugaRota_const.sql', 'r')
self.insert = open(os.path.abspath('../') + '/sql/inserts/Horarios_inserts.sql', 'r')
self.falho = open(os.path.abspath('../') + '/sql/inserts/FugaRota_inserts_falhos.sql', 'r')
self.FugaRota = self.table.read()
self.cons = self.constraints.read()
self.inserts = self.insert.readlines()
self.falhos = self.falho.readlines()
self.table.close()
self.constraints.close()
self.insert.close()
self.falho.close()
conn = psycopg2.connect("dbname=teste user=postgres")
conn.set_isolation_level(0) # set autocommit
self.cur = conn.cursor()
def tearDown(self):
self.cur.close()
def testBCreateTable(self):
self.cur.execute(self.FugaRota)
self.assertEqual(self.cur.statusmessage, "CREATE TABLE")
    def testCConstraints(self):
self.cur.execute(self.cons)
        self.assertEqual(self.cur.statusmessage, "ALTER TABLE")
def testDInsertTable(self):
for self.dados in self.inserts:
self.cur.execute(self.dados)
self.assertEqual(self.cur.statusmessage, "INSERT 0 1")
def testEInsertTableFalhos(self):
for self.dadosFalhos in self.falhos:
try:
self.cur.execute(self.dadosFalhos)
except:
self.assertTrue(True)
|
uclouvain/osis
|
infrastructure/tests/shared_kernel/academic_year/repository/in_memory/test_academic_year.py
|
Python
|
agpl-3.0
| 2,533
| 0.001975
|
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2022 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import datetime
from django.test import SimpleTestCase
from ddd.logic.shared_kernel.academic_year.domain.model.academic_year import AcademicYear, AcademicYearIdentity
from infrastructure.shared_kernel.academic_year.repository.in_memory.academic_year import AcademicYearInMemoryRepository
class TestAcademicYearInMemoryRepository(SimpleTestCase):
def setUp(self):
self.academic_year_repository = AcademicYearInMemoryRepository()
for annee in range(2016, 2021):
self.academic_year_repository.save(AcademicYear(
entity_id=AcademicYearIdentity(year=annee),
start_date=datetime.date(annee, 9, 15),
end_date=datetime.date(annee+1, 9, 30),
))
def test_search_should_return_specific_academic_years_if_specified_year(self):
years = self.academic_year_repository.search(from_year=2018)
self.assertEqual(len(years), 3)
for index, annee in enumerate(range(2018, 2021)):
self.assertEqual(years[index].year, annee)
def test_search_should_return_all_academic_years_if_not_specified_year(self):
years = self.academic_year_repository.search()
self.assertEqual(len(years), 5)
for index, annee in enumerate(range(2016, 2021)):
self.assertEqual(years[index].year, annee)
|
NeCTAR-RC/horizon
|
openstack_dashboard/test/integration_tests/tests/test_defaults.py
|
Python
|
apache-2.0
| 3,108
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from openstack_dashboard.test.integration_tests import helpers
from openstack_dashboard.test.integration_tests.regions import messages
class TestDefaults(helpers.AdminTestCase):
def setUp(self):
super(TestDefaults, self).setUp()
self.defaults_page = self.home_pg.go_to_admin_system_defaultspage()
self.add_up = random.randint(1, 10)
def test_update_compute_defaults(self):
"""Tests the Update Default Compute Quotas functionality:
1) Login as Admin and go to Admin > System > Defaults
2) Updates default compute Quotas by adding a random
number between 1 and 10
3) Verifies that the updated values are present in the
Compute Quota Defaults table
"""
default_quota_values = self.defaults_page.compute_quota_values
self.defaults_page.update_compute_defaults(self.add_up)
self.assertTrue(
self.defaults_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
self.defaults_page.find_message_and_dismiss(messages.ERROR))
self.assertGreater(len(default_quota_values), 0)
for quota_name in default_quota_values:
self.assertTrue(
self.defaults_page.is_compute_quota_a_match(
quota_name,
default_quota_values[quota_name] + self.add_up
))
def test_update_volume_defaults(self):
"""Tests the Update Default Volume Quotas functionality:
1) Login as Admin and go to Admin > System > Defaults
2) Clicks on Volume Quotas tab
3) Updates default volume Quotas by adding a random
number between 1 and 10
4) Verifies that the updated values are present in the
Volume Quota Defaults table
"""
self.defaults_page.go_to_volume_quotas_tab()
default_quota_values = self.defaults_page.volume_quota_values
self.defaults_page.update_volume_defaults(self.add_up)
self.assertTrue(
self.defaults_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
self.defaults_page.find_message_and_dismiss(messages.ERROR))
        self.assertGreater(len(default_quota_values), 0)
for quota_name in default_quota_values:
self.assertTrue(
                self.defaults_page.is_volume_quota_a_match(
quota_name,
default_quota_values[quota_name] + self.add_up
))
|
mcmaxwell/idea_digital_agency
|
idea/feincms/module/page/migrations/0010_auto_20180124_1945.py
|
Python
|
mit
| 612
| 0.001634
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2018-01-24 19:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('page', '0009_auto_20180124_0105'),
]
operations = [
migrations.AlterField(
model_name='page',
name='template_key',
            field=models.CharField(choices=[(b'content/pages/page.html', 'Page'), (b'content/pages/index_page.html', 'Index Page')], default=b'content/pages/page.html', max_length=255, verbose_name='template'),
),
]
|
yephper/django
|
django/views/generic/base.py
|
Python
|
bsd-3-clause
| 7,898
| 0.001013
|
from __future__ import unicode_literals
import logging
from functools import update_wrapper
from django import http
from django.core.exceptions import ImproperlyConfigured
from django.template.response import TemplateResponse
from django.urls import NoReverseMatch, reverse
from django.utils import six
from django.utils.decorators import classonlymethod
logger = logging.getLogger('django.request')
class ContextMixin(object):
"""
A default context mixin that passes the keyword arguments received by
get_context_data as the template context.
"""
def get_context_data(self, **kwargs):
if 'view' not in kwargs:
kwargs['view'] = self
return kwargs
class View(object):
"""
Intentionally simple parent class for all views. Only implements
dispatch-by-method and simple sanity checking.
"""
http_method_names = ['get', 'post', 'put', 'patch', 'delete', 'head', 'options', 'trace']
def __init__(self, **kwargs):
"""
Constructor. Called in the URLconf; can contain helpful extra
keyword arguments, and other things.
"""
# Go through keyword arguments, and either save their values to our
# instance, or raise an error.
for key, value in six.iteritems(kwargs):
setattr(self, key, value)
@classonlymethod
def as_view(cls, **initkwargs):
"""
Main entry point for a request-response process.
"""
for key in initkwargs:
if key in cls.http_method_names:
raise TypeError("You tried to pass in the %s method name as a "
"keyword argument to %s(). Don't do that."
% (key, cls.__name__))
if not hasattr(cls, key):
raise TypeError("%s() received an invalid keyword %r. as_view "
"only accepts arguments that are already "
"attributes of the class." % (cls.__name__, key))
def view(request, *args, **kwargs):
self = cls(**initkwargs)
if hasattr(self, 'get') and not hasattr(self, 'head'):
self.head = self.get
self.request = request
self.args = args
self.kwargs = kwargs
return self.dispatch(request, *args, **kwargs)
view.view_class = cls
view.view_initkwargs = initkwargs
# take name and docstring from class
update_wrapper(view, cls, updated=())
# and possible attributes set by decorators
# like csrf_exempt from dispatch
update_wrapper(view, cls.dispatch, assigned=())
return view
def dispatch(self, request, *args, **kwargs):
# Try to dispatch to the right method; if a method doesn't exist,
# defer to the error handler. Also defer to the error handler if the
# request method isn't on the approved list.
if request.method.lower() in self.http_method_names:
handler = getattr(self, request.method.lower(), self.http_method_not_allowed)
else:
handler = self.http_method_not_allowed
return handler(request, *args, **kwargs)
def http_method_not_allowed(self, request, *args, **kwargs):
logger.warning('Method Not Allowed (%s): %s', request.method, request.path,
extra={
'status_code': 405,
'request': request
}
)
return http.HttpResponseNotAllowed(self._allowed_methods())
def options(self, request, *args, **kwargs):
"""
Handles responding to requests for the OPTIONS HTTP verb.
"""
response = http.HttpResponse()
response['Allow'] = ', '.join(self._allowed_methods())
response['Content-Length'] = '0'
return response
def _allowed_methods(self):
return [m.upper() for m in self.http_method_names if hasattr(self, m)]
class TemplateResponseMixin(object):
"""
A mixin that can be used to render a template.
"""
template_name = None
template_engine = None
response_class = TemplateResponse
content_type = None
def render_to_response(self, context, **response_kwargs):
"""
Returns a response, using the `response_class` for this
        view, with a template rendered with the given context.
If any keyword arguments are provided, they will be
passed to the constructor of the response class.
"""
response_kwargs.setdefault('content_type', self.content_type)
return self.response_class(
request=self.request,
template=self.get_template_names(),
context=context,
using=self.template_engine,
**response_kwargs
)
def get_template_names(self):
"""
Returns a list of template names to be used for the request. Must return
a list. May not be called if render_to_response is overridden.
"""
if self.template_name is None:
raise ImproperlyConfigured(
"TemplateResponseMixin requires either a definition of "
"'template_name' or an implementation of 'get_template_names()'")
else:
return [self.template_name]
class TemplateView(TemplateResponseMixin, ContextMixin, View):
"""
A view that renders a template. This view will also pass into the context
any keyword arguments passed by the URLconf.
"""
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
class RedirectView(View):
"""
A view that provides a redirect on any GET request.
"""
permanent = False
url = None
pattern_name = None
query_string = False
def get_redirect_url(self, *args, **kwargs):
"""
Return the URL redirect to. Keyword arguments from the
URL pattern match generating the redirect request
are provided as kwargs to this method.
"""
if self.url:
url = self.url % kwargs
elif self.pattern_name:
try:
url = reverse(self.pattern_name, args=args, kwargs=kwargs)
except NoReverseMatch:
return None
else:
return None
args = self.request.META.get('QUERY_STRING', '')
if args and self.query_string:
url = "%s?%s" % (url, args)
return url
def get(self, request, *args, **kwargs):
url = self.get_redirect_url(*args, **kwargs)
if url:
if self.permanent:
return http.HttpResponsePermanentRedirect(url)
else:
return http.HttpResponseRedirect(url)
else:
logger.warning('Gone: %s', request.path,
extra={
'status_code': 410,
'request': request
})
return http.HttpResponseGone()
def head(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def options(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def put(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def patch(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
|
gh0std4ncer/doit
|
doc/tutorial/uptodate_callable.py
|
Python
|
mit
| 279
| 0.003584
|
def fake_get_value_from_db():
return 5
def check_outdated():
total = fake_get_value_from_db()
return total > 10
def task_put_more_stuff_in_db():
def put_stuff(): pass
return {'actions': [put_stuff],
            'uptodate': [check_outdated],
}
|
MSFTOSSMgmt/WPSDSCLinux
|
Providers/Scripts/2.6x-2.7x/Scripts/nxIPAddress.py
|
Python
|
mit
| 30,909
| 0.027727
|
#!/usr/bin/env python
#============================================================================
# Copyright (c) Microsoft Corporation. All rights reserved. See license.txt for license information.
#============================================================================
from __future__ import print_function
from __future__ import with_statement
import os
import sys
import tempfile
import re
import platform
import imp
import socket
protocol=imp.load_source('protocol','../protocol.py')
"""
MOF:
[ClassVersion("1.0.0"), FriendlyName("nxIPAddress")]
class MSFT_nxIPAddress : OMI_BaseResource
{
[write] string IPAddress;
[Key] string InterfaceName;
[write,ValueMap{"Automatic", "Static"},Values{"Automatic", "Static"}] string BootProtocol;
[write] string DefaultGateway;
[write,ValueMap{"Present", "Absent"}, Values{"Present", "Absent"}] string Ensure;
[write] integer PrefixLength;
[Key,write,ValueMap{"IPv4", "IPv6"},Values{"IPv4", "IPv6"}] string AddressFamily;
};
"""
def ValidateAddresses(IPAddress,AddressFamily,PrefixLength):
if 'IPv4' in AddressFamily:
ptype=socket.AF_INET
elif 'IPv6' in AddressFamily:
ptype=socket.AF_INET6
else:
return False
try:
socket.inet_pton(ptype,IPAddress)
except:
print('Error: IPAddress "'+IPAddress+'" is invalid.',file=sys.stderr)
return False
if type(PrefixLength) == int or type(PrefixLength) == long :
if 'IPv4' in AddressFamily and ( PrefixLength < 0 or PrefixLength > 32) :
print('Error: PrefixLength "'+ str(PrefixLength) +'" is invalid. Values are 0-32.',file=sys.stderr)
return False
if 'IPv6' in AddressFamily and ( PrefixLength < 0 or PrefixLength > 128) :
print('Error: PrefixLength "'+ str(PrefixLength) +'" is invalid. Values are 0-128.',file=sys.stderr)
return False
return True
def bitNetmaskConversion(PrefixLength):
if PrefixLength == '':
return ''
if type(PrefixLength) != long and type(PrefixLength) != int :
N = int(PrefixLength)
else :
N = PrefixLength
M = int(N / 8) #number of 255 sections (full octets)
MASK = 255
netmaskIP = ""
count = 0
while count < M:
netmaskIP = netmaskIP + "255."
count += 1
if N % 8 != 0:
netmaskIP += str((MASK << (8 - N%8)) & MASK) + "."
count += 1
while count < 4:
netmaskIP = netmaskIP + "0."
count += 1
if netmaskIP[-1] == ".":
netmaskIP = netmaskIP[:-1]
return netmaskIP
def netmaskBitConversion(netmask):
if netmask==None or netmask=='' :
return 0
arrTmp = netmask.strip("'")
arr = arrTmp.split(".")
sumT = 0
for i in arr:
i = int(i)
if i == 255:
sumT += 8
else:
j = 0
while j < 8:
sumT += (i >> j) & 1
j+=1
return sumT
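# Illustrative sanity check (comment only; not part of the original resource):
# the two helpers above are inverses of each other for whole-octet masks, e.g.
#   bitNetmaskConversion(24)              -> '255.255.255.0'
#   netmaskBitConversion('255.255.255.0') -> 24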
def init_vars(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily):
if PrefixLength == None:
PrefixLength=''
if BootProtocol == None or len(BootProtocol)<1:
BootProtocol='Automatic'
else :
BootProtocol=BootProtocol[0].upper()+BootProtocol[1:].lower()
if Ensure == None or len(Ensure)<1:
Ensure='Present'
else :
Ensure=Ensure[0].upper()+Ensure[1:].lower()
if AddressFamily == None or len(AddressFamily)<1:
AddressFamily='IPv4'
else :
AddressFamily=AddressFamily[0].upper()+AddressFamily[1].upper()+AddressFamily[2].lower()+AddressFamily[3:]
if IPAddress == None:
IPAddress=''
if len(IPAddress)>0:
if ValidateAddresses(IPAddress,AddressFamily,PrefixLength) == False:
return False,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily
elif BootProtocol != 'Automatic' and Ensure == 'Present':
        print('ERROR: BootProtocol != Automatic. IPAddress is required.',file=sys.stdout)
return False,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily
if DefaultGateway == None:
DefaultGateway=''
if len(DefaultGateway) > 0 and ValidateAddresses(DefaultGateway,AddressFamily,'') == False:
return False,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily
return True,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily
def Set_Marshall(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily):
ret,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily = \
init_vars(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily)
if ret is False :
return [-1]
MyDistro=GetMyDistro()
retval = MyDistro.Set(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily)
return retval
def Test_Marshall(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily):
ret,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily = \
init_vars(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily)
if ret is False :
return [-1]
MyDistro=GetMyDistro()
return MyDistro.Test(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily)
def Get_Marshall(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily):
arg_names=list(locals().keys())
ret,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily = \
init_vars(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily)
if ret is False :
return [-1,IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily]
retval = 0
MyDistro=GetMyDistro()
(retval, IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily) = MyDistro.Get(IPAddress,InterfaceName,BootProtocol,DefaultGateway,Ensure,PrefixLength,AddressFamily)
Ensure = protocol.MI_String(Ensure.encode("utf-8"))
IPAddress = protocol.MI_String(IPAddress.encode("utf-8"))
AddressFamily= protocol.MI_String(AddressFamily.encode("utf-8"))
InterfaceName = protocol.MI_String(InterfaceName.encode("utf-8"))
BootProtocol = protocol.MI_String(BootProtocol.encode("utf-8"))
DefaultGateway = protocol.MI_String(DefaultGateway.encode("utf-8"))
if type(PrefixLength) == int or type(PrefixLength) == long :
PrefixLength=protocol.MI_Uint32(PrefixLength)
else:
PrefixLength=protocol.MI_Uint32(int(PrefixLength))
retd={}
ld=locals()
for k in arg_names :
retd[k]=ld[k]
return retval, retd
def ReplaceFileContentsAtomic(filepath, contents):
"""
Write 'contents' to 'filepath' by creating a temp file, and replacing original.
"""
handle, temp = tempfile.mkstemp(dir = os.path.dirname(filepath))
if type(contents) == str :
contents=contents.encode('latin-1')
try:
os.write(handle, contents)
except IOError, e:
print('ReplaceFileContentsAtomic','Writing to file ' + filepath + ' Exception is ' + str(e),file=sys.stderr)
return None
finally:
os.close(handle)
try:
os.rename(temp, filepath)
return None
except IOError, e:
print('ReplaceFileContentsAtomic','Renaming ' + temp+ ' to ' + filepath + ' Exception is ' +str(e),file=sys.stderr)
try:
os.remove(filepath)
except IOError, e:
            print('ReplaceFileContentsAtomic','Removing '+ filepath + ' Exception is ' +str(e),file=sys.stderr)
try:
os.rename(temp,filepath)
except IOError, e:
print('ReplaceFileContentsAtomic','Removing '+ filepath + ' Exception is ' +str(e),file=sys.stderr)
return 1
return 0
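# Hedged usage sketch (illustrative only; the path and contents below are
# assumptions, not something this resource writes): atomically replace a
# file's contents so a concurrent reader never sees a half-written file.
#   ReplaceFileContentsAtomic('/tmp/example.conf', 'BOOTPROTO=dhcp\n')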
def GetMyDistro(dist_class_name=''):
"""
Return MyDistro object.
NOTE: Logging is not initialized at this point.
"""
if dist_class_name == '':
|
PetePriority/home-assistant
|
homeassistant/components/wink/fan.py
|
Python
|
apache-2.0
| 3,231
| 0
|
"""
Support for Wink fans.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/fan.wink/
"""
import logging
from homeassistant.components.fan import (
SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, SUPPORT_DIRECTION,
SUPPORT_SET_SPEED, FanEntity)
from homeassistant.components.wink import DOMAIN, WinkDevice
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['wink']
SPEED_AUTO = 'auto'
SPEED_LOWEST = 'lowest'
SUPPORTED_FEATURES = SUPPORT_DIRECTION + SUPPORT_SET_SPEED
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Wink platform."""
import pywink
for fan in pywink.get_fans():
if fan.object_id() + fan.name() not in hass.data[DOMAIN]['unique_ids']:
add_entities([WinkFanDevice(fan, hass)])
class WinkFanDevice(WinkDevice, FanEntity):
"""Representation of a Wink fan."""
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
self.hass.data[DOMAIN]['entities']['fan'].append(self)
def set_direction(self, direction: str) -> None:
"""Set the direction of the fan."""
self.wink.set_fan_direction(direction)
def set_speed(self, speed: str) -> None:
"""Set the speed of the fan."""
self.wink.set_state(True, speed)
def turn_on(self, speed: str = None, **kwargs) -> None:
"""Turn on the fan."""
self.wink.set_state(True, speed)
def turn_off(self, **kwargs) -> None:
"""Turn off the fan."""
self.wink.set_state(False)
@property
def is_on(self):
"""Return true if the entity is on."""
return self.wink.state()
@property
def speed(self) -> str:
"""Return the current speed."""
current_wink_speed = self.wink.current_fan_speed()
if SPEED_AUTO == current_wink_speed:
return SPEED_AUTO
if SPEED_LOWEST == current_wink_speed:
return SPEED_LOWEST
if SPEED_LOW == current_wink_speed:
return SPEED_LOW
if SPEED_MEDIUM == current_wink_speed:
return SPEED_MEDIUM
if SPEED_HIGH == current_wink_speed:
return SPEED_HIGH
return None
@property
def current_direction(self):
"""Return direction of the fan [forward, reverse]."""
return self.wink.current_fan_direction()
@property
def speed_list(self) -> list:
"""Get the list of available speeds."""
wink_supported_speeds = self.wink.fan_speeds()
supported_speeds = []
if SPEED_AUTO in wink_supported_speeds:
supported_speeds.append(SPEED_AUTO)
if SPEED_LOWEST in wink_supported_speeds:
supported_speeds.append(SPEED_LOWEST)
if SPEED_LOW in wink_supported_speeds:
supported_speeds.append(SPEED_LOW)
if SPEED_MEDIUM in wink_supported_speeds:
supported_speeds.append(SPEED_MEDIUM)
if SPEED_HIGH in wink_supported_speeds:
supported_speeds.append(SPEED_HIGH)
return supported_speeds
@property
def supported_features(self) -> int:
"""Flag supported features."""
return SUPPORTED_FEATURES
|
google-research/sound-separation
|
models/train/data_meeting_io.py
|
Python
|
apache-2.0
| 23,738
| 0.007162
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow input/output utilities."""
import collections
import json
import math
import os
import numpy as np
import tensorflow.compat.v1 as tf
class Features(object):
"""Feature keys."""
# Waveform(s) of audio observed at receiver(s).
RECEIVER_AUDIO = 'receiver_audio'
# Images of each source at each microphone, including reverberation.
# Images are real valued with shape [sources, microphones, length].
SOURCE_IMAGES = 'source_images'
# Boolean diarization labels of shape (sources, length) which indicates
# whether a source is active or not. For nonexisting source, it is all zeros.
DIARIZATION_LABELS = 'diarization_labels'
# Speaker indices (global indices which are contiguous over all training data
# starting with 0) that are present in this meeting or meeting chunk with
# shape (sources,). If number of speakers present in the meeting is less
# than sources, for a non-existing speaker/source, the speaker index is
# set to -1. Note that, for a meeting sub-block, we still have all the
# speaker indices in the meeting even if not all the speakers are present
# in that meeting sub-block.
SPEAKER_INDEX = 'speaker_indices'
def get_inference_spec(num_receivers=1,
num_samples=None):
"""Returns a specification of features in tf.Examples in roomsim format."""
spec = {}
spec[Features.RECEIVER_AUDIO] = tf.FixedLenFeature(
[num_receivers, num_samples], tf.float32)
return spec
def get_roomsim_spec(num_sources,
num_receivers,
num_samples):
"""Returns a specification of features in tf.Examples in roomsim format.
Args:
num_sources: Expected number of sources.
num_receivers: Number of microphones in array.
num_samples: Expected length of sources in samples. 'None' for variable.
Returns:
Feature specifications suitable to pass to tf.parse_example.
"""
spec = {}
spec[Features.RECEIVER_AUDIO] = tf.FixedLenFeature(
[num_receivers, num_samples], tf.float32)
spec[Features.SOURCE_IMAGES] = tf.FixedLenFeature(
[num_sources, num_receivers, num_samples], tf.float32)
return spec
def placeholders_from_spec(feature_spec):
"""Returns placeholders compat
|
ible with a given feature spec."""
placeholders = {}
for key, feature in feature_spec.items():
placeholders[key] = tf.placeholder(dtype=feature.dtype,
shape=[1] + feature.shape,
name=key)
return placeholders
def _read_meeting_list(meeting_list, meeting_length_type):
"""Reads meeting list from json file to get necessary information.
Args:
meeting_list: A meeting list read from a json file.
meeting_length_type: One of 'maximum', 'minimum' or 'average'.
Since typically meeting lengths are not fixed, we can
set the training/eval length to the maximum, minimum or average meeting
length in the json file based on the value of this argument. We
eventually pad or clip individual meetings to attain the desired constant
meeting length in our data reading pipeline.
Returns:
num_meetings: Number of meetings.
max_num_spk_per_meeting: Maximum number of speakers in a meeting.
max_num_utt_per_spk: Maximum number of utterances per speaker.
max_dia_seg_per_utt: Maximum diarization segments per utterance.
max_utt_length: Maximum utterance length.
meeting_length: Meeting length that will be used.
speaker_ids: A list of speaker ids that appear in meetings.
"""
max_num_spk_per_meeting = 0
max_num_utt_per_meeting = 0
meeting_lengths = []
speaker_id_to_count = collections.defaultdict(int)
num_meetings = len(meeting_list)
total_spk = 0
total_utt = 0
max_utt_length = 0
max_num_utt_per_spk = 0
max_dia_seg_per_utt = 0
for one_meeting in meeting_list:
sources_start_end = one_meeting['utterance_start_end']
meeting_length = int(one_meeting['duration'])
num_utt_in_meeting = len(sources_start_end)
max_num_utt_per_meeting = max(max_num_utt_per_meeting, num_utt_in_meeting)
utt2spk = []
spk2wavs = collections.defaultdict(list)
spk_utt_idx = collections.defaultdict(int)
for start, end, spkid, wav_path in sources_start_end:
max_utt_length = max(max_utt_length, end - start)
utt2spk.append(spkid)
spk2wavs[spkid].append(wav_path)
speaker_id_to_count[spkid] += 1
spk_utt_idx[spkid] += 1
diarization_info = \
one_meeting['diarization_label'][spkid][spk_utt_idx[spkid] - 1]
num_seg_in_utt = len(diarization_info)
max_dia_seg_per_utt = max(max_dia_seg_per_utt, num_seg_in_utt)
speakers_in_meeting = list(set(utt2spk))
num_spk = len(speakers_in_meeting)
for spkid in speakers_in_meeting:
max_num_utt_per_spk = max(max_num_utt_per_spk,
len(set(spk2wavs[spkid])))
max_num_spk_per_meeting = max(max_num_spk_per_meeting, num_spk)
total_spk += num_spk
total_utt += num_utt_in_meeting
meeting_lengths.append(meeting_length)
if meeting_length_type == 'maximum':
meeting_length = int(math.ceil(np.max(meeting_lengths)))
elif meeting_length_type == 'minimum':
meeting_length = int(math.floor(np.min(meeting_lengths)))
elif meeting_length_type == 'average':
meeting_length = int(round(np.mean(meeting_lengths)))
elif isinstance(meeting_length_type, int):
meeting_length = meeting_length_type
else:
raise ValueError(f'Unknown meeting_length_type={meeting_length_type}')
speaker_ids = sorted(speaker_id_to_count.keys())
tf.logging.info('Read %s meetings from json file.', num_meetings)
tf.logging.info('Average number of speakers per meeting = %f.',
total_spk / num_meetings)
tf.logging.info('Average number of utterances per speaker = %f.',
total_utt / total_spk)
return (num_meetings, max_num_spk_per_meeting, max_num_utt_per_spk,
max_dia_seg_per_utt, max_utt_length,
meeting_length, speaker_ids)
def _pad_mics_tf(signal, new_mics):
"""Pads new mic channels to an input tensor and returns the updated tensor.
Args:
signal: A tf.tensor of shape (input_mics, samples)
new_mics: The number of new mic channels to be added (integer scalar tensor)
Returns:
padded_signal: A tf.tensor of shape (input_mics + new_mics, samples)
"""
# Take first new_mics channels and shift them by 1 sample.
new_inputs = tf.roll(signal[:new_mics, :], shift=1, axis=-1)
# Add noise 1e-3 times the RMS value in the signal.
noise_scale = 1e-3 * tf.sqrt(tf.reduce_mean(tf.square(new_inputs)))
new_inputs += noise_scale * tf.random.normal(tf.shape(new_inputs))
return tf.concat((signal, new_inputs), axis=0)
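# Illustrative example (comment only; not part of the original pipeline):
# padding a 2-mic signal up to 4 mics returns a tensor of shape (4, samples);
# the added channels are sample-shifted, lightly noised copies of the first
# two input channels.
#   padded = _pad_mics_tf(tf.zeros([2, 16000]), new_mics=2)  # shape (4, 16000)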
def json_to_dataset(json_file,
batch_size,
parallel_readers=tf.data.experimental.AUTOTUNE,
randomize_order=False,
num_examples=-1,
prefetch_buffer_size=tf.data.experimental.AUTOTUNE,
shuffle_buffer_size=5,
repeat=True,
num_mics=1,
sample_rate=16000,
use_relative_path=True,
meeting_length_type='maximum',
num_meeting_subdivisions=1,
sensor_noise_range=(0.0, 0.0)):
r"""Fetches features from a dictionary and source .wav files.
Args:
json_file: A json file containing meeting information.
batch_size: The number of examples to read.
parallel_readers: Number of data
|
ganow/gq
|
gq/missing.py
|
Python
|
mit
| 788
| 0.002538
|
class MethodMissing(object):
def __getattr__(self, name):
try:
return self.__getattribute__(name)
except AttributeError:
def method(*args, **kw):
return self.method_missing(name, *args, **kw)
return method
def method_missing(self, name, *args, **kw):
raise AttributeError("%r object has no attribute %r" %
(self.__class__, name))
class ValMissing(object):
    def __getattr__(self, name):
try:
return self.__getattribute__(name)
except AttributeError:
return self.val_missing(name)
def val_missing(self, name):
raise AttributeError("%r object has no attribute %r" %
(self.__class__, name))
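# Hedged usage sketch (illustrative subclass, not part of the original module):
# unknown attribute access on a MethodMissing subclass is routed through
# method_missing, mirroring Ruby's method_missing hook.
#   class Echo(MethodMissing):
#       def method_missing(self, name, *args, **kw):
#           return (name, args, kw)
#   Echo().hello(1, x=2)  # -> ('hello', (1,), {'x': 2})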
|
ilathid/ilathidEngine
|
engine/music.py
|
Python
|
epl-1.0
| 6,871
| 0.015282
|
#Music Class and support functions
import pygame
import parameters
from filemanager import filemanager
from pygame.locals import *
from pygame import *
from pygame.mixer import *
#Pygame Module for Music and Sound
pigmusic = None
currentStdMusic=None
currentMenuMusic=None
currentType = None
def initmusic():
global pigmusic
#Init pygame mixer and music
print "music init GO"
try:
if pygame.mixer and not pygame.mixer.get_init():
pygame.mixer.init()
if not pygame.mixer:
print 'Warning, sound disabled'
else:
pigmusic=pygame.mixer.music
except (pygame.error):
print 'Warning, unable to init music'
print "music init OUT ",pigmusic
def upmusic():
global pigmusic
if not pigmusic:
return
vol=pigmusic.get_volume()
if vol <= 0.9:
pigmusic.set_volume(vol+0.1)
def downmusic():
global pigmusic
if not pigmusic:
return
vol=pigmusic.get_volume()
if vol > 0.0:
pigmusic.set_volume(vol-0.1)
def stopmusic():
global pigmusic
if not pygame.mixer.get_init():
return
if not pigmusic:
return
if pigmusic.get_busy():
pigmusic.stop()
def setvolume(vol):
global pigmusic
pigmusic.set_volume(vol)
def getcurrentStdMusic():
global currentStdMusic
return currentStdMusic
def getcurrentMenuMusic():
global currentMenuMusic
return currentMenuMusic
def returtostdmusic():
#called when we want to force the music to play std music
cur=currentStdMusic
cur.playmusic()
class Music:
def __init__(self, name, filename, musictype='std', vol=0.5):
self._name=name
self._file=filename
self._type=musictype
self._vol=vol
def playmusic(self,loop=-1):
global pigmusic,currentStdMusic,currentMenuMusic,currentType
print "music play",self._file
if not pigmusic:
initmusic()
if self._type == 'std':
#print "music std type current is ",currentType
if not currentStdMusic:
#print "music std no currentStdMusic, we create it with ",self._file
currentStdMusic=self
#print "is pigmusic busy ? ",pigmusic.get_busy()
if pigmusic.get_busy():
#print "music std, music is busy"
if currentType == 'std':
#print "music std, currentType is std isn't it : ",currentType
if currentStdMusic.getfile()==self._file:
#print "music std, same music don't do anything"
return
else:
#print "music std, not the same we change, currentStdMusic=",self._file
currentStdMusic=self
#print "is pigmusic busy ? ",pigmusic.get_busy()
if pigmusic.get_busy():
print " music std, music is busy"
if currentType == 'std':
print " music std, currentType is std isn't it : ",currentType
if currentStdMusic.getfile()==self._file:
print " music std, same music don't do anything"
return
else:
print " music std, not the same we change, cu
|
rrentStdMusic=",self._file
currentStdMusic=self
else:
print " music std, current type is menu isn't it :", currentType ," so we change it to std\n"
#we change menu slide to standard slide
currentType='std'
else:
#print "music std, current type is menu isn't it :", currentType ," so we change it to std\n"
#we change menu slide to standard slide
currentType='std'
else:
#print "music std, music is not busy we start it"
currentType='std'
currentStdMusic=self
else:
#print "music menu type current is ",currentType
if not currentMenuMusic:
#print "music menu no currentMenuMusic, we create it with ",self._file
currentMenuMusic=self
if pigmusic.get_busy():
#print "music menu, music is busy"
if currentType == 'menu':
#print "music menu, currentType is menu isn't it : ",currentType
if currentMenuMusic.getfile()==self._file:
#print "music menu, same music don't do anything"
#return
pass
else:
#print "music menu, not the same we change, currentMenuMusic=",self._file
currentMenuMusic=self
if pigmusic.get_busy():
print " music menu, music is busy"
if currentType == 'menu':
print " music menu, currentType is menu isn't it : ",currentType
if currentMenuMusic.getfile()==self._file:
print " music menu, same music don't do anything"
return
else:
print " music menu, not the same we change, currentMenuMusic=",self._file
currentMenuMusic=self
else:
print " music menu, current type is std isn't it :", currentType ," so we change it to menu\n"
#we change standard slide to menu slide
currentType='menu'
else:
#print "music menu, current type is std isn't it :", currentType ," so we change it to menu\n"
#we change standard slide to menu slide
currentType='menu'
else:
#print "music menu ,music is not busy we start it"
currentType='menu'
currentMenuMusic=self
pigmusic.load(filemanager.find_music(self._file))
pigmusic.set_volume(self._vol)
pigmusic.play(loop)
def getfile(self):
return self._file
def getname(self):
return self._name
def stopmusic(self):
print "we stop music!!!!! ",self._file
global pigmusic
if not pigmusic:
return
if pigmusic.get_busy():
if self._type == 'std':
if currentStdMusic.getfile()==self._file:
pigmusic.stop()
else:
if currentMenuMusic.getfile()==self._file:
pigmusic.stop()
|
jwilder/nginx-proxy
|
test/test_multiple-hosts.py
|
Python
|
mit
| 562
| 0.003559
|
import pytest
def test_unknown_virtual_host_is_503(docker_compose, nginxproxy):
r = nginxproxy.get("http://unknown.nginx-proxy.tld/port")
assert r.status_code == 503
def test_webA_is_forwarded(docker_compose, nginxproxy):
r = nginxproxy.get("http://webA.nginx-proxy.tld/port")
assert r.status_code == 200
assert r.text == "answer from port 81\n"
def test_webB_is_forwarded(docker_compose, nginxproxy):
r = nginxproxy.get("http://webB.nginx-proxy.tld/port")
    assert r.status_code == 200
assert r.text == "answer from port 81\n"
|
andreas-kowasch/DomainSearch
|
DomainSearchViewer/additional/Scheduler.py
|
Python
|
bsd-2-clause
| 3,767
| 0.003186
|
# -*- coding: utf-8 -*-
"""
The scheduler is responsible for the module handling.
"""
import modules
from importlib import import_module
from additional.Logging import Logging
################################################################################
class Scheduler():
"""
This class instantiates the modules, takes care of the module's versions
and gets the module's select queries.
"""
    # dictionary of instantiated modules
_instantiated_modules = {}
def __init__(self, db):
self._db = db
self._log = Logging(self.__class__.__name__).get_logger()
########################################################################
self._instantiate_modules()
self._check_module_versions()
############################################################################
def _instantiate_modules(self):
"""
Method to instantiate modules.
All modules must contain a class with the exact same name as the module.
This class must implement the abstract base class (abc) DatasourceBase.
"""
# finds all modules to import
for module_name in modules.__all__:
            # imports and instantiates the module by name
module = import_module('modules.' + module_name)
module = getattr(module, module_name)()
# makes sure the module implements DatasourceBase
if not isinstance(module, modules.DatasourceBase):
raise SubClassError(
                    'Module is not an instance of DatasourceBase: {}'
.format(module.__class__.__name__))
            # adds the module to the list of instantiated modules
self._instantiated_modules[module.__class__.__name__] = module
############################################################################
def _check_module_versions(self):
"""
Method to check module's versions.
"""
for module_name, module in self._instantiated_modules.items():
module_version = module.get_version()
# searches module's version in the database
result = self._db.select_data('''
SELECT version
FROM versions
WHERE module = %s''', (module_name,))
if not result:
# appends the module with it's version to the database
self._db.insert_data('''
INSERT INTO versions (module, version)
VALUES (%s, %s)''', (module_name, module_version))
elif result[0][0] < module_version:
                # updates the module's version entry
                self._db.update_data('''
UPDATE versions
SET version = %s
WHERE module = %s''', (module_version, module_name,))
elif result[0][0] > module_version:
                raise VersionError('Old module version detected! ' +
'Module: {} - Expected: {} - Found: {}'
                                   .format(module_name, result[0][0], module_version))
############################################################################
    def get_module_select_queries(self):
"""
Returns the module's search queries.
"""
queries = {}
for module_name, module in self._instantiated_modules.items():
queries[module_name] = module.get_queries('select')
return queries
################################################################################
class SubClassError(Exception):
"""
Exception for module subclass errors.
"""
class VersionError(Exception):
"""
Exception for module version errors.
"""
|
anythingrandom/eclcli
|
eclcli/dh/v2/usage.py
|
Python
|
apache-2.0
| 2,651
| 0.010562
|
import copy
import six
from eclcli.common import command
from eclcli.common import utils
class ListUsage(command.Lister):
def get_parser(self, prog_name):
parser = super(ListUsage, self).get_parser(prog_name)
parser.add_argument(
"--From",
help="Date to list usage from",
metavar='<from>'
)
parser.add_argument(
"--to",
help="Date to list usage upto. Month of the parameter should be same as 'from'",
metavar='<to>'
)
parser.add_argument(
"--license-type",
help="Name of license type to list",
metavar='<license-type>'
)
return parser
def take_action(self, parsed_args):
dh_client = self.app.client_manager.dh
search_opts = {
"from":parsed_args.From,
"to":parsed_args.to,
"license_type":parsed_args.license_type,
}
        self.log.debug('search options: %s',search_opts)
columns = [
'ID', 'Type', 'Value', 'Unit', 'Name', 'Has License Key', 'Resource ID'
]
column_headers = columns
data = dh_client.usages.list(search_opts=search_opts)
return (column_headers,
(utils.get_item_properties(
s, columns
) for s in data))
class ShowUsageHistory(command.ShowOne):
def get_parser(self, prog_name):
        parser = super(ShowUsageHistory, self).get_parser(prog_name)
parser.add_argument(
"usage",
help="Usage id whose history to be shown",
metavar='<usage>'
)
parser.add_argument(
"--From",
help="Date to list usage from",
metavar='<from>'
)
parser.add_argument(
"--to",
help="Date to list usage upto. Month of the parameter should be same as 'from'",
metavar='<to>'
)
return parser
def take_action(self, parsed_args):
dh_client = self.app.client_manager.dh
search_opts = {
"from":parsed_args.From,
"to":parsed_args.to
}
self.log.debug('search options: %s',search_opts)
rows = [
'Tenant ID',
'Unit',
'Resource ID',
'License Type',
'Histories'
]
row_headers = rows
data = dh_client.usages.get_histories(search_opts=search_opts,usage=parsed_args.usage)
return (row_headers, (utils.get_item_properties(
data, rows
)))
|
Annubis45/PhotoDisplayer
|
cgi-bin/index.py
|
Python
|
gpl-2.0
| 2,228
| 0.04623
|
#!/usr/bin/python3
# -*- coding: utf-8 -*
import cgi
import base64
import random
import requests, json
def getCurrentPhoto(currentPhotoMD5):
response = requests.get("http://localhost:9200/photodisplayer/photo/"+currentPhotoMD5)
jsonPhoto = json.loads(response.text)
return jsonPhoto
def add_note(currentPhotoMD5,ajout) :
jsonCurrentPhoto=getCurrentPhoto(currentPhotoMD5)
note = jsonCurrentPhoto["_source"]["note"]
jsonCurrentPhoto["_source"]["note"] = note+ajout
returnJson=jsonCurrentPhoto["_source"]
query2 = json.dumps(returnJson)
print(query2)
url="http://localhost:9200/photodisplayer/photo/"+jsonCurrentPhoto["_id"];
response2 = requests.put(url, data=query2)
print(json.loads(response2.text))
def ban(currentPhotoMD5):
jsonCurrentPhoto=getCurrentPhoto(currentPhotoMD5)
note = jsonCurrentPhoto["_source"]["note"]
jsonCurrentPhoto["_source"]["note"] = 0
returnJson=jsonCurrentPhoto["_source"]
query2 = json.dumps(returnJson)
url="http://localhost:9200/photodisplayer/photo/"+jsonCurrentPhoto["_id"];
response2 = requests.put(url, data=query2)
def getRandom():
query = json.dumps(
{
"query": {
"function_score": {
"functions": [
{
"random_score": {},
"weight": 1
},
{
"field_value_factor": {
"field": "note"
},
"weight": 1
}
],
"score_mode": "multiply"
}
}
})
response = requests.get("http://localhost:9200/photodisplayer/photo/_search?size=
|
1", data=query)
results = json.loads(response.text)
photoMD5=results["hits"]["hits"][0]["_id"]
return photoMD5
form = cgi.FieldStorage()
print("Content-type: text/html; charset=utf-8\n")
#Init
var = 0;
html = ""
#Get the action
action = form.getvalue("action")
idcurrent = form.getvalue("idcurrent")
idprevious = form.getvalue("previous")
#Switch "action"
if action=="ban":
ban(idcurrent)
html="ok"
if action=="next":
html=getRandom();
if action=="like":
add_note(idcurrent,1)
html="ok"
if action=="dislike":
add_note(idcurrent,-1)
html="ok"
if action=="" or str(action) == "None":
getRandom();
mon_fichier = open("main.html", "r")
contenu = mon_fichier.read()
html=contenu
#Return the content
#print("<!--" +str(action) +"-->")
print(html)
|
yehialicious/pyrf
|
pyrf.py
|
Python
|
gpl-3.0
| 5,240
| 0.033206
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#rapidshare config
USER = ''
PASS = ''
USSL = False
import sys
import os
import urllib
import time
import re
import argparse
import md5
def say(message):
print message
def error(message):
print 'ERROR: ' + message
def progress(done, ind_size, size):
total = float((done * ind_size * 100) / size)
progress = float(done * ind_size / 1024)
    speed = (float(done * ind_size) / float(time.time() - starttime)) / 1024
sys.stdout.write('Progress: %.0f%%, Complete: %.2fKb, Speed: %.3fkb/s\r' % (total, progress, speed))
sys.stdout.flush()
def download(src, dest):
global starttime
starttime = time.time()
filename, headers = urllib.urlretrieve(src, dest, progress)
sys.stdout.write('Complete: 100%\n')
sys.stdout.flush()
for a in headers:
if a.lower() == 'content-disposition':
filename = headers[a][headers[a].find('filename=') + 9:]
urllib.urlcleanup()
return filename
# Based on rsapiget by George Notaras
def rsdl(link, USER=None, PASS=None):
try:
rapidshare, files, fileid, filename = link.rsplit('/') [-4:]
except ValueError:
error('Invalid Rapidshare link')
return
if not rapidshare.endswith('rapidshare.com') or files != 'files':
error('Invalid Rapidshare link')
return
if USSL:
proto = 'https'
else:
proto = 'http'
say('Downloading: %s' % link)
if filename.endswith('.html'):
target_filename = filename[:-5]
else:
target_filename = filename
say('Save file as: %s' % target_filename)
params = {
'sub': 'download',
'fileid': fileid,
'filename': filename,
'try': '1',
'withmd5hex': '0',
}
if USER and PASS:
params.update({
'login': USER,
'password': PASS,
})
params_string = urllib.urlencode(params)
api_url = '%s://api.rapidshare.com/cgi-bin/rsapi.cgi' % proto
conn = urllib.urlopen('%s?%s' % (api_url, params_string))
data = conn.read()
conn.close()
try:
key, value = data.split(':')
except ValueError:
error(data)
return
try:
server, dlauth, countdown, remote_md5sum = value.split(',')
except ValueError:
error(data)
return
#free account wait
if int(countdown):
for t in range(int(countdown), 0, -1):
sys.stdout.write('Waiting for %s seconds..\r' % t)
sys.stdout.flush()
time.sleep(1)
say('Waited for %s seconds, downloading' % countdown)
dl_params = {
'sub': 'download',
'fileid': fileid,
'filename': filename,
}
if USER and PASS:
dl_params.update({
'login': USER,
'password': PASS,
})
else:
dl_params.update({
'dlauth': dlauth,
})
dl_params_string = urllib.urlencode(dl_params)
download_link = '%s://%s/cgi-bin/rsapi.cgi?%s' % (proto, server, dl_params_string)
download(download_link, target_filename)
def mfdl(link):
conn = urllib.urlopen(link)
data = conn.read()
conn.close()
dlink = re.search("kNO = \"(.*)\";", data).group(0)
dlink = dlink[7:-2]
filename = dlink.split('/')[5:]
say('Downloading: %s' % filename[0])
download(dlink, filename[0])
def hfdl(link, USER=None, PASS=None):
apilink = 'http://api.hotfile.com/?action=getdirectdownloadlink'
if USER and PASS:
conn = urllib.urlopen(apilink + '&username=' + USER + '&password=' + PASS)
data = conn.read()
conn.close()
if "premium required" in data:
error('A premium account is required to download from hotfile.')
return
        filename = link.rstrip('/').split('/')[-1]  # derive the target filename from the link; filename was previously undefined here
        say('Downloading: %s' % filename)
download(data, filename)
def checkLink(link, USER=None, PASS=None):
if "rapidshare.com" in link:
rsdl(link, USER, PASS)
elif "mediafire.com" in link:
mfdl(link)
elif "hotfile.com" in link:
if USER or PASS:
hfdl(link, USER, PASS)
else:
error('You need to enter a username and password for hotfile')
return
elif "http://" in link:
filename = link.split('/')
filename = filename[len(filename)-1]
say('Downloading: %s' % filename)
download(link, filename)
else:
error('Invalid or unsupported link')
return
def main():
parser = argparse.ArgumentParser(description='Command-line Python Rapidshare, Mediafire and Hotfile downloader.')
parser.add_argument('file_url')
parser.add_argument('--user', '-u')
parser.add_argument('--password', '-p')
USER = parser.parse_args().user
PASS = parser.parse_args().password
file_link = parser.parse_args().file_url
if ".txt" in file_link and not "http://" in file_link:
f = open(file_link, 'r')
if f.read(1) == '\xef':
f.seek(3)
file_list = list(f.readlines())
for item in file_list:
checkLink(item.strip('\n'), USER, PASS)
else:
checkLink(file_link, USER, PASS)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
error('\nAborted')
sys.exit(1)
|
will-hart/django-citations
|
citations/migrations/0006_auto__add_field_reference_year.py
|
Python
|
mit
| 2,201
| 0.008178
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Reference.year'
db.add_column(u'citations_reference', 'year',
self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Reference.year'
db.delete_column(u'citations_reference', 'year')
models = {
u'citations.reference': {
'Meta': {'object_name': 'Reference'},
'abstract': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'edition': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isbn': ('django.db.models.fields.CharField', [], {'max_length': '17', 'null': 'True', 'blank': 'True'}),
'place': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'publisher': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'series': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'BK'", 'max_length': '3'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'volume': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['citations']
|
WmHHooper/aima-python
|
submissions/aardvark/myNN.py
|
Python
|
mit
| 8,182
| 0.004033
|
# References:
#
# https://www.tensorflow.org/guide/low_level_intro
#
# only needed for python 2.7
# from __future__ import absolute_import
# from __future__ import division
# from __future__ import print_function
import numpy as np
from numpy import array
from numpy import float32
# a complete input set on 7 bits
# useful for training various sorts of data
bin7 = array([
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0, 1],
[0, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 1],
[0, 0, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 0, 1, 1],
[0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 0, 1],
[0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 1],
[0, 0, 1, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 1, 1],
[0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 1, 0, 1],
[0, 0, 1, 0, 1, 1, 0],
[0, 0, 1, 0, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 1],
[0, 0, 1, 1, 0, 1, 0],
[0, 0, 1, 1, 0, 1, 1],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 1],
[0, 0, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1],
[0, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 1],
[0, 1, 0, 0, 1, 0, 0],
[0, 1, 0, 0, 1, 0, 1],
[0, 1, 0, 0, 1, 1, 0],
[0, 1, 0, 0, 1, 1, 1],
[0, 1, 0, 1, 0, 0, 0],
[0, 1, 0, 1, 0, 0, 1],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 1],
[0, 1, 0, 1, 1, 0, 0],
[0, 1, 0, 1, 1, 0, 1],
[0, 1, 0, 1, 1, 1, 0],
[0, 1, 0, 1, 1, 1, 1],
[0, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0, 1],
[0, 1, 1, 0, 0, 1, 0],
[0, 1, 1, 0, 0, 1, 1],
[0, 1, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 1, 0, 1],
[0, 1, 1, 0, 1, 1, 0],
[0, 1, 1, 0, 1, 1, 1],
[0, 1, 1, 1, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 1],
[0, 1, 1, 1, 0, 1, 0],
[0, 1, 1, 1, 0, 1, 1],
[0, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 0, 1],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 1, 0],
[1, 0, 0, 0, 0, 1, 1],
[1, 0, 0, 0, 1, 0, 0],
[1, 0, 0, 0, 1, 0, 1],
[1, 0, 0, 0, 1, 1, 0],
[1, 0, 0, 0, 1, 1, 1],
[1, 0, 0, 1, 0, 0, 0],
[1, 0, 0, 1, 0, 0, 1],
[1, 0, 0, 1, 0, 1, 0],
[1, 0, 0, 1, 0, 1, 1],
[1, 0, 0, 1, 1, 0, 0],
[1, 0, 0, 1, 1, 0, 1],
[1, 0, 0, 1, 1, 1, 0],
[1, 0, 0, 1, 1, 1, 1],
[1, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 1],
[1, 0, 1, 0, 0, 1, 0],
[1, 0, 1, 0, 0, 1, 1],
[1, 0, 1, 0, 1, 0, 0],
[1, 0, 1, 0, 1, 0, 1],
[1, 0, 1, 0, 1, 1, 0],
[1, 0, 1, 0, 1, 1, 1],
[1, 0, 1, 1, 0, 0, 0],
[1, 0, 1, 1, 0, 0, 1],
[1, 0, 1, 1, 0, 1, 0],
[1, 0, 1, 1, 0, 1, 1],
[1, 0, 1, 1, 1, 0, 0],
[1, 0, 1, 1, 1, 0, 1],
[1, 0, 1, 1, 1, 1, 0],
[1, 0, 1, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 1],
[1, 1, 0, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 1, 1],
[1, 1, 0, 0, 1, 0, 0],
[1, 1, 0, 0, 1, 0, 1],
[1, 1, 0, 0, 1, 1, 0],
[1, 1, 0, 0, 1, 1, 1],
[1, 1, 0, 1, 0, 0, 0],
[1, 1, 0, 1, 0, 0, 1],
[1, 1, 0, 1, 0, 1, 0],
[1, 1, 0, 1, 0, 1, 1],
[1, 1, 0, 1, 1, 0, 0],
[1, 1, 0, 1, 1, 0, 1],
[1, 1, 0, 1, 1, 1, 0],
[1, 1, 0, 1, 1, 1, 1],
[1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 1],
[1, 1, 1, 0, 0, 1, 0],
[1, 1, 1, 0, 0, 1, 1],
[1, 1, 1, 0, 1, 0, 0],
[1, 1, 1, 0, 1, 0, 1],
[1, 1, 1, 0, 1, 1, 0],
[1, 1, 1, 0, 1, 1, 1],
[1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 1],
[1, 1, 1, 1, 0, 1, 0],
[1, 1, 1, 1, 0, 1, 1],
[1, 1, 1, 1, 1, 0, 0],
[1, 1, 1, 1, 1, 0, 1],
[1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
])
'''
Train the network to count to 3
column 0: less than 3
column 1: exactly 3
column 2: more than 3
'''
count3 = array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
    [1, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
    [1, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 1, 0],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[1, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 1, 0],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[1, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 1, 0],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 1, 0],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
])
# this takes a looong time to index, and
# python may crash several times before indexing is complete
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
model = Sequential()
model.add(Dense(8,
activation=keras.activations.sigmoid,
))
model.add(Dense(3,
activation=keras.activations.sigmoid,
))
model.compile(
optimizer=tf.train.AdamOptimizer(0.001),
# loss=keras.losses.categorical_crossentropy,
loss=keras.losses.mse,
metrics=[keras.metrics.binary_accuracy]
)
# This is the process I used to train my weights
# model.fit(bin7, count3, epochs=2000)
# myWeights = model.get_weights()
# np.set_printoptions(suppress=True)
# np.set_printoptions(precision=2)
# print('myWeights =', myWeights)
# These are the weights I got, pretty-printed
myWeights = [
# first layer, 7x8
array([[ 1.2 , -1.16, -1.97, 2.16, 0.97, 0.86, -1.2 , 1.12],
[ 1.21, -1.17, -1.97, 2.16, 0.84, 0.76, -1.19, 1.22],
[ 1.19, -1.2 , -1.98, 2.15, 0.87, 0.84, -1.19, 1.13],
[ 1.21, -1.2 , -1.97, 2.15, 0.89, 0.8 , -1.2 , 1.16],
[ 1.21, -1.12, -1.97, 2.16, 0.99, 0.8 , -1.21, 1.18],
[ 1.23, -1.09, -1.98, 2.15, 1.12, 0.81, -1.24, 1.13],
[ 1.24, -1.11, -1.99, 2.14, 1. , 0.77, -1.23, 1.17]],
dtype=float32),
# biases for 8 intermediate nodes
array([-4.57, 3.13, 4. , -4.44, -1.08, -3.11, 4.39, -4.35],
dtype=float32),
# second layer, 8x3
array([[-2.37, -1.54, 2.82],
[ 2.57, -0.09, -3. ],
[ 3.42, -2.18, -4.26],
[-3.27, 1.66, 2.1 ],
[-1.64, 0.12, -0.26],
[-1.85, -1.73, 2.25],
[ 2.71, 0.95, -4.85],
[-2.82, -1.4 , 2.69]], dtype=float32),
# biases for 3 output nodes
array([ 0.21, -0.39, -1.22], dtype=float32)
]
# test the model and your weights
# model.fit(bin7, count3, epochs=1)
# model.set_weights(myWeights)
# predict3 = model.predict(bin7)
# np.set_printoptions(suppress=True)
# np.set_printoptions(precision=1)
# print('prediction =', predict3)
Examples = {
'count3' : [ bin7, count3, model, myWeights ],
|
reclosedev/requests-cache
|
requests_cache/backends/gridfs.py
|
Python
|
bsd-2-clause
| 3,582
| 0.002792
|
"""
.. image::
../_static/mongodb.png
`GridFS <https://docs.mongodb.com/manual/core/gridfs/>`_ is a specification for storing large files
(>16 MB) in MongoDB. See :py:mod:`~requests_cache.backends.mongodb` for more general info on MongoDB.
API Reference
^^^^^^^^^^^^^
.. automodsumm:: requests_cache.backends.gridfs
:classes-only:
:nosignatures:
"""
from logging import getLogger
from threading import RLock
from gridfs import GridFS
from gridfs.errors import CorruptGridFile, FileExists
from pymongo import MongoClient
from .._utils import get_valid_kwargs
from .base import BaseCache, BaseStorage
from .mongodb import MongoDict
logger = getLogger(__name__)
class GridFSCache(BaseCache):
"""GridFS cache backend.
Example:
>>> session = CachedSession('http_cache', backend='gridfs')
Args:
db_name: Database name
        connection: :py:class:`pymongo.MongoClient` object to reuse instead of creating a new one
kwargs: Additional keyword arguments for :py:class:`pymongo.MongoClient`
"""
def __init__(self, db_name: str, **kwargs):
super().__init__(**kwargs)
        self.responses = GridFSPickleDict(db_name, **kwargs)
self.redirects = MongoDict(
db_name, collection_name='redirects', connection=self.responses.connection, **kwargs
)
def remove_expired_responses(self, *args, **kwargs):
with self.responses._lock:
return super().remove_expired_responses(*args, **kwargs)
class GridFSPickleDict(BaseStorage):
"""A dictionary-like interface for a GridFS database
Args:
db_name: Database name
collection_name: Ignored; GridFS internally uses collections 'fs.files' and 'fs.chunks'
connection: :py:class:`pymongo.MongoClient` object to reuse instead of creating a new one
kwargs: Additional keyword arguments for :py:class:`pymongo.MongoClient`
"""
def __init__(self, db_name, collection_name=None, connection=None, **kwargs):
super().__init__(**kwargs)
connection_kwargs = get_valid_kwargs(MongoClient, kwargs)
self.connection = connection or MongoClient(**connection_kwargs)
self.db = self.connection[db_name]
self.fs = GridFS(self.db)
self._lock = RLock()
def __getitem__(self, key):
try:
with self._lock:
result = self.fs.find_one({'_id': key})
if result is None:
raise KeyError
return self.serializer.loads(result.read())
except CorruptGridFile as e:
logger.warning(e, exc_info=True)
raise KeyError
def __setitem__(self, key, item):
value = self.serializer.dumps(item)
encoding = None if isinstance(value, bytes) else 'utf-8'
with self._lock:
try:
self.fs.delete(key)
self.fs.put(value, encoding=encoding, **{'_id': key})
# This can happen because GridFS is not thread-safe for concurrent writes
except FileExists as e:
logger.warning(e, exc_info=True)
def __delitem__(self, key):
with self._lock:
res = self.fs.find_one({'_id': key})
if res is None:
raise KeyError
self.fs.delete(res._id)
def __len__(self):
return self.db['fs.files'].estimated_document_count()
def __iter__(self):
for d in self.fs.find():
yield d._id
def clear(self):
self.db['fs.files'].drop()
self.db['fs.chunks'].drop()
|
Thraxis/pymedusa
|
sickbeard/name_parser/regexes.py
|
Python
|
gpl-3.0
| 23,532
| 0.0034
|
# coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
# all regexes are case insensitive
normal_regexes = [
('standard_repeat',
# Show.Name.S01E02.S01E03.Source.Quality.Etc-Group
# Show Name - S01E02 - S01E03 - S01E04 - Ep Name
r"""
^(?P<series_name>.+?)[. _-]+ # Show_Name and separator
s(?P<season_num>\d+)[. _-]* # S01 and optional separator
e(?P<ep_num>\d+) # E02 and separator
([. _-]+s(?P=season_num)[. _-]* # S01 and optional separator
e(?P<extra_ep_num>\d+))+ # E03/etc and separator
[. _-]*((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
"""),
('fov_repeat',
# Show.Name.1x02.1x03.Source.Quality.Etc-Group
# Show Name - 1x02 - 1x03 - 1x04 - Ep Name
r"""
^(?P<series_name>.+?)[. _-]+ # Show_Name and separator
(?P<season_num>\d+)x # 1x
(?P<ep_num>\d+) # 02 and separator
([. _-]+(?P=season_num)x # 1x
(?P<extra_ep_num>\d+))+ # 03/etc and separator
[. _-]*((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
"""),
('standard',
# Show.Name.S01E02.Source.Quality.Etc-Group
# Show Name - S01E02 - My Ep Name
# Show.Name.S01.E03.My.Ep.Name
# Show.Name.S01E02E03.Source.Quality.Etc-Group
# Show Name - S01E02-03 - My Ep Name
# Show.Name.S01.E02.E03
r"""
^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator
\(?s(?P<season_num>\d+)[. _-]* # S01 and optional separator
e(?P<ep_num>\d+)\)? # E02 and separator
(([. _-]*e|-) # linking e/- char
(?P<extra_ep_num>(?!(1080|720|480)[pi])\d+)(\))?)* # additional E03/etc
([. _,-]+((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?)?$ # Group
"""),
('newpct',
# American Horror Story - Temporada 4 HDTV x264[Cap.408_409]SPANISH AUDIO -NEWPCT
# American Horror Story - Temporada 4 [HDTV][Cap.408][Espanol Castellano]
# American Horror Story - Temporada 4 HDTV x264[Cap.408]SPANISH AUDIO –NEWPCT)
r"""
(?P<series_name>.+?).-.+\d{1,2}[ ,.] # Show name: American Horror Story
(?P<extra_info>.+)\[Cap\. # Quality: HDTV x264, [HDTV], HDTV x264
(?P<season_num>\d{1,2}) # Season Number: 4
(?P<ep_num>\d{2}) # Episode Number: 08
((_\d{1,2}(?P<extra_ep_num>\d{2}))|.*\]) # Episode number2: 09
"""),
('fov',
# Show_Name.1x02.Source_Quality_Etc-Group
# Show Name - 1x02 - My Ep Name
# Show_Name.1x02x03x04.Source_Quality_Etc-Group
# Show Name - 1x02-03-04 - My Ep Name
r"""
^((?P<series_name>.+?)[\[. _-]+)? # Show_Name and separator
(?P<season_num>\d+)x # 1x
(?P<ep_num>\d+) # 02 and separator
(([. _-]*x|-) # linking x/- char
(?P<extra_ep_num>
(?!(1080|720|480)[pi])(?!(?<=x)264) # ignore obviously wrong multi-eps
\d+))* # additional x03/etc
[\]. _-]*((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
"""),
('scene_date_format',
# Show.Name.2010.11.23.Source.Quality.Etc-Group
# Show Name - 2010-11-23 - Ep Name
r"""
^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator
(?P<air_date>(\d+[. _-]\d+[. _-]\d+)|(\d+\w+[. _-]\w+[. _-]\d+))
[. _-]*((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
"""),
('scene_sports_format',
# Show.Name.100.Event.2010.11.23.Source.Quality.Etc-Group
# Show.Name.2010.11.23.Source.Quality.Etc-Group
# Show Name - 2010-11-23 - Ep Name
r"""
^(?P<series_name>.*?(UEFA|MLB|ESPN|WWE|MMA|UFC|TNA|EPL|NASCAR|NBA|NFL|NHL|NRL|PGA|SUPER LEAGUE|FORMULA|FIFA|NETBALL|MOTOGP).*?)[. _-]+
((?P<series_num>\d{1,3})[. _-]+)?
(?P<air_date>(\d+[. _-]\d+[. _-]\d+)|(\d+\w+[. _-]\w+[. _-]\d+))[. _-]+
((?P<extra_info>.+?)((?<![. _-])
(?<!WEB)-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$
"""),
('stupid',
# tpz-abc102
r"""
(?P<release_group>.+?)(?<!WEB)-\w+?[\. ]? # tpz-abc
(?!264) # don't count x264
(?P<season_num>\d{1,2}) # 1
(?P<ep_num>\d{2})$ # 02
"""),
('verbose',
# Show Name Season 1 Episode 2 Ep Name
r"""
^(?P<series_name>.+?)[. _-]+ # Show Name and separator
(season|series)[. _-]+ # season and separator
(?P<season_num>\d+)[. _-]+ # 1
episode[. _-]+ # episode and separator
(?P<ep_num>\d+)[. _-]+ # 02 and separator
(?P<extra_info>.+)$ # Source_Quality_Etc-
"""),
('season_only',
     # Show.Name.S01.Source.Quality.Etc-Group
r"""
^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator
s(eason[. _-])? # S01/Season 01
(?P<season_num>\d+)[. _-]* # S01 and optional separator
[. _-]*((?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
"""),
('no_season_multi_ep',
# Show.Name.E02-03
# Show.Name.E02.2010
r"""
^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator
(e(p(isode)?)?|part|pt)[. _-]? # e, ep, episode, or part
(?P<ep_num>(\d+|(?<!e)[ivx]+)) # first ep num
((([. _-]+(and|&|to)[. _-]+)|-) # and/&/to joiner
(?P<extra_ep_num>(?!(1080|720|480)[pi])(\d+|(?<!e)[ivx]+))[. _-]) # second ep num
([. _-]*(?P<extra_info>.+?) # Source_Quality_Etc-
((?<![. _-])(?<!WEB) # Make sure this is really the release group
-(?P<release_group>[^ -]+([. _-]\[.*\])?))?)?$ # Group
"""),
('no_season_general',
# Show.Name.E23.Test
# Show.Name.Part.3.Source.Quality.Etc-Group
# Show.Name.Part.1.and.Part.2.Blah-Group
r"""
^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator
(e(p(isode)?)?|part|pt)[. _-]? # e, ep, episode, or part
|
Gastove/blogric
|
new_post.py
|
Python
|
epl-1.0
| 3,746
| 0.004271
|
import os
from time import localtime, strftime
import re
import sys
from fabric.api import local, lcd, settings, task
from fabric.utils import puts
from blog_config import INPUT_PATH, OUTPUT_PATH
SETTINGS_FILE = 'blog_config'
# Load paths
ABS_DIR_PATH = os.path.dirname(os.path.abspath(__file__))
ABS_SETTINGS_FILE = os.path.join(ABS_DIR_PATH, SETTINGS_FILE)
# ABS_OUTPUT_PATH = os.path.join(ABS_DIR_PATH, os.path.normpath(OUTPUT_PATH))
ABS_INPUT_PATH = os.path.normpath(os.path.join(ABS_DIR_PATH, INPUT_PATH))
__all__ = ['generate_new_post']
@task(alias="np")
def generate_new_post(name = "", extension = ".md",
should_open = True, list_existing = False):
""" Make a new post """
if list_existing:
path = _post_path()
existing_files = os.listdir(path)
puts("Files in today's folder already:")
for n in existing_files:
puts("\t" + n)
if not name:
puts("Enter a post name, or 'quit' to exit':")
name = raw_input("\t:")
if name == "quit":
puts("Done!")
sys.exit(0)
path = _post_path()
file_name = _post_name(name) + extension
full_post_uri = os.path.join(path, file_name)
if not _name_is_unique(full_post_uri):
puts("Name not unique!")
generate_new_post(list_existing = True)
sys.exit(0)
puts("Generated new post: ", file_name)
puts("Stored it in: ", path)
puts("Adding default metadata")
_write_default_metadata(name, full_post_uri)
if should_open:
puts("Opening new post")
_open_file(full_post_uri)
else:
puts("Complete.")
sys.exit(0)
def _write_default_metadata(post_real_name, post_full_path):
# Control structure for metadata order
def load_config_or_else(key, default):
""" Try to load a value from config; if not found, return default """
try:
val = getattr(__import__(SETTINGS_FILE, globals(),
locals(), key.upper()), key.upper())
return val
except AttributeError:
return default
metadata_keys = [
"Title", "Author", "Date", "Slug", "Category", "Tags", "Summary", "Status"
]
metadata_defaults = {
"Title": post_real_name,
"Date": strftime("%Y-%m-%d %H:%M", localtime()),
"Category": "",
"Tags": "",
"Slug": os.path.basename(post_full_path[:-3]),
"Author": "",
"Summary": "",
"Status": "draft"
}
for key in metadata_keys:
metadata_defaults[key] = load_config_or_else(key, metadata_defaults[key])
with open(post_full_path, 'w') as pointer:
for key in metadata_keys:
pointer.write("%s: %s\n" % (key, metadata_defaults[key]))
def _name_is_unique(candidate_path):
""" Check if the generated path name is unique or not """
    return False if os.path.isfile(candidate_path) else True
def _post_path():
""" Generate the c
|
orrect post path and make sure it exists """
abs_path = os.path.join(ABS_INPUT_PATH, 'posts')
if not os.path.exists(abs_path):
local("mkdir -p %s" % abs_path)
return abs_path
def _post_name(input_string):
""" Generate a valid post name """
def is_not_empty(entry): return True if entry else False
first_pass = re.sub("\s", "_", input_string.lower())
second_pass = "".join(filter(is_not_empty, re.findall("\w", first_pass)))
third_pass = re.search("([a-z0-9]*_){,4}[a-z0-9]*", second_pass).group()
timestamp = strftime("%Y-%m-%d", localtime())
return "_".join([timestamp, third_pass])
def _open_file(filepath):
""" Open the given file for editing """
cmd = "$EDITOR " + filepath
local(cmd)
|
shakamunyi/beam
|
sdks/python/apache_beam/utils/annotations_test.py
|
Python
|
apache-2.0
| 6,739
| 0.007716
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import warnings
from apache_beam.utils.annotations import deprecated
from apache_beam.utils.annotations import experimental
class AnnotationTests(unittest.TestCase):
  # Note: use different names for each of the functions decorated
# so that a warning is produced for each of them.
def test_deprecated_with_since_current_message(self):
with warnings.catch_warnings(record=True) as w:
@deprecated(since='v.1', current='multiply', extra_message='Do this')
def fnc_test_deprecated_with_since_current_message():
return 'lol'
fnc_test_deprecated_with_since_current_message()
self.check_annotation(
warning=w, warning_size=1,
warning_type=DeprecationWarning,
fnc_name='fnc_test_deprecated_with_since_current_message',
annotation_type='deprecated',
label_check_list=[('since', True),
('instead', True),
('Do this', True)])
def test_deprecated_with_since_current(self):
with warnings.catch_warnings(record=True) as w:
@deprecated(since='v.1', current='multiply')
def fnc_test_deprecated_with_since_current():
return 'lol'
fnc_test_deprecated_with_since_current()
self.check_annotation(warning=w, warning_size=1,
warning_type=DeprecationWarning,
fnc_name='fnc_test_deprecated_with_since_current',
annotation_type='deprecated',
label_check_list=[('since', True),
('instead', True)])
def test_deprecated_without_current(self):
with warnings.catch_warnings(record=True) as w:
@deprecated(since='v.1')
def fnc_test_deprecated_without_current():
return 'lol'
fnc_test_deprecated_without_current()
self.check_annotation(warning=w, warning_size=1,
warning_type=DeprecationWarning,
fnc_name='fnc_test_deprecated_without_current',
annotation_type='deprecated',
label_check_list=[('since', True),
('instead', False)])
def test_deprecated_without_since_should_fail(self):
with warnings.catch_warnings(record=True) as w:
with self.assertRaises(TypeError):
@deprecated()
def fnc_test_deprecated_without_since_should_fail():
return 'lol'
fnc_test_deprecated_without_since_should_fail()
assert not w
def test_experimental_with_current_message(self):
with warnings.catch_warnings(record=True) as w:
@experimental(current='multiply', extra_message='Do this')
def fnc_test_experimental_with_current_message():
return 'lol'
fnc_test_experimental_with_current_message()
self.check_annotation(
warning=w, warning_size=1,
warning_type=FutureWarning,
fnc_name='fnc_test_experimental_with_current_message',
annotation_type='experimental',
label_check_list=[('instead', True),
('Do this', True)])
def test_experimental_with_current(self):
with warnings.catch_warnings(record=True) as w:
@experimental(current='multiply')
def fnc_test_experimental_with_current():
return 'lol'
fnc_test_experimental_with_current()
self.check_annotation(warning=w, warning_size=1,
warning_type=FutureWarning,
fnc_name='fnc_test_experimental_with_current',
annotation_type='experimental',
label_check_list=[('instead', True)])
def test_experimental_without_current(self):
with warnings.catch_warnings(record=True) as w:
@experimental()
def fnc_test_experimental_without_current():
return 'lol'
fnc_test_experimental_without_current()
self.check_annotation(warning=w, warning_size=1,
warning_type=FutureWarning,
fnc_name='fnc_test_experimental_without_current',
annotation_type='experimental',
label_check_list=[('instead', False)])
def test_frequency(self):
"""Tests that the filter 'once' is sufficient to print once per
warning independently of location."""
with warnings.catch_warnings(record=True) as w:
@experimental()
def fnc_test_annotate_frequency():
return 'lol'
@experimental()
def fnc2_test_annotate_frequency():
return 'lol'
fnc_test_annotate_frequency()
fnc_test_annotate_frequency()
fnc2_test_annotate_frequency()
self.check_annotation(warning=[w[0]], warning_size=1,
warning_type=FutureWarning,
fnc_name='fnc_test_annotate_frequency',
annotation_type='experimental',
label_check_list=[])
    self.check_annotation(warning=[w[1]], warning_size=1,
warning_type=FutureWarning,
fnc_name='fnc2_test_annotate_frequency',
annotation_type='experimental',
label_check_list=[])
# helper function
def check_annotation(self, warning, warning_size, warning_type, fnc_name,
annotation_type, label_check_list):
self.assertEqual(1, warning_size)
self.assertTrue(issubclass(warning[-1].category, warning_type))
self.assertIn(fnc_name + ' is ' + annotation_type, str(warning[-1].message))
for label in label_check_list:
if label[1] is True:
self.assertIn(label[0], str(warning[-1].message))
else:
self.assertNotIn(label[0], str(warning[-1].message))
if __name__ == '__main__':
unittest.main()
|
Distrotech/qtcreator
|
share/qtcreator/debugger/qttypes.py
|
Python
|
lgpl-2.1
| 90,089
| 0.003519
|
############################################################################
#
# Copyright (C) 2015 The Qt Company Ltd.
# Contact: http://www.qt.io/licensing
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms and
# conditions see http://www.qt.io/terms-conditions. For further information
# use the contact form at http://www.qt.io/contact-us.
#
# GNU Lesser General Public License Usage
# Alternatively, this file may be used under the terms of the GNU Lesser
# General Public License version 2.1 or version 3 as published by the Free
# Software Foundation and appearing in the file LICENSE.LGPLv21 and
# LICENSE.LGPLv3 included in the packaging of this file. Please review the
# following information to ensure the GNU Lesser General Public License
# requirements will be met: https://www.gnu.org/licenses/lgpl.html and
# http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
#
# In addition, as a special exception, The Qt Company gives you certain additional
# rights. These rights are described in The Qt Company LGPL Exception
# version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
#
#############################################################################
import platform
from dumper import *
def qdump__QAtomicInt(d, value):
d.putValue(int(value["_q_value"]))
d.putNumChild(0)
def qdump__QBasicAtomicInt(d, value):
d.putValue(int(value["_q_value"]))
d.putNumChild(0)
def qdump__QAtomicPointer(d, value):
d.putType(value.type)
q = value["_q_value"]
p = toInteger(q)
d.putValue("@0x%x" % p)
d.putNumChild(1 if p else 0)
if d.isExpanded():
with Children(d):
d.putSubItem("_q_value", q.dereference())
def qform__QByteArray():
return [Latin1StringFormat, SeparateLatin1StringFormat,
Utf8StringFormat, SeparateUtf8StringFormat ]
def qdump__QByteArray(d, value):
data, size, alloc = d.byteArrayData(value)
d.putNumChild(size)
elided, p = d.encodeByteArrayHelper(d.extractPointer(value), d.displayStringLimit)
displayFormat = d.currentItemFormat()
|
if displayFormat == AutomaticFormat or displayFormat == Latin1StringFormat:
d.putValue(p, Hex2EncodedLatin1, elided=elided)
elif displayFormat == SeparateLatin1StringFormat:
d.putValue(p, Hex2EncodedLatin1, elided=elided)
d.putField("editformat", DisplayLatin1String)
d.putField("editvalue", d.encodeByteArray(value, limit=100000))
|
elif displayFormat == Utf8StringFormat:
d.putValue(p, Hex2EncodedUtf8, elided=elided)
elif displayFormat == SeparateUtf8StringFormat:
d.putValue(p, Hex2EncodedUtf8, elided=elided)
d.putField("editformat", DisplayUtf8String)
d.putField("editvalue", d.encodeByteArray(value, limit=100000))
if d.isExpanded():
d.putArrayData(data, size, d.charType())
def qdump__QByteArrayData(d, value):
data, size, alloc = d.byteArrayDataHelper(d.addressOf(value))
d.putValue(d.readMemory(data, size), Hex2EncodedLatin1)
d.putNumChild(1)
if d.isExpanded():
with Children(d):
d.putIntItem("size", size)
d.putIntItem("alloc", alloc)
def qdump__QChar(d, value):
d.putValue(int(value["ucs"]))
d.putNumChild(0)
def qform__QAbstractItemModel():
return [SimpleFormat, EnhancedFormat]
def qdump__QAbstractItemModel(d, value):
displayFormat = d.currentItemFormat()
if displayFormat == SimpleFormat:
d.putPlainChildren(value)
return
#displayFormat == EnhancedFormat:
# Create a default-constructed QModelIndex on the stack.
try:
ri = d.makeValue(d.qtNamespace() + "QModelIndex", "-1, -1, 0, 0")
this_ = d.makeExpression(value)
ri_ = d.makeExpression(ri)
rowCount = int(d.parseAndEvaluate("%s.rowCount(%s)" % (this_, ri_)))
columnCount = int(d.parseAndEvaluate("%s.columnCount(%s)" % (this_, ri_)))
except:
d.putPlainChildren(value)
return
d.putValue("%d x %d" % (rowCount, columnCount))
d.putNumChild(rowCount * columnCount)
if d.isExpanded():
with Children(d, numChild=rowCount * columnCount, childType=ri.type):
i = 0
for row in xrange(rowCount):
for column in xrange(columnCount):
with SubItem(d, i):
d.putName("[%s, %s]" % (row, column))
mi = d.parseAndEvaluate("%s.index(%d,%d,%s)"
% (this_, row, column, ri_))
#warn("MI: %s " % mi)
#name = "[%d,%d]" % (row, column)
#d.putValue("%s" % mi)
d.putItem(mi)
i = i + 1
#warn("MI: %s " % mi)
#d.putName("[%d,%d]" % (row, column))
#d.putValue("%s" % mi)
#d.putNumChild(0)
#d.putType(mi.type)
#gdb.execute("call free($ri)")
def qform__QModelIndex():
return [SimpleFormat, EnhancedFormat]
def qdump__QModelIndex(d, value):
displayFormat = d.currentItemFormat()
if displayFormat == SimpleFormat:
d.putPlainChildren(value)
return
r = value["r"]
c = value["c"]
try:
p = value["p"]
except:
p = value["i"]
m = value["m"]
if d.isNull(m) or r < 0 or c < 0:
d.putValue("(invalid)")
d.putPlainChildren(value)
return
mm = m.dereference()
mm = mm.cast(mm.type.unqualified())
ns = d.qtNamespace()
try:
mi = d.makeValue(ns + "QModelIndex", "%s,%s,%s,%s" % (r, c, p, m))
mm_ = d.makeExpression(mm)
mi_ = d.makeExpression(mi)
rowCount = int(d.parseAndEvaluate("%s.rowCount(%s)" % (mm_, mi_)))
columnCount = int(d.parseAndEvaluate("%s.columnCount(%s)" % (mm_, mi_)))
except:
d.putPlainChildren(value)
return
try:
# Access DisplayRole as value
val = d.parseAndEvaluate("%s.data(%s, 0)" % (mm_, mi_))
v = val["d"]["data"]["ptr"]
d.putStringValue(d.makeValue(ns + 'QString', v))
except:
d.putValue("")
d.putNumChild(1)
if d.isExpanded():
with Children(d):
d.putFields(value, False)
i = 0
for row in xrange(rowCount):
for column in xrange(columnCount):
with UnnamedSubItem(d, i):
d.putName("[%s, %s]" % (row, column))
mi2 = d.parseAndEvaluate("%s.index(%d,%d,%s)"
% (mm_, row, column, mi_))
d.putItem(mi2)
i = i + 1
d.putCallItem("parent", value, "parent")
#gdb.execute("call free($mi)")
def qdump__QDate(d, value):
jd = int(value["jd"])
if jd:
d.putValue(jd, JulianDate)
d.putNumChild(1)
if d.isExpanded():
# FIXME: This improperly uses complex return values.
with Children(d):
if d.canCallLocale():
d.putCallItem("toString", value, "toString",
d.enumExpression("DateFormat", "TextDate"))
d.putCallItem("(ISO)", value, "toString",
d.enumExpression("DateFormat", "ISODate"))
d.putCallItem("(SystemLocale)", value, "toString",
d.enumExpression("DateFormat", "SystemLocaleDate"))
d.putCallItem("(Locale)", value, "toString",
d.enumExpression("DateFormat", "LocaleDate"))
d.putFields(value)
else:
d.putValue("(invalid)")
d.putNumChild(0)
def qdump__QTime(d, value):
mds = int(value["mds"])
if mds >= 0:
d.putValue(mds, MillisecondsSinceMidnight)
d.putNumChild(1)
|
alwaysrookie/superlists
|
superlists/settings.py
|
Python
|
apache-2.0
| 2,067
| 0
|
"""
Django settings for superlists project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '47@j(z&b(+=1kr7i)l4&_x#$el3)4h0p*+k$u&k(v5bcw7(pta'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
|
'lists',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'superlists.urls'
WSGI_APPLICATION = 'superlists.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
|
DarioGT/OMS-PluginXML
|
org.modelsphere.sms/lib/jython-2.2.1/Lib/keyword.py
|
Python
|
gpl-3.0
| 2,162
| 0.004625
|
#! /usr/bin/env python
"""Keywords (from "graminit.c")
This file is automatically generated; please don't muck it up!
To update the symbols in this file, 'cd' to the top directory of
the python source tree after building the interpreter and run:
python Lib/keyword.py
"""
__all__ = ["iskeyword", "kwlist"]
kwlist = [
#--start keywords--
'and',
'assert',
'break',
    'class',
'continue',
'def',
'del',
'elif',
'else',
'except',
'exec',
'finally',
'for',
'from',
'global',
'if',
'import',
'in',
'is',
'lambda',
'not',
'or',
'pass',
'print',
'raise',
|
'return',
'try',
'while',
'yield',
#--end keywords--
]
kwdict = {}
for keyword in kwlist:
kwdict[keyword] = 1
iskeyword = kwdict.has_key
def main():
import sys, re
args = sys.argv[1:]
iptfile = args and args[0] or "Python/graminit.c"
if len(args) > 1: optfile = args[1]
else: optfile = "Lib/keyword.py"
# scan the source file for keywords
fp = open(iptfile)
strprog = re.compile('"([^"]+)"')
lines = []
while 1:
line = fp.readline()
if not line: break
if line.find('{1, "') > -1:
match = strprog.search(line)
if match:
lines.append(" '" + match.group(1) + "',\n")
fp.close()
lines.sort()
# load the output skeleton from the target
fp = open(optfile)
format = fp.readlines()
fp.close()
# insert the lines of keywords
try:
start = format.index("#--start keywords--\n") + 1
end = format.index("#--end keywords--\n")
format[start:end] = lines
except ValueError:
sys.stderr.write("target does not contain format markers\n")
sys.exit(1)
# write the output file
fp = open(optfile, 'w')
fp.write(''.join(format))
fp.close()
if __name__ == "__main__":
main()
|
jefftc/changlab
|
Betsy/Betsy/modules/run_loocv_random_forest.py
|
Python
|
mit
| 4,207
| 0.002615
|
from Module import AbstractModule
class Module(AbstractModule):
    def __init__(self):
AbstractModule.__init__(self)
def run(
self, network, antecedents, out_attributes, user_options, num_cores,
outfile):
import os
import arrayio
from genomicode import filelib
from Betsy import bie3
from Betsy import rulebase
from Betsy import read_label_file
cls_node, data_node = antecedents
M = arrayio.read(data_node.identifier)
x = read_label_file.read(cls_node.identifier)
a, training_label, second_line = x
predict_model = __import__(
'Betsy.modules.' + 'classify_with_random_forest',
globals(), locals(),
['classify_with_random_forest'], -2)
evaluate_model = __import__(
'Betsy.modules.' + 'evaluate_prediction',
globals(), locals(), ['evaluate_prediction'], -2)
full_index = range(M.ncol())
f = file(outfile, 'w')
f.write('\t'.join(['sample_name', 'Predicted_class', 'Confidence',
'Actual_class', 'Correct?']))
f.write('\n')
for i in range(M.ncol()):
# Make filenames
# gene expression for N samples.
merge_file = 'merge' + '_' + str(i)
# class label file for the training samples (samples 1-(N-1)).
train_label = 'train_label' + '_' + str(i)
# class label file for the test sample (sample N).
test_label = 'test_label' + '_' + str(i)
# Save the output of the prediction and evaluation.
predict_file = "predict.txt"
evaluate_file = "evaluate.txt"
test_index = i
train_index = full_index[:]
train_index.remove(test_index)
merge_index = train_index + [test_index]
y_training = [training_label[x] for x in train_index]
y_test = [training_label[test_index]]
# Write the files for this iteration.
M_merge = M.matrix(None, merge_index)
arrayio.gct_format.write(M_merge, open(merge_file, 'w'))
read_label_file.write(train_label, second_line, y_training)
read_label_file.write(test_label, second_line, y_test[0])
# Make objects to be used in this analysis.
x = rulebase.SignalFile.output(
format='gct', contents='class0,class1,test')
merge_data = bie3.IdentifiedDataNode(x, identifier=merge_file)
x = rulebase.ClassLabelFile.output(contents='class0,class1')
train_label_data = bie3.IdentifiedDataNode(
x, identifier=train_label)
x = rulebase.ClassLabelFile.output(contents='test')
test_label_data = bie3.IdentifiedDataNode(x, identifier=test_label)
# Make a fake object to pass to evaluate_model.run.
out_node = filelib.GenericObject()
out_node.identifier = predict_file
# Run the predictions.
x = train_label_data, merge_data
predict_model.Module().run(
network, x, out_attributes, user_options, num_cores,
predict_file)
# Run the evaluation.
new_parameters = out_attributes.copy()
x = test_label_data, out_node
evaluate_model.Module().run(
network, x, new_parameters, user_options, num_cores,
evaluate_file)
# Is this the right line?
lines = open(evaluate_file).readlines()
f.write(lines[1])
os.remove(merge_file)
os.remove(train_label)
os.remove(test_label)
os.remove(predict_file)
os.remove(evaluate_file)
f.close()
def name_outfile(self, antecedents, user_options):
from Betsy import module_utils
data_node, cls_node = antecedents
original_file = module_utils.get_inputid(data_node.identifier)
filename = 'predication_loocv_random_forest' + original_file + '.txt'
return filename
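# Editor's note (not part of the original module): the run() loop above does
# leave-one-out cross-validation by holding out one column index per
# iteration. A minimal sketch of the index bookkeeping it relies on, using a
# hypothetical five-sample matrix:
#
#   full_index = range(5)                     # [0, 1, 2, 3, 4]
#   test_index = 2                            # the held-out sample
#   train_index = [0, 1, 3, 4]                # full_index minus test_index
#   merge_index = train_index + [test_index]  # [0, 1, 3, 4, 2]
#
# so the merged matrix always places the single test sample last, matching the
# 'class0,class1,test' contents declared for the SignalFile node.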
|
idiap/cbrec
|
data.py
|
Python
|
gpl-3.0
| 2,541
| 0.023219
|
# Copyright (c) 2014 Idiap Research Institute, http://www.idiap.ch/
# Written by Nikolaos Pappas <nikolaos.pappas@idiap.ch>,
#
# This file is part of CBRec.
#
# CBRec is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# CBRec is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CBRec. If not, see <http://www.gnu.org/licenses/>.
import sys
import json
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
from utils import Unbuffered, write
class Data:
def __init__(self, items, attrs=None, preprocess=True, debug=False):
self.texts = [] # item texts
self.items = items # item hash representations
self.attrs = attrs # item attributes to keep
self.regex = '\r|\t|\n|--' # symbols to be removed
self.min_w = 2 # minimum length of each word
self.debug = debug # print status and debug messages
if attrs is None:
self.attrs = list(set(items[0].keys()) - set(['id']))
self.extract_text()
self.preprocess() if preprocess else ''
def extract_text(self):
write("\n "+"-> Extracting text".ljust(50,'.')) if self.debug else ''
for idx, item in enumerate(self.items):
attr_texts = []
for attr in self.attrs:
attr_texts.append(item[attr])
text = " ".join(attr_texts).replace(self.regex,"")
self.texts.append(text)
write("[OK]") if self.debug else ''
def preprocess(self):
write("\n "+"-> Preprocessing text".ljust(50,'.')) if self.debug else ''
stoplist = stopwords.words('english')
wregex = RegexpTokenizer(r'\w+')
for idx, item in enumerate(self.items):
words = wregex.tokenize(self.texts[idx].lower())
final_words = []
for iw, word in enumerate(words):
if word not in stoplist and len(word) > self.min_w:
final_words.append(word)
self.texts[idx] = ' '.join(final_words)
write("[OK]") if self.debug else ''
if __name__== '__main__':
items = json.loads(open('example.json').read())
data = Data(items)
example = data.items[0]
print "Total items: %d" % len(data.texts)
print "Example (id=%d, title=%s):" % (example['id'], example['title'])
print data.texts[0]
|
cbare/Etudes
|
python/quick_sort.py
|
Python
|
apache-2.0
| 572
| 0.001748
|
import random
def pivot(items, a, b):
p = items[b]
i = a
for j in range(a,b):
|
if items[j] <= p:
items[i], items[j] = items[j], items[i]
i += 1
items[i], items[b] = items[b], items[i]
return i
def quicksort(items, i, j):
"""
inplace quicksort
"""
if i < j:
p = pivot(items, i, j)
quicksort(items, i, p-1)
quicksort(items, p+1, j)
letters = random.choices('abcdefghijklmnopqrstuvwxyz', k=100)
quicksort(letters, 0, len(letters)-1)
print(''.join(letter for letter in letters))
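# Editor's addition (not part of the original file): pivot() is the Lomuto
# partition scheme -- the last element is the pivot and i tracks the boundary
# of the "<= pivot" region. A small sanity check of the full sort:
nums = [3, 1, 2]
quicksort(nums, 0, len(nums) - 1)
assert nums == [1, 2, 3]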
|
pgiri/asyncoro
|
py3/asyncoro/discoronode.py
|
Python
|
mit
| 55,226
| 0.00344
|
#!/usr/bin/python3
"""This file is part of asyncoro; see http://asyncoro.sourceforge.net for
details.
This program can be used to start discoro server processes so discoro scheduler
(see 'discoro.py') can send computations to these server processes for executing
distributed communicating proceses (coroutines). All coroutines in a server
execute in the same thread, so multiple CPUs are not used by one server. If CPU
intensive computations are to be run on systems with multiple processors, then
this program should be run with multiple instances (see below for '-c' option to
this program).
|
See 'discoro_client*.py' files for example use cases.
"""
__author__ = "Giridhar Pemmasani (pgiri@yahoo.com)"
__copyright__ = "Copyright (c) 2014 Giridhar Pemmasani"
__license__ = "MIT"
__url__ = "http://asyncoro.sourceforge.net"
def _discoro_server_coro_proc():
# coroutine
"""Server process receives computations and runs coroutines for it.
"""
import os
import shutil
import traceback
import sys
import time
from asyncoro.discoro import MinPulseInterval, MaxPulseInterval, \
DiscoroNodeInfo, DiscoroNodeAvailInfo, Scheduler
import asyncoro.disasyncoro as asyncoro
from asyncoro.disasyncoro import Coro, SysCoro, Location
_discoro_coro = asyncoro.AsynCoro.cur_coro()
_discoro_config = yield _discoro_coro.receive()
_discoro_node_coro = asyncoro.deserialize(_discoro_config['node_coro'])
_discoro_scheduler_coro = asyncoro.deserialize(_discoro_config['scheduler_coro'])
assert isinstance(_discoro_scheduler_coro, Coro)
_discoro_computation_auth = _discoro_config.pop('computation_auth', None)
if _discoro_config['min_pulse_interval'] > 0:
MinPulseInterval = _discoro_config['min_pulse_interval']
if _discoro_config['max_pulse_interval'] > 0:
MaxPulseInterval = _discoro_config['max_pulse_interval']
_discoro_busy_time = _discoro_config.pop('busy_time')
asyncoro.MsgTimeout = _discoro_config.pop('msg_timeout')
_discoro_name = asyncoro.AsynCoro.instance().name
_discoro_dest_path = os.path.join(asyncoro.AsynCoro.instance().dest_path,
'discoroproc-%s' % _discoro_config['id'])
if os.path.isdir(_discoro_dest_path):
shutil.rmtree(_discoro_dest_path)
asyncoro.AsynCoro.instance().dest_path = _discoro_dest_path
os.chdir(_discoro_dest_path)
sys.path.insert(0, _discoro_dest_path)
for _discoro_var in _discoro_config.pop('peers', []):
Coro(asyncoro.AsynCoro.instance().peer, asyncoro.deserialize(_discoro_var))
for _discoro_var in ['clean', 'min_pulse_interval', 'max_pulse_interval']:
del _discoro_config[_discoro_var]
_discoro_coro.register('discoro_server')
asyncoro.logger.info('discoro server %s started at %s; '
'computation files will be saved in "%s"',
_discoro_config['id'], _discoro_coro.location, _discoro_dest_path)
_discoro_req = _discoro_client = _discoro_auth = _discoro_msg = None
_discoro_peer_status = _discoro_monitor_coro = _discoro_monitor_proc = _discoro_job = None
_discoro_job_coros = set()
_discoro_jobs_done = asyncoro.Event()
def _discoro_peer_status(coro=None):
coro.set_daemon()
while 1:
status = yield coro.receive()
if not isinstance(status, asyncoro.PeerStatus):
asyncoro.logger.warning('Invalid peer status %s ignored', type(status))
continue
if status.status == asyncoro.PeerStatus.Offline:
if (_discoro_scheduler_coro and
_discoro_scheduler_coro.location == status.location):
if _discoro_computation_auth:
_discoro_coro.send({'req': 'close', 'auth': _discoro_computation_auth})
def _discoro_monitor_proc(zombie_period, coro=None):
coro.set_daemon()
while 1:
msg = yield coro.receive(timeout=zombie_period)
if isinstance(msg, asyncoro.MonitorException):
asyncoro.logger.debug('coro %s done', msg.args[0])
_discoro_job_coros.discard(msg.args[0])
if not _discoro_job_coros:
_discoro_jobs_done.set()
_discoro_busy_time.value = int(time.time())
elif not msg:
if _discoro_job_coros:
_discoro_busy_time.value = int(time.time())
else:
asyncoro.logger.warning('invalid message to monitor ignored: %s', type(msg))
_discoro_var = _discoro_config['computation_location']
_discoro_var = asyncoro.Location(_discoro_var.addr, _discoro_var.port)
if (yield asyncoro.AsynCoro.instance().peer(_discoro_var)):
raise StopIteration(-1)
asyncoro.AsynCoro.instance().peer_status(SysCoro(_discoro_peer_status))
yield asyncoro.AsynCoro.instance().peer(_discoro_node_coro.location)
yield asyncoro.AsynCoro.instance().peer(_discoro_scheduler_coro.location)
_discoro_scheduler_coro.send({'status': Scheduler.ServerDiscovered,
'coro': _discoro_coro, 'name': _discoro_name,
'auth': _discoro_computation_auth})
if _discoro_config['_server_setup']:
if _discoro_config['_disable_servers']:
while 1:
_discoro_var = yield _discoro_coro.receive()
if (isinstance(_discoro_var, dict) and
_discoro_var.get('req', None) == 'enable_server' and
_discoro_var.get('auth', None) == _discoro_computation_auth):
_discoro_var = _discoro_var['setup_args']
if not isinstance(_discoro_var, tuple):
_discoro_var = tuple(_discoro_var)
break
else:
asyncoro.logger.warning('Ignoring invalid request to run server setup')
else:
_discoro_var = ()
_discoro_var = yield asyncoro.Coro(globals()[_discoro_config['_server_setup']],
*_discoro_var).finish()
if _discoro_var:
asyncoro.logger.debug('discoro server %s @ %s setup failed',
_discoro_config['id'], _discoro_coro.location)
raise StopIteration(_discoro_var)
_discoro_config['_server_setup'] = None
_discoro_scheduler_coro.send({'status': Scheduler.ServerInitialized,
'coro': _discoro_coro, 'name': _discoro_name,
'auth': _discoro_computation_auth})
_discoro_var = _discoro_config['zombie_period']
if _discoro_var:
_discoro_var /= 3
else:
_discoro_var = None
_discoro_monitor_coro = SysCoro(_discoro_monitor_proc, _discoro_var)
_discoro_node_coro.send({'req': 'server_setup', 'id': _discoro_config['id'],
'coro': _discoro_coro})
_discoro_busy_time.value = int(time.time())
asyncoro.logger.debug('discoro server "%s": Computation "%s" from %s',
_discoro_name, _discoro_computation_auth,
_discoro_scheduler_coro.location)
while 1:
_discoro_msg = yield _discoro_coro.receive()
if not isinstance(_discoro_msg, dict):
continue
_discoro_req = _discoro_msg.get('req', None)
if _discoro_req == 'run':
_discoro_client = _discoro_msg.get('client', None)
_discoro_auth = _discoro_msg.get('auth', None)
_discoro_job = _discoro_msg.get('job', None)
if (not isinstance(_discoro_client, Coro) or
_discoro_auth != _discoro_computation_auth):
asyncoro.logger.warning('invalid run: %s', type(_discoro_job))
if isinstance(_discoro_client, Coro):
_discoro_client.send(None)
continue
try:
if _discoro_job.code:
exec(_discoro_job.code, globals())
_discoro_job.args = asyncoro.deserialize(_discoro_job.args)
|
hasgeek/lastuser
|
migrations/versions/07f975f81f03_remove_team_domain.py
|
Python
|
bsd-2-clause
| 655
| 0.001527
|
# -*- coding: utf-8 -*-
"""Remove team domain
Revision ID: 07f975f81f03
Revises: 4e206c5ddabd
Create Date: 2017-08-04 15:12:11.992856
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '07f975f81f03'
down_revision = '4e206c5ddabd'
branch_labels = None
depends_on = None
def upgrade():
op.drop_index('ix_team_domain', table_name='team')
op.drop_column('team', 'domain')
def downgrade():
op.add_column(
'team',
        sa.Column('domain', sa.VARCHAR(length=253), autoincrement=False, nullable=True),
)
op.create_index('ix_team_domain', 'team', ['domain'], unique=False)
|
axinging/chromium-crosswalk
|
tools/perf/measurements/skpicture_printer_unittest.py
|
Python
|
bsd-3-clause
| 1,305
| 0.004598
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import shutil
import tempfile
from telemetry import decorators
from telemetry.testing import options_for_unittests
from telemetry.testing import page_test_test_case
from measurements import skpicture_printer
class SkpicturePrinterUnitTest(page_test_test_case.PageTestTestCase):
def setUp(self):
self._options = options_for_unittests.GetCopy()
self._skp_outdir = tempfile.mkdtemp('_skp_test')
def tearDown(self):
    shutil.rmtree(self._skp_outdir)
@decorators.Disabled('android')
def testSkpicturePrinter(self):
|
ps = self.CreateStorySetFromFileInUnittestDataDir('blank.html')
measurement = skpicture_printer.SkpicturePrinter(self._skp_outdir)
results = self.RunMeasurement(measurement, ps, options=self._options)
# Picture printing is not supported on all platforms.
if results.failures:
assert 'not supported' in results.failures[0].exc_info[1].message
return
saved_picture_count = results.FindAllPageSpecificValuesNamed(
'saved_picture_count')
self.assertEquals(len(saved_picture_count), 1)
self.assertGreater(saved_picture_count[0].GetRepresentativeNumber(), 0)
|
janekg89/flutype_webapp
|
flutype/gal-file.py
|
Python
|
lgpl-3.0
| 5,034
| 0.0147
|
import pandas as pd
import os
# -*- coding: utf-8 -*-
from flutype.data_management.fill_master import Master
import numpy as np
def print_full(x):
pd.set_option('display.max_rows', len(x))
print(x)
pd.reset_option('display.max_rows')
def extract_peptide_batch(ma):
gal_lig_fix = ma.read_gal_ligand("170725_N13", index=False)
unique_peptides = gal_lig_fix[0].drop_duplicates(subset=["ID"])
unique_peptides = unique_peptides[unique_peptides.ID != "Empty"]
unique_peptides.ID = unique_peptides.ID.astype(int)
unique_peptides.sort_values(by = "ID", inplace=True)
unique_peptides.Name = unique_peptides.Name.str.replace('FAIL_', "")
unique_peptides['Concentration'] = unique_peptides.Name.str.rpartition('_')[0]
unique_peptides['Concentration'] = unique_peptides.Concentration.str.partition('_')[0]
peptide_batch = pd.DataFrame(unique_peptides[["Name","Concentration"]].values,columns=["sid","concentration"])
peptide_batch["labeling"] = ""
peptide_batch["buffer"] = ""
peptide_batch["ph"] = ""
peptide_batch["purity"] = ""
peptide_batch["produced_by"] = ""
peptide_batch["comment"] = ""
peptide_batch["ligand"] = ""
peptide_batch["ligand"] = unique_peptides.Name.str.partition('_')[2].values
return peptide_batch
def gal_reformat(ma):
gal_lig_fix = ma.read_gal_ligand("170725_N15", index= False)
gal_lig_fix_new = pd.DataFrame(gal_lig_fix[0][["Block","Row","Column","Name"]])
mapping = {"Empty":"NO",
"Panama":"Pan3",
"California":"Cal2",
"Aichi":"Ach1",
"1.0_Kloe_Amid":"KLOA025",
"0.5_Kloe_Amid":"KLOA050",
"0.25_Kloe_Amid":"KLOA025",
"1.0_pep_Nenad":"NEN100",
"0.5_pep_Nenad":"NEN050",
"0.25_pep_Nenad":"NEN025",
"1.0_Fetuin":"P012-1",
"0.5_Fetuin":"P012-05",
"0.25_Fetuin":"P012-025",
"1.0_Leuchtefix":"DYE100",
"0.5_Leuchtefix":"DYE050",
"0.25_Leuchtefix":"DYE025",
'FAIL_': ""
}
for key in mapping:
gal_lig_fix_new.Name = gal_lig_fix_new.Name.str.replace(key, mapping[key])
mapping = {"1.0_Kloe_S":"KLOS100",
"0.5_Kloe_S":"KLOS050",
"0.25_Kloe_S":"KLOS025"
}
for key in mapping:
gal_lig_fix_new.loc[gal_lig_fix_new["Name"].str.contains(key), "Name"] = mapping[key]
return gal_lig_fix_new
def peptide_batches_not_in_master(ma,gal_lig_fix):
s_gal = set(gal_lig_fix["Name"].values)
data_dic = ma.read_data_tables()
s_pb = set(data_dic["peptide_batch"]["sid"].values)
s_ab = set(data_dic["antibody_batch"]["sid"].values)
s_vb = set(data_dic["virus_batch"]["sid"].values)
s_b = s_pb
s_b.update(s_ab)
s_b.update(s_vb)
return(s_gal - s_b)
def reshape_gal_file(shape, gal_file):
a = []
b = []
for i in range(shape[1]):
for ii in range(shape[0]):
a.append(i )
b.append(ii )
gal_file["row_factor"] = 0
gal_file["column_factor"] = 0
print(a)
print(b)
for block_num,block_factor in enumerate(a):
gal_file.loc[gal_file["Block"] == block_num+1, "row_factor"] = block_factor
for block_num, block_factor in enumerate(b):
gal_file.loc[gal_file["Block"] == block_num+1, "column_factor"] = block_factor
gal_file["Row"]=gal_file["Row"]+(gal_file["Row"].max()*gal_file["row_factor"])
gal_file["Column"]=gal_file["Column"]+(gal_file["Column"].max()*gal_file["column_factor"])
return gal_file
def three_viruses_gal(gal_file):
virus_map = {}
for i in range(1,33):
if i <= 12:
virus_map[i] = "Ach1"
elif 12 < i <= 24:
virus_map[i] = "Cal2"
elif 24 < i:
virus_map[i] = "Pan3"
for key in virus_map.keys():
gal_file.loc[gal_file["Block"]== key , "Name"] =virus_map[key]
return gal_file
####################################################################
if __name__ == "__main__":
ma_path = "../master_uncomplete/"
ma = Master(ma_path)
#peptide_batch = extract_peptide_batch(ma)
|
# print_full(peptide_batch)
#fp = os.path.join(ma.collections_path,"170725_N13","peptides_batch.csv")
# peptide_batch.to_csv(fp)
ma_path_standard = "../master/"
ma_standard = Master(ma_path_standard)
gal_lig_fix = gal_reformat(ma)
#subset = peptide_batches_not_in_master(ma_standard,gal_lig_fix)
    gal_lig_fix= reshape_gal_file((4,8), gal_lig_fix)
gal_lig_fix = gal_lig_fix.reset_index(drop=True)
fp = os.path.join(ma.collections_path,"170725_P7","lig_fix_012.txt")
gal_lig_fix.to_csv(fp, sep='\t',index=True , index_label="ID")
#gal_lig_fix = three_viruses_gal(gal_lig_fix)
gal_lig_fix["Name"] = "Ach1"
fp2 = os.path.join(ma.collections_path,"170725_P7","lig_mob_016.txt")
gal_lig_fix.to_csv(fp2, sep='\t', index=True,index_label="ID")
|
LamCiuLoeng/vat
|
vatsystem/util/common.py
|
Python
|
mit
| 7,482
| 0.014969
|
# -*- coding: utf-8 -*-
import datetime
from datetime import date, datetime as dt
from email import Encoders
from email.header import Header
import os
import pickle
import random
import smtplib
import traceback
import calendar
from decimal import Decimal
from email.MIMEBase import MIMEBase
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email.Utils import COMMASPACE
from email.Utils import formatdate
from tg import flash
from tg import redirect
from tg import response
from tg import session
from tg import config
from vatsystem.model import *
from vatsystem.util import const
from sqlalchemy import *
from sqlalchemy.sql import and_
DISPLAY_DATE_FORMAT="%Y-%m-%d"
__all__=['getCompanyCode', "tabFocus", "Date2Text", "getOr404",
"sendEmail", "number2alphabet", "serveFile", 'alphabet2number',
'gerRandomStr', 'allAlpha', 'comp', '_get_params_from_args_and_obj', '_orderly_dict', '_get_lastday_of_month', 'CRef']
def comp(obj,compObj):
if type(obj) == type(1):compObj = int(compObj)
if type(obj) == type(Decimal("1.00")):
obj = float(str(obj))
if compObj:
compObj = float(str(compObj))
if type(obj) == type(u'') or type(compObj) == type(u''):
obj = obj.encode("utf-8").replace("\r\n",'') if obj else None
if compObj:
compObj = compObj.encode("utf-8")
return [True,str(obj),str(compObj)] if not obj==compObj else [False]
def getCompanyCode(type = None):
company_code = [session['company_code']]
if type == 1: company_code = "('%s')" % session['company_code']
return company_code
def tabFocus(tab_type=""):
def decorator(fun):
def returnFun(*args, ** keywordArgs):
returnVal=fun(*args, ** keywordArgs)
if type(returnVal)==dict and "tab_focus" not in returnVal:
returnVal["tab_focus"]=tab_type
return returnVal
return returnFun
return decorator
def Date2Text(value=None, dateTimeFormat=DISPLAY_DATE_FORMAT, defaultNow=False):
if not value and defaultNow: value=datetime.now()
format=dateTimeFormat
result=value
if isinstance(value, date):
try:
result=value.strftime(format)
except:
traceback.print_exc()
elif hasattr(value, "strftime"):
try:
result=value.strftime(format)
except:
traceback.print_exc()
if not result:
result=""
return result
def getOr404(obj, id, redirect_url="/index", message="The record deosn't exist!"):
try:
v=DBSession.query(obj).get(id)
if v: return v
else: raise "No such obj"
except:
traceback.print_exc()
flash(message)
redirect(redirect_url)
def number2alphabet(n):
result=[]
while n>=0:
if n>26:
result.insert(0, n%26)
n/=26
else:
result.insert(0, n)
break
return "".join([chr(r+64) for r in result]) if result else None
def alphabet2number(str):
if not str or not isinstance(str, basestring): raise TypeError
if not str.isalpha(): raise ValueError
return reduce(lambda a, b: (a*26)+ord(b)-ord("a")+1, str.lower(), 0)
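# Editor's note (not part of the original module): alphabet2number converts a
# spreadsheet-style column label to its 1-based index, e.g. 'a' -> 1,
# 'z' -> 26, 'aa' -> 27, 'ab' -> 28 (case-insensitive); number2alphabet is its
# rough inverse for such values (27 -> 'AA').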
def sendEmail(send_from, send_to, subject, text, cc_to=[], files=[], server="192.168.42.13"):
assert type(send_to)==list
assert type(files)==list
msg=MIMEMultipart()
msg.set_charset("utf-8")
msg['From']=send_from
msg['To']=COMMASPACE.join(send_to)
if cc_to:
assert type(cc_to)==list
msg['cc']=COMMASPACE.join(cc_to)
send_to.extend(cc_to)
msg['Date']=formatdate(localtime=True)
msg['Subject']=subject
msg.attach(MIMEText(text))
for f in files:
part=MIMEBase('application', "octet-stream")
part.set_payload(open(f, "rb").read())
Encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="%s"'%Header(os.path.basename(f), 'utf-8'))
msg.attach(part)
smtp=smtplib.SMTP(server)
smtp.sendmail(send_from, send_to, msg.as_string())
smtp.close()
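# Editor's note (not part of the original module): an illustrative call with
# hypothetical addresses, attachment and server --
#   sendEmail('noreply@example.com', ['ops@example.com'], 'Report',
#             'See attachment', files=['/tmp/report.csv'], server='smtp.example.com')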
def serveFile(fileName, contentType="application/x-download", contentDisposition="attachment", charset="utf-8"):
response.headers['Content-type']='application/x-download' if not contentType else contentType
#response.headers['Content-Disposition']="%s;filename=%s"%(contentDisposition, Header(os.path.basename(fileName), charset))
response.headers['Content-Disposition']="%s;filename=%s"%(contentDisposition, os.path.basename(fileName).encode('utf-8'))
f=open(fileName, 'rb')
content="".joi
|
n(f.readlines())
f.close()
return content
def defaultIfNone(blackList=[None, ], default=""):
def returnFun(value):
defaultValue=default() if callable(default) else default
if value in blackList:
return defaultValue
else:
try:
                return str(value)
except:
try:
return repr(value)
except:
pass
return defaultValue
return returnFun
def _get_params_from_args_and_obj(keys, obj, ** args):
params = {}
for i in keys:
if type(i) == dict:
params.update(i)
else:
i, j = i if ((type(i) == list or type(i) == tuple) and len(i) == 2) else (i, i)
if args.get(j) != None:
params[i] = args.get(j)
elif obj.__dict__.get(j) != None:
params[i] = obj.__dict__[j]
return params
def _orderly_dict(list, coor):
new_list = {}
for i in list:
if new_list.get(i.get(coor)):
new_list[i.get(coor)].append(i)
else:
new_list.update({i.get(coor):[i]})
new_dict = []
for key, value in new_list.iteritems():
new_dict.extend(value)
return new_dict
def _get_lastday_of_month(date_str):
date_str = date_str.split(".")
last_day = calendar.monthrange(int(date_str[0]), int(date_str[1]))[1]
return datetime.datetime.strptime("%s.%s.%s" % (date_str[0], date_str[1], last_day if len(date_str) < 3 else date_str[2]), "%Y.%m.%d")
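# Editor's note (not part of the original module): a hedged example of
# _get_lastday_of_month above -- "2016.02" yields datetime(2016, 2, 29), the
# last day of that month, while "2016.02.10" keeps the explicit day and yields
# datetime(2016, 2, 10).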
class CRef(object):
def __init__(self):
self.file = os.path.join(os.path.abspath(os.path.curdir), 'data', "ref.pickle")
def save(self, **kwargs):
pickle.dump(kwargs, open(self.file, "w"))
def get(self, head_type):
refTime = dt.now().strftime('%Y%m')[2:]
if os.path.isfile(self.file):
obj = pickle.load(open(self.file, 'r'))
r = obj.get(head_type, 0)
if r and r != 0 and str(r)[:4] != refTime:
r = 0
else:
r = int(r[4:]) if isinstance(r, str) else 0
r = "%s%06d" % (refTime, r + 1)
obj.update({head_type:r})
self.save(**obj)
return r
else:
r = "%s%06d" % (refTime, 1)
self.save(**{head_type:r})
return r
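# Editor's note (not part of the original module): CRef.get hands out running
# reference numbers keyed by a year/month prefix. Illustratively, for a
# hypothetical head_type 'INV' in August 2017 the first call returns
# '1708000001' and the next '1708000002'; the counter starts over at 000001
# once the stored prefix no longer matches the current month.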
null2blank=defaultIfNone(blackList=[None, "NULL", "null", "None"])
numberAlpha=[str(a) for a in range(10)]
lowerAlpha=[chr(a) for a in range(ord("a"), ord("z")+1)]
upperAlpha=[chr(a) for a in range(ord("A"), ord("Z")+1)]
allAlpha=numberAlpha+lowerAlpha+upperAlpha
gerRandomStr=lambda str_length, randomRange=numberAlpha : "".join(random.sample(randomRange, str_length))
|
Valloric/YouCompleteMe
|
python/ycm/tests/mock_utils.py
|
Python
|
gpl-3.0
| 4,496
| 0.013568
|
# Copyright (C) 2017 YouCompleteMe contributors
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
from unittest import mock
import requests
class FakeResponse:
"""A fake version of a requests response object, just about suitable for
mocking a server response. Not usually used directly. See
MockServerResponse* methods"""
def __init__( self, response, exception ):
self._json = response
self._exception = exception
self.status_code = requests.codes.ok
self.text = not exception
def json( self ):
if self._exception:
return None
return self._json
def raise_for_status( self ):
if self._exception:
raise self._exception
class FakeFuture:
"""A fake version of a future response object, just about suitable for
mocking a server response as generated by PostDataToHandlerAsync.
Not usually used directly. See MockAsyncServerResponse* methods"""
def __init__( self, done, response = None, exception = None ):
self._done = done
if not done:
self._result = None
else:
self._result = FakeResponse( response, exception )
def done( self ):
    return self._done
def result( self ):
return self._result
def MockAsyncServerResponseDone( response ):
"""Return a MessagePoll containing a fake future object that is complete with
the supplied response message. Suitable for mocking a response future within
a client request. For example:
with MockVimBuffers( [ current_buffer ], [ current_buffer ], ( 1, 1 ) ) as v:
mock_response = MockAsyncServerResponseDone( response )
with patch.dict( ycm._message_poll_requests, {} ):
ycm._message_poll_requests[ filetype ] = MessagesPoll( v.current.buffer )
ycm._message_poll_requests[ filetype ]._response_future = mock_response
# Needed to keep a reference to the mocked dictionary
mock_future = ycm._message_poll_requests[ filetype ]._response_future
ycm.OnPeriodicTick() # Uses ycm._message_poll_requests[ filetype ] ...
"""
return mock.MagicMock( wraps = FakeFuture( True, response ) )
def MockAsyncServerResponseInProgress():
"""Return a fake future object that is incomplete. Suitable for mocking a
response future within a client request. For example:
with MockVimBuffers( [ current_buffer ], [ current_buffer ], ( 1, 1 ) ) as v:
mock_response = MockAsyncServerResponseInProgress()
with patch.dict( ycm._message_poll_requests, {} ):
ycm._message_poll_requests[ filetype ] = MessagesPoll( v.current.buffer )
ycm._message_poll_requests[ filetype ]._response_future = mock_response
# Needed to keep a reference to the mocked dictionary
mock_future = ycm._message_poll_requests[ filetype ]._response_future
ycm.OnPeriodicTick() # Uses ycm._message_poll_requests[ filetype ] ...
"""
return mock.MagicMock( wraps = FakeFuture( False ) )
def MockAsyncServerResponseException( exception ):
"""Return a fake future object that is complete, but raises an exception.
Suitable for mocking a response future within a client request. For example:
with MockVimBuffers( [ current_buffer ], [ current_buffer ], ( 1, 1 ) ) as v:
mock_response = MockAsyncServerResponseException( exception )
with patch.dict( ycm._message_poll_requests, {} ):
ycm._message_poll_requests[ filetype ] = MessagesPoll( v.current.buffer )
ycm._message_poll_requests[ filetype ]._response_future = mock_response
# Needed to keep a reference to the mocked dictionary
mock_future = ycm._message_poll_requests[ filetype ]._response_future
ycm.OnPeriodicTick() # Uses ycm._message_poll_requests[ filetype ] ...
"""
return mock.MagicMock( wraps = FakeFuture( True, None, exception ) )
# TODO: In future, implement MockServerResponse and MockServerResponseException
# for synchronous cases when such test cases are needed.
|
callowayproject/django-tweeter
|
django_oauth_twitter/__init__.py
|
Python
|
mit
| 149
| 0
|
ACCESS_KEY = 'twitter_access_token'
REQUEST_KEY = 'twitter_request_token'
SUCCESS_URL_KEY = 'twitter_success_url'
USERINFO_KEY = 'twitter_user_info'
|
z0rr0/eshop
|
shop/sales/migrations/0001_initial.py
|
Python
|
mit
| 4,179
| 0.004786
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-13 15:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='category name', max_length=200, unique=True, verbose_name='name')),
('desc', models.TextField(help_text='category description', verbose_name='description')),
('modified', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': 'Category',
'ordering': ['name'],
'verbose_name_plural': 'Categories',
},
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('desc', models.TextField(blank=True, help_text="order's description", verbose_name='description')),
('status', models.PositiveIntegerField(choices=[(0, 'preparation'), (1, 'sent'), (2, 'received')], default=0, help_text="order's status", verbose_name='status')),
('modified', models.DateTimeField(auto_now=True, db_index=True)),
('created', models.DateTimeField(auto_now_add=True, db_index=True)),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Customer', verbose_name='customer')),
],
options={
'verbose_name': 'Order',
                'ordering': ['-modified', '-created'],
'verbose_name_plural': 'Orders',
},
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, help_text='product name', max_length=200, verbose_name='name')),
('price', models.FloatField(db_index=True, help_text="product's price", verbose_name='price')),
('image', models.ImageField(help_text="product's image", upload_to='images/', verbose_name='image')),
('desc', models.TextField(help_text="product's description", verbose_name='description')),
('modified', models.DateTimeField(auto_now=True)),
('created', models.DateTimeField(auto_now_add=True)),
('category', models.ForeignKey(help_text="product's category", on_delete=django.db.models.deletion.CASCADE, to='sales.Category', verbose_name='category')),
],
options={
'verbose_name': 'Product',
'ordering': ['name'],
'verbose_name_plural': 'Products',
},
),
migrations.CreateModel(
name='ProductSet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('number', models.PositiveIntegerField(default=1, verbose_name='number')),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sales.Order', verbose_name='Order')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sales.Product', verbose_name='Product')),
],
options={
'verbose_name': 'ProductSet',
'ordering': ['id'],
'verbose_name_plural': 'ProductSets',
},
),
migrations.AddField(
model_name='order',
name='product',
field=models.ManyToManyField(blank=True, through='sales.ProductSet', to='sales.Product', verbose_name='Product'),
),
]
georgemarshall/django | django/db/backends/mysql/compiler.py | Python | bsd-3-clause | 1,599 | 0.001251
from django.db.models.sql import compiler
class SQLCompiler(compiler.SQLCompiler):
def as_subquery_condition(self, alias, columns, compiler):
qn = compiler.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
sql, params = self.as_sql()
return '(%s) IN (%s)' % (', '.join('%s.%s' % (qn(alias), qn2(column)) for column in columns), sql), params
class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler):
pass
class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler):
def as_sql(self):
if self.connection.features.update_can_self_select or self.single_alias:
return super().as_sql()
        # MySQL and MariaDB < 10.3.2 don't support deletion with a subquery
# which is what the default implementation of SQLDeleteCompiler uses
# when multiple tables are involved. Use the MySQL/MariaDB specific
# DELETE table FROM table syntax instead to avoid performing the
# operation in two queries.
result = [
'DELETE %s FROM' % self.quote_name_unless_alias(
self.query.get_initial_alias()
)
]
from_sql, from_params = self.get_from_clause()
result.extend(from_sql)
where, params = self.compile(self.query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(from_params) + tuple(params)
class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler):
pass
class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler):
pass
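A rough sketch (not part of the Django source above) of the single-statement form this as_sql() override emits on MySQL/MariaDB < 10.3.2; the table and column names are hypothetical:
# e.g. Book.objects.filter(author__name='X').delete() compiles to roughly
#   DELETE `app_book` FROM `app_book`
#   INNER JOIN `app_author` ON (`app_book`.`author_id` = `app_author`.`id`)
#   WHERE `app_author`.`name` = %s
# i.e. "DELETE <alias> FROM <tables ...>" rather than a DELETE with a subquery.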
catapult-project/catapult | tracing/tracing_build/vulcanize_histograms_viewer.py | Python | bsd-3-clause | 912 | 0.006579
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import tracing_project
from py_vulcanize import generate
from tracing_build import render_histograms_viewer
def VulcanizeHistogramsViewer():
"""Vulcanizes Histograms viewer with its dependencies.
Args:
path: destination to write the vulcanized viewer HTML.
"""
vulcanizer = tracing_project.TracingProject().CreateVulcanizer()
load_sequence = vulcanizer.CalcLoadSequenceForModuleNames(
      ['tracing_build.histograms_viewer'])
return generate.GenerateStandaloneHTMLAsString(load_sequence)
def VulcanizeAndRenderHistogramsViewer(
histogram_dicts, output_stream, reset_results=False):
render_histograms_viewer.RenderHistogramsViewer(
      histogram_dicts, output_stream, reset_results,
VulcanizeHistogramsViewer())
rockwolf/python | cece/app/views.py | Python | bsd-3-clause | 4,840 | 0.022727
"""
See LICENSE file for copyright and license details.
"""
from app import app
from flask import render_template, flash, redirect
#from app.forms import LoginForm
from app.modules.constant import *
@app.route("/")
@app.route("/index")
@app.route("/index/")
@app.route("/<app_profile>/index")
@app.route("/<app_profile>/index/")
@app.route("/<app_profile>")
@app.route("/<app_profile>/")
def index(app_profile = AppProfile.PERSONAL):
"""
Index page
"""
user = { 'login': 'rockwolf' } # fake user
if app_profile == '':
app_profile = 'personal'
return render_template("index.html",
title = 'Central command entity',
user = user,
app_profile = app_profile.lower())
@app.route("/report_finance")
@app.route("/report_finance/")
@app.route("/<app_profile>/report_finance")
@app.route("/<app_profile>/report_finance/")
def report_finance(app_profile = AppProfile.PERSONAL):
"""
Financial reports.
"""
# Make reports per year in pdf (gnucash) and put links to them here.
return('TBD');
@app.route("/trading_journal")
@app.route("/trading_journal/")
@app.route("/<app_profile>/trading_journal")
@app.route("/<app_profile>/trading_journal/")
def trading_journal(app_profile = AppProfile.PERSONAL):
"""
Trading Journal
"""
if app_profile == AppProfile.ZIVLE:
        return render_template("trading_journal.html",
title = 'Trading Journal',
user = user,
app_profile = app_profile.lower())
else:
return render_template("404.html",
title = '404')
@app.route("/contact")
@app.route("/contact/")
@app.route("/<app_profile>/contact")
@app.route("/<app_profile>/contact/")
def contact(app_profile = AppProfile.PERSONAL):
"""
Address book.
"""
# Try to sync this with abook? Can abook export them?
return('TBD');
@app.route("/task")
@app.route("/task/")
@app.route("/<app_profile>/task")
@app.route("/<app_profile>/task/")
def task(app_profile = AppProfile.PERSONAL):
"""
Task and schedule information.
"""
# TODO: generate output of reminders and put it in a new text-file,
# e.g. remind ~/.reminders -c etc.
# TODO: where to schedule the reminders.txt generation?
    error = False
    if app_profile == AppProfile.ZIVLE:
        task_file = TaskFile.ZIVLE
        reminder_file = ReminderFile.ZIVLE
    elif app_profile == AppProfile.PERSONAL:
        task_file = TaskFile.PERSONAL
        reminder_file = ReminderFile.PERSONAL
    else:
        error = True
if not error:
return render_template("task.html",
title = 'Tasks',
user = user,
app_profile = app_profile.lower(),
tasks = load_lines(task_file),
reminders = load_lines(reminder_file)
)
else:
return render_template("404.html",
title = '404')
@app.route('/login', methods = ['GET', 'POST'])
@app.route('/login/', methods = ['GET', 'POST'])
def login():
form = LoginForm()
return render_template('login.html',
title = 'Sign In',
form = form)
@app.route("/links")
@app.route("/links/")
@app.route("/<app_profile>/links")
@app.route("/<app_profile>/links/")
def links(app_profile = AppProfile.PERSONAL):
"""
Link bookmarks.
"""
user = { 'login': 'rockwolf' } # fake user
# Try to read from text-files and build links dynamically
# Format: data/<profile>/links.txt
# Textfile format: <url>;<name>;<description>
#TODO: put links_file in constant.py
#or find a more general way to configure files?
#links_file = 'C:\\Users\\AN\\home\\other\\Dropbox\\cece\\app\\data\\' + app_profile + '\\links.txt'
links_file = '/home/rockwolf/Dropbox/cece/app/data/' + app_profile + '/links.txt'
links_full = load_lines(links_file)
links = []
for link_full in links_full:
links.append(link_full.split(';'))
links.sort(key=lambda k: k[1])
categories = []
for link in links:
if link[1] not in categories:
categories.append(link[1])
return render_template("links.html",
title = 'Bookmarks',
user = user,
app_profile = app_profile.lower(),
categories = categories,
total = len(links),
links = links
)
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html',
title = '404'), 404
def load_lines(text_file):
"""
Reads the text file and returns a list of lines.
"""
lines = []
with open(text_file, encoding='utf-8') as text:
for line in text:
lines.append(line.strip())
return lines
hip-odoo/odoo | addons/l10n_nl/__init__.py | Python | agpl-3.0 | 158 | 0
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright (C) 2016 Onestein (<http://www.onestein.eu>).
rvswift/BlastNFilter | build/lib/BlastNFilter/Utilities/Run.py | Python | bsd-3-clause | 827 | 0.001209
__author__ = 'robswift'
__project__ = 'blastnfilter'
import os
from BlastNFilter.PreRelease import ParsePreRelease
from BlastNFilter.Blast import ParseAlignment
import OutPut
def run(options):
non_polymer = options.non_polymer
polymer = options.polymer
    out = options.out
blast_dir = os.path.abspath(options.blast_db)
pdb_db = os.path.join(blast_dir, 'pdb_db')
fasta = os.path.join(blast_dir, 'pdb_seqres.txt')
target_list = ParsePreRelease.add_ligands(non_polymer)
target_list = ParsePreRelease.add_sequences(polymer, target_list)
#new = [x for x in target_list if x.get_pdb_id().lower() == '2n02']
target_list = ParseAlignment.blast_the_targets(target_list, pdb_db, fasta)
target_list = ParseAlignment.remove_multiple_dockers(target_list)
OutPut.write_csv(target_list, out)
fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractBijinsans.py | Python | bsd-3-clause | 387 | 0.023256
def extractBijinsans(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
    if 'Benkyou no Kamisama wa Hitomishiri' in item['tags']:
return buildReleaseMessageWithType(item, 'Benkyou no Kamisama wa Hitomishiri', vol, chp, frag=frag, postfix=postfix)
return False
arcticshores/kivy | kivy/__init__.py | Python | mit | 14,620 | 0.000547
'''
Kivy framework
==============
Kivy is an open source library for developing multi-touch applications. It is
completely cross-platform (Linux/OSX/Win) and released under the terms of the
MIT License.
It comes with native support for many multi-touch input devices, a growing
library of multi-touch aware widgets and hardware accelerated OpenGL drawing.
Kivy is designed to let you focus on building custom and highly interactive
applications as quickly and easily as possible.
With Kivy, you can take full advantage of the dynamic nature of Python. There
are thousands of high-quality, free libraries that can be integrated in your
application. At the same time, performance-critical parts are implemented
in the C language.
See http://kivy.org for more information.
'''
__all__ = (
'require',
'kivy_configure', 'kivy_register_post_configuration',
'kivy_options', 'kivy_base_dir',
'kivy_modules_dir', 'kivy_data_dir', 'kivy_shader_dir',
'kivy_icons_dir', 'kivy_home_dir', 'kivy_userexts_dir',
'kivy_config_fn', 'kivy_usermodules_dir',
)
__version__ = '1.9.1-dev'
import sys
import shutil
from getopt import getopt, GetoptError
from os import environ, mkdir, pathsep
from os.path import dirname, join, basename, exists, expanduser, isdir
from kivy.logger import Logger, LOG_LEVELS
from kivy.utils import platform
# internals for post-configuration
__kivy_post_configuration = []
if platform == 'macosx' and sys.maxsize < 9223372036854775807:
r = '''Unsupported Python version detected!:
Kivy requires a 64 bit version of Python to run on OS X. We strongly
advise you to use the version of Python that is provided by Apple
(don't use ports, fink or homebrew unless you know what you're
doing).
See http://kivy.org/docs/installation/installation-macosx.html for
details.
'''
Logger.critical(r)
def require(version):
'''Require can be used to check the minimum version required to run a Kivy
application. For example, you can start your application code like this::
import kivy
kivy.require('1.0.1')
If a user attempts to run your application with a version of Kivy that is
older than the specified version, an Exception is raised.
The Kivy version string is built like this::
X.Y.Z[-tag[-tagrevision]]
X is the major version
Y is the minor version
Z is the bugfixes revision
The tag is optional, but may be one of 'dev', 'alpha', or 'beta'.
The tagrevision is the revision of the tag.
.. warning::
You must not ask for a version with a tag, except -dev. Asking for a
'dev' version will just warn the user if the current Kivy
version is not a -dev, but it will never raise an exception.
You must not ask for a version with a tagrevision.
'''
def parse_version(version):
# check for tag
tag = None
tagrev = None
if '-' in version:
l = version.split('-')
if len(l) == 2:
version, tag = l
elif len(l) == 3:
version, tag, tagrev = l
else:
raise Exception('Revision format must be X.Y.Z[-tag]')
# check x y z
l = version.split('.')
if len(l) != 3:
raise Exception('Revision format must be X.Y.Z[-tag]')
return [int(x) for x in l], tag, tagrev
# user version
revision, tag, tagrev = parse_version(version)
# current version
sysrevision, systag, systagrev = parse_version(__version__)
# ensure that the required version don't contain tag, except dev
if tag not in (None, 'dev'):
raise Exception('Revision format must not have any tag except "dev"')
if tag == 'dev' and systag != 'dev':
Logger.warning('Application requested a -dev version of Kivy. '
'(You have %s, but the application requires %s)' % (
__version__, version))
# not tag rev (-alpha-1, -beta-x) allowed.
if tagrev is not None:
raise Exception('Revision format must not contain any tagrevision')
# finally, checking revision
if sysrevision < revision:
raise Exception('The version of Kivy installed on this system '
'is too old. '
'(You have %s, but the application requires %s)' % (
__version__, version))
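# Illustrative examples (not in the original Kivy source) of how the version
# strings described above are handled by parse_version():
#   parse_version('1.9.1')        -> ([1, 9, 1], None, None)
#   parse_version('1.9.1-dev')    -> ([1, 9, 1], 'dev', None)
#   parse_version('1.9.1-beta-2') -> ([1, 9, 1], 'beta', '2')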
def kivy_configure():
'''Call post-configuration of Kivy.
This function must be called if you create the window yourself.
'''
for callback in __kivy_post_configuration:
callback()
def kivy_register_post_configuration(callback):
'''Register a function to be called when kivy_configure() is called.
.. warning::
Internal use only.
'''
__kivy_post_configuration.append(callback)
def kivy_usage():
'''Kivy Usage: %s [OPTION...]::
-h, --help
Prints this help message.
-d, --debug
Shows debug log.
-a, --auto-fullscreen
Force 'auto' fullscreen mode (no resolution change).
Uses your display's resolution. This is most likely what you want.
-c, --config section:key[:value]
Set a custom [section] key=value in the configuration object.
-f, --fullscreen
Force running in fullscreen mode.
-k, --fake-fullscreen
Force 'fake' fullscreen mode (no window border/decoration).
Uses the resolution specified by width and height in your config.
-w, --windowed
Force running in a window.
-p, --provider id:provider[,options]
Add an input provider (eg: ccvtable1:tuio,192.168.0.1:3333).
-m mod, --module=mod
Activate a module (use "list" to get a list of available modules).
-r, --rotation
Rotate the window's contents (0, 90, 180, 270).
-s, --save
Save current Kivy configuration.
--size=640x480
Size of window geometry.
--dpi=96
Manually overload the Window DPI (for testing only.)
'''
print(kivy_usage.__doc__ % (basename(sys.argv[0])))
#: Global settings options for kivy
kivy_options = {
'window': ('egl_rpi', 'sdl2', 'pygame', 'sdl', 'x11'),
'text': ('pil', 'sdl2', 'pygame', 'sdlttf'),
'video': (
'gstplayer', 'ffmpeg', 'ffpyplayer', 'gi', 'pygst', 'pyglet',
'null'),
'audio': (
'gstplayer', 'pygame', 'gi', 'pygst', 'ffpyplayer', 'sdl2',
'avplayer'),
'image': ('tex', 'imageio', 'dds', 'gif', 'sdl2', 'pygame', 'pil', 'ffpy'),
'camera': ('opencv', 'gi', 'pygst', 'videocapture', 'avfoundation', 'android'),
'spelling': ('enchant', 'osxappkit', ),
'clipboard': (
'android', 'winctypes', 'xsel', 'xclip', 'dbusklipper', 'nspaste',
'sdl2', 'pygame', 'dummy', 'gtk3', )}
# Read environment
for option in kivy_options:
key = 'KIVY_%s' % option.upper()
if key in environ:
try:
if type(kivy_options[option]) in (list, tuple):
kivy_options[option] = environ[key].split(',')
else:
kivy_options[option] = environ[key].lower() in \
                    ('true', '1', 'yes', 'yup')
except Exception:
Logger.warning('Core: Wrong value for %s environment key' % key)
Logger.exception('')
# Extract all needed path in kivy
#: Kivy directory
kivy_base_dir = dirname(sys.modules[__name__].__file__)
#: Kivy modules directory
kivy_modules_dir = environ.get('KIVY_MODULES_DIR',
join(kivy_base_dir, 'modules'))
#: Kivy extension directory
kivy_exts_dir = environ.get('KIVY_EXTS_DIR',
join(kivy_base_dir, 'extensions'))
#: Kivy data directory
kivy_data_dir = environ.get('KIVY_DATA_DIR',
join(kivy_base_dir, 'data'))
#: Kivy binary deps directory
kivy_binary_deps_dir = environ.get('KIVY_BINARY_DEPS',
join(kivy_base_dir, 'binary_deps'))
#: Kivy glsl shader directory
kivy_shader_dir = join(kivy_data_dir, 'glsl')
derekjchow/models | research/object_detection/utils/config_util.py | Python | apache-2.0 | 36,470 | 0.006581
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for reading and updating configuration files."""
import os
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.python.lib.io import file_io
from object_detection.protos import eval_pb2
from object_detection.protos import graph_rewriter_pb2
from object_detection.protos import input_reader_pb2
from object_detection.protos import model_pb2
from object_detection.protos import pipeline_pb2
from object_detection.protos import train_pb2
def get_image_resizer_config(model_config):
"""Returns the image resizer config from a model config.
Args:
model_config: A model_pb2.DetectionModel.
Returns:
An image_resizer_pb2.ImageResizer.
Raises:
ValueError: If the model type is not recognized.
"""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "faster_rcnn":
return model_config.faster_rcnn.image_resizer
if meta_architecture == "ssd":
    return model_config.ssd.image_resizer
raise ValueError("Unknown model type: {}".format(meta_architecture))
def get_spatial_image_size(image_resizer_config):
"""Returns expected spatial size of the output image from a given config.
Args:
image_resizer_config: An image_resizer_pb2.ImageResizer.
Returns:
    A list of two integers of the form [height, width]. `height` and `width` are
set -1 if they cannot be determined during graph construction.
Raises:
ValueError: If the model type is not recognized.
"""
if image_resizer_config.HasField("fixed_shape_resizer"):
return [
image_resizer_config.fixed_shape_resizer.height,
image_resizer_config.fixed_shape_resizer.width
]
if image_resizer_config.HasField("keep_aspect_ratio_resizer"):
if image_resizer_config.keep_aspect_ratio_resizer.pad_to_max_dimension:
return [image_resizer_config.keep_aspect_ratio_resizer.max_dimension] * 2
else:
return [-1, -1]
if image_resizer_config.HasField("identity_resizer"):
return [-1, -1]
raise ValueError("Unknown image resizer type.")
def get_configs_from_pipeline_file(pipeline_config_path, config_override=None):
"""Reads config from a file containing pipeline_pb2.TrainEvalPipelineConfig.
Args:
pipeline_config_path: Path to pipeline_pb2.TrainEvalPipelineConfig text
proto.
config_override: A pipeline_pb2.TrainEvalPipelineConfig text proto to
override pipeline_config_path.
Returns:
Dictionary of configuration objects. Keys are `model`, `train_config`,
`train_input_config`, `eval_config`, `eval_input_config`. Value are the
corresponding config objects.
"""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
with tf.gfile.GFile(pipeline_config_path, "r") as f:
proto_str = f.read()
text_format.Merge(proto_str, pipeline_config)
if config_override:
text_format.Merge(config_override, pipeline_config)
return create_configs_from_pipeline_proto(pipeline_config)
def create_configs_from_pipeline_proto(pipeline_config):
"""Creates a configs dictionary from pipeline_pb2.TrainEvalPipelineConfig.
Args:
pipeline_config: pipeline_pb2.TrainEvalPipelineConfig proto object.
Returns:
Dictionary of configuration objects. Keys are `model`, `train_config`,
`train_input_config`, `eval_config`, `eval_input_configs`. Value are
the corresponding config objects or list of config objects (only for
eval_input_configs).
"""
configs = {}
configs["model"] = pipeline_config.model
configs["train_config"] = pipeline_config.train_config
configs["train_input_config"] = pipeline_config.train_input_reader
configs["eval_config"] = pipeline_config.eval_config
configs["eval_input_configs"] = pipeline_config.eval_input_reader
# Keeps eval_input_config only for backwards compatibility. All clients should
# read eval_input_configs instead.
if configs["eval_input_configs"]:
configs["eval_input_config"] = configs["eval_input_configs"][0]
if pipeline_config.HasField("graph_rewriter"):
configs["graph_rewriter_config"] = pipeline_config.graph_rewriter
return configs
def get_graph_rewriter_config_from_file(graph_rewriter_config_file):
"""Parses config for graph rewriter.
Args:
graph_rewriter_config_file: file path to the graph rewriter config.
Returns:
graph_rewriter_pb2.GraphRewriter proto
"""
graph_rewriter_config = graph_rewriter_pb2.GraphRewriter()
with tf.gfile.GFile(graph_rewriter_config_file, "r") as f:
text_format.Merge(f.read(), graph_rewriter_config)
return graph_rewriter_config
def create_pipeline_proto_from_configs(configs):
"""Creates a pipeline_pb2.TrainEvalPipelineConfig from configs dictionary.
This function performs the inverse operation of
create_configs_from_pipeline_proto().
Args:
configs: Dictionary of configs. See get_configs_from_pipeline_file().
Returns:
A fully populated pipeline_pb2.TrainEvalPipelineConfig.
"""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.model.CopyFrom(configs["model"])
pipeline_config.train_config.CopyFrom(configs["train_config"])
pipeline_config.train_input_reader.CopyFrom(configs["train_input_config"])
pipeline_config.eval_config.CopyFrom(configs["eval_config"])
pipeline_config.eval_input_reader.extend(configs["eval_input_configs"])
if "graph_rewriter_config" in configs:
pipeline_config.graph_rewriter.CopyFrom(configs["graph_rewriter_config"])
return pipeline_config
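# Illustrative round trip (not part of the original file) tying the helpers
# above together; the config path is hypothetical:
#   configs = get_configs_from_pipeline_file('/tmp/pipeline.config')
#   pipeline_proto = create_pipeline_proto_from_configs(configs)
#   # pipeline_proto now mirrors the original TrainEvalPipelineConfig proto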
def save_pipeline_config(pipeline_config, directory):
"""Saves a pipeline config text file to disk.
Args:
pipeline_config: A pipeline_pb2.TrainEvalPipelineConfig.
directory: The model directory into which the pipeline config file will be
saved.
"""
if not file_io.file_exists(directory):
file_io.recursive_create_dir(directory)
pipeline_config_path = os.path.join(directory, "pipeline.config")
config_text = text_format.MessageToString(pipeline_config)
with tf.gfile.Open(pipeline_config_path, "wb") as f:
tf.logging.info("Writing pipeline config file to %s",
pipeline_config_path)
f.write(config_text)
def get_configs_from_multiple_files(model_config_path="",
train_config_path="",
train_input_config_path="",
eval_config_path="",
eval_input_config_path="",
graph_rewriter_config_path=""):
"""Reads training configuration from multiple config files.
Args:
model_config_path: Path to model_pb2.DetectionModel.
train_config_path: Path to train_pb2.TrainConfig.
train_input_config_path: Path to input_reader_pb2.InputReader.
eval_config_path: Path to eval_pb2.EvalConfig.
eval_input_config_path: Path to input_reader_pb2.InputReader.
graph_rewriter_config_path: Path to graph_rewriter_pb2.GraphRewriter.
Returns:
Dictionary of configuration objects. Keys are `model`, `train_config`,
`train_input_config`, `eval_config`, `eval_input_config`. Key/Values are
returned only for valid (non-empty) strings.
"""
configs = {}
if model_config_path:
model_config = model_pb2.DetectionModel()
with tf.gfile.GFile(model_config_path, "r") as f:
text_format.Merge(f.read(), model_config)
configs["model"] = model_config
if train_config_path:
train_config = train_pb2.TrainConfig()
with tf.gfi
steve-ord/daliuge | daliuge-engine/dlg/manager/__init__.py | Python | lgpl-2.1 | 1,112 | 0
#
# ICRAR - International Centre for Radio Astronomy Research
# (c) UWA - The University of Western Australia, 2015
# Copyright by UWA (in the framework of the ICRAR)
# All rights reserved
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
"""
This package contains all python modules implementing the DROP
Manager concepts, including their external interface, a web UI and a client
"""
sagittarian/personal-sort | sort.py | Python | mit | 4,539 | 0.003745
#!/usr/bin/env python3
'''A simple implementation of a sorting algorithm, meant to allow
people to manually rank a list of items using whatever subjective or
objective criteria they want.
This program can be called as a script and used interactively. You
can provide the list of things to sort as command line arguments, or
if there are no arguments provided, you can provide the list in stdin,
one item per line.
Example run:
$ ./sort.py 'ice cream' falafel hamburgers pizza
Which is greater, falafel or ice cream (<, =, or >)? <
Which is greater, hamburgers or ice cream (<, =, or >)? <
Which is greater, hamburgers or falafel (<, =, or >)? >
Which is greater, pizza or hamburgers (<, =, or >)? >
Which is greater, pizza or ice cream (<, =, or >)? <
* ice cream
* pizza
* hamburgers
* falafel
Author: Adam Mesha <adam@mesha.org>
License: MIT
'''
from functools import cmp_to_key
class memoize:
'''We really want to be sure that we don't ask people to compare the
same two items twice, so we cache the result.
'''
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
key = tuple(args)
if key not in self.cache:
self.cache[key] = self.func(*args)
return self.cache[key]
@memoize
def cmpfunc(a, b):
result = None
s = 'Which is greater, {a} or {b} (<, =, or >)? '.format(a=a, b=b)
while result is None or result not in '<=>':
result = input(s).strip()
return '<=>'.index(result) - 1
keyfunc = cmp_to_key(cmpfunc)
def binary_insertion_sort(seq, keyfunc):
'''Insertion sort, using binary search to insert each element. Runs
in O(n**2) time, but the use case is when a human is manually
deciding on the ordering, so the most important thing is to reduce
the number of comparisons.
'''
def mv(srcidx, dstidx):
        while srcidx > dstidx:
seq[srcidx], seq[srcidx - 1] = seq[srcidx - 1], seq[srcidx]
srcidx -= 1
i = 1
while i < len(seq):
lower = 0; upper = i
while lower < upper:
j = (upper + lower) // 2
key1, key2 = keyfunc(seq[i]), keyfunc(seq[j])
if key1 == key2:
                mv(i, j+1) # XXX this is not stable
i += 1
break
if key1 < key2:
upper = j
else: # >
lower = j + 1
else:
mv(i, upper)
i += 1
class SortableWithHeuristic:
def __init__(self, val, heur):
self.val = val
self.heur = heur
def __str__(self):
return '{val}: {heur}'.format(val=self.val, heur=self.heur)
def __repr__(self):
return '{}(val={}, heur={})'.format(self.__class__.__name__,
repr(self.val),
repr(self.heur))
def get_heuristic_func(val):
result = None
s = 'Give an approximate numeric score to item {}: '.format(val)
while result is None:
try:
result = float(input(s).strip())
except ValueError:
pass
return result
def heuristic_sort(seq, get_heuristic_func, cmpfunc):
def swap(a, b):
seq[a], seq[b] = seq[b], seq[a]
idx = 0
while idx < len(seq):
val = seq[idx]
heur = get_heuristic_func(val)
seq[idx] = SortableWithHeuristic(val, heur)
# find the current location
j = idx
while j > 0 and seq[j].heur < seq[j-1].heur:
swap(j, j-1)
j -= 1
moved = False
while j < idx and cmpfunc(seq[j].val, seq[j+1].val) == 1:
swap(j, j+1)
j += 1
moved = True
if not moved:
while j > 0 and cmpfunc(seq[j].val, seq[j-1].val) == -1:
swap(j, j-1)
j -= 1
if 0 < j < idx:
seq[j].heur = (seq[j-1].heur + seq[j+1].heur) / 2
elif idx > 0:
if j == 0 and seq[j].heur > seq[j+1].heur:
seq[j].heur = seq[j+1].heur - 1
elif j == idx and seq[j].heur < seq[j-1].heur:
seq[j].heur = seq[j-1].heur + 1
idx += 1
def main():
import sys
seq = []
if len(sys.argv) > 1:
seq.extend(sys.argv[1:])
if not seq:
seq.extend(x.strip() for x in sys.stdin.readlines())
heuristic_sort(seq, get_heuristic_func, cmpfunc)
print('\n'.join('* {}'.format(item) for item in reversed(seq)))
if __name__ == '__main__':
main()
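A minimal non-interactive usage sketch (not part of sort.py above); it assumes the module is importable as `sort` and swaps the interactive cmpfunc for an ordinary comparator:
from functools import cmp_to_key
from sort import binary_insertion_sort
def numeric_cmp(a, b):
    # plain comparator standing in for the interactive prompt
    return (a > b) - (a < b)
items = [3, 1, 2]
binary_insertion_sort(items, cmp_to_key(numeric_cmp))
# items is now [1, 2, 3]; main() would print the reversed (descending) order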
mldbai/mldb | testing/MLDB-1594-aggregator-empty-row.py | Python | apache-2.0 | 1,755 | 0.005698
#
# MLDB-1594-aggregator-empty-row.py
# mldb.ai inc, 2016
# this file is part of mldb. copyright 2016 mldb.ai inc. all rights reserved.
#
import unittest
from mldb import mldb, MldbUnitTest, ResponseException
class Mldb1594(MldbUnitTest):
def test_simple(self):
res1 = mldb.query("select {}")
res2 = mldb.query("select sum({*}) named 'result' from (select {})")
self.assertEqual(res1,res2)
def test_multi_row(self):
dataset_config = {
'type' : 'tabular',
'id' : 'toy'
}
dataset = mldb.create_dataset(dataset_config)
dataset.record_row("rowA", [["txt", "hoho things are great!", 0]])
dataset.record_row("rowB", [["txt", "! ", 0]])
dataset.record_row("rowC", [["txt", "things are great, great", 0]])
dataset.commit()
expected = [
["_rowName", "are", "great", "hoho", "things"],
["pwet", 2, 3, 1, 2]
]
# skipping the null row
self.assertTableResultEquals(
mldb.query("""
select sum({*}) as *
named 'pwet'
from (
SELECT tokenize(lower(txt), {splitChars: ' ,.!;:"?', minTokenLength: 2}) as *
from toy
                    where rowName() != 'rowB'
)
"""),
expected)
# passing the empty row (rowB) to sum
self.assertTableResultEquals(
mldb.query("""
select sum({*}) as *
named 'pwet'
from (
SELECT tokenize(lower(txt), {splitChars: ' ,.!;:"?', minTokenLength: 2}) as *
from toy
)
"""),
expected)
mldb.run_tests()
sysadminmatmoz/odoo-clearcorp | TODO-7.0/sneldev_magento/wizard/sneldev_magento_categories_import.py | Python | agpl-3.0 | 1,660 | 0.00241
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import pooler
import os
from export_tools import *
from osv import osv, fields
class wiz_sneldev_categories_import(osv.osv_memory):
_name = 'sneldev.categories.import'
_description = 'Import categories'
_columns = {
}
_defaults = {
}
def do_categories_import(self, cr, uid, ids, context=None):
if (self.pool.get('sneldev.magento').import_categories(cr, uid) < 0):
raise osv.except_osv(('Warning'), ('Import failed, please refer to log file for failure details.'))
return {'type': 'ir.actions.act_window_close'}
wiz_sneldev_categories_import()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
pirata-cat/agora-ciudadana | actstream/actions.py | Python | agpl-3.0 | 3,984 | 0.003012
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.contrib.contenttypes.models import ContentType
from actstream.exceptions import check_actionable_model
def follow(user, obj, send_action=True, actor_only=False, request=None):
"""
Creates a relationship allowing the object's activities to appear in the
user's stream.
Returns the created ``Follow`` instance.
If ``send_action`` is ``True`` (the default) then a
``<user> started following <object>`` action signal is sent.
If ``actor_only`` is ``True`` (the default) then only actions where the
object is the actor will appear in the user's activity stream. Set to
``False`` to also include actions where this object is the action_object or
the target.
Example::
follow(request.user, group, actor_only=False)
"""
from actstream.models import Follow, action
check_actionable_model(obj)
follow, created = Follow.objects.get_or_create(user=user,
object_id=obj.pk,
content_type=ContentType.objects.get_for_model(obj),
actor_only=actor_only)
if send_action and created:
if request:
from django.utils import simplejson as json
            from agora_site.misc.utils import geolocate_ip
action.send(user, verb=_('started following'), target=obj,
ipaddr=request.META.get('REMOTE_ADDR'),
geolocation=json.dumps(geolocate_ip(request.META.get('REMOTE_ADDR'))))
        else:
action.send(user, verb=_('started following'), target=obj)
return follow
def unfollow(user, obj, send_action=False, request=None):
"""
Removes a "follow" relationship.
Set ``send_action`` to ``True`` (``False is default) to also send a
``<user> stopped following <object>`` action signal.
Example::
unfollow(request.user, other_user)
"""
from actstream.models import Follow, action
check_actionable_model(obj)
Follow.objects.filter(user=user, object_id=obj.pk,
content_type=ContentType.objects.get_for_model(obj)).delete()
if send_action:
if request:
from django.utils import simplejson as json
from agora_site.misc.utils import geolocate_ip
action.send(user, verb=_('stopped following'), target=obj,
ipaddr=request.META.get('REMOTE_ADDR'),
geolocation=json.dumps(geolocate_ip(request.META.get('REMOTE_ADDR'))))
else:
action.send(user, verb=_('stopped following'), target=obj)
def is_following(user, obj):
"""
Checks if a "follow" relationship exists.
Returns True if exists, False otherwise.
Example::
is_following(request.user, group)
"""
from actstream.models import Follow
check_actionable_model(obj)
return bool(Follow.objects.filter(user=user, object_id=obj.pk,
content_type=ContentType.objects.get_for_model(obj)).count())
def action_handler(verb, **kwargs):
"""
Handler function to create Action instance upon action signal call.
"""
from actstream.models import Action
kwargs.pop('signal', None)
actor = kwargs.pop('sender')
check_actionable_model(actor)
newaction = Action(
actor_content_type=ContentType.objects.get_for_model(actor),
actor_object_id=actor.pk,
verb=unicode(verb),
public=bool(kwargs.pop('public', True)),
description=kwargs.pop('description', None),
timestamp=kwargs.pop('timestamp', timezone.now()),
geolocation=kwargs.pop('geolocation', None)
)
for opt in ('target', 'action_object'):
obj = kwargs.pop(opt, None)
if not obj is None:
check_actionable_model(obj)
setattr(newaction, '%s_object_id' % opt, obj.pk)
setattr(newaction, '%s_content_type' % opt,
ContentType.objects.get_for_model(obj))
newaction.save()
davidegalletti/koa-proof-of-concept | kag/application/urls.py | Python | agpl-3.0 | 305 | 0.009836
from django.conf.urls import patterns, url
from application import views
urlpatterns = patterns('',
url(r'^$', views.index, name='index'),
url(r'^(?P<application_id>\d+)/$', views.detail, name='detail'),
    url(r'^klogin/(?P<username>\w+)/(?P<password>\w+)/$', views.klogin, name='klogin'),
)
MiltosD/CEF-ELRC | metashare/storage/models.py | Python | bsd-3-clause | 28,959 | 0.007321
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.db import models
# pylint: disable-msg=E0611
from hashlib import md5
from metashare.settings import LOG_HANDLER
from metashare import settings
from os import mkdir
from os.path import exists
import os.path
from uuid import uuid1, uuid4
from xml.etree import ElementTree as etree
from datetime import datetime, timedelta
import logging
import re
from json import dumps, loads
from django.core.serializers.json import DjangoJSONEncoder
import zipfile
from zipfile import ZIP_DEFLATED
from django.db.models.query_utils import Q
import glob
# Setup logging support.
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(LOG_HANDLER)
# ALLOWED_ARCHIVE_EXTENSIONS = ('zip', 'tar.gz', 'gz', 'tgz', 'tar', 'bzip2')
ALLOWED_ARCHIVE_EXTENSIONS = ('zip',)
MAXIMUM_MD5_BLOCK_SIZE = 1024
XML_DECL = re.compile(r'\s*<\?xml version=".+" encoding=".+"\?>\s*\n?',
re.I|re.S|re.U)
# Publication status constants and choice:
INTERNAL = 'i'
INGESTED = 'g'
PUBLISHED = 'p'
STATUS_CHOICES = (
(INTERNAL, 'internal'),
(INGESTED, 'ingested'),
(PUBLISHED, 'published'),
)
# Copy status constants and choice:
MASTER = 'm'
REMOTE = 'r'
PROXY = 'p'
COPY_CHOICES = (
(MASTER, 'master copy'),
(REMOTE, 'remote copy'),
(PROXY, 'proxy copy'))
# attributes to by serialized in the global JSON of the storage object
GLOBAL_STORAGE_ATTS = ['source_url', 'identifier', 'created', 'modified',
'revision', 'publication_status', 'metashare_version', 'deleted']
# attributes to be serialized in the local JSON of the storage object
LOCAL_STORAGE_ATTS = ['digest_checksum', 'digest_modified',
'digest_last_checked', 'copy_status', 'source_node']
def _validate_valid_xml(value):
"""
Checks whether the given value is well-formed and valid XML.
"""
try:
# Try to create an XML tree from the given String value.
_value = XML_DECL.sub(u'', value)
_ = etree.fromstring(_value.encode('utf-8'))
return True
except etree.ParseError, parse_error:
# In case of an exception, we raise a ValidationError.
raise ValidationError(parse_error)
# cfedermann: in case of other exceptions, raise a ValidationError with
# the corresponding error message. This will prevent the exception
# page handler to be shown and is hence more acceptable for end users.
except Exception, error:
raise ValidationError(error)
def _create_uuid():
"""
Creates a unique id from a UUID-1 and a UUID-4, checks for collisions.
"""
# Create new identifier based on a UUID-1 and a UUID-4.
new_id = '{0}{1}'.format(uuid1().hex, uuid4().hex)
# Check for collisions; in case of a collision, create new identifier.
while StorageObject.objects.filter(identifier=new_id):
new_id = '{0}{1}'.format(uuid1().hex, uuid4().hex)
return new_id
# pylint: disable-msg=R0902
class StorageObject(models.Model):
"""
Models an object inside the persistent storage layer.
"""
__schema_name__ = "STORAGEOJBECT"
class Meta:
permissions = (
('can_sync', 'Can synchronize'),
)
source_url = models.URLField(verify_exists=False, editable=False,
default=settings.DJANGO_URL,
help_text="(Read-only) base URL for the server where the master copy of " \
"the associated language resource is located.")
identifier = models.CharField(max_length=64, default=_create_uuid,
editable=False, unique=True, help_text="(Read-only) unique " \
"identifier for this storage object instance.")
created = models.DateTimeField(auto_now_add=True, editable=False,
help_text="(Read-only) creation date for this storage object instance.")
modified = models.DateTimeField(editable=False, default=datetime.now(),
help_text="(Read-only) last modification date of the metadata XML " \
"for this storage object instance.")
checksum = models.CharField(blank=True, null=True, max_length=32,
help_text="(Read-only) MD5 checksum of the binary data for this " \
"storage object instance.")
digest_checksum = models.CharField(blank=True, null=True, max_length=32,
help_text="(Read-only) MD5 checksum of the digest zip file containing the " \
"global serialized storage object and the metadata XML for this " \
"storage object instance.")
digest_modified = models.DateTimeField(editable=False, null=True, blank=True,
help_text="(Read-only) last modification date of digest zip " \
"for this storage object instance.")
digest_last_checked = models.DateTimeField(editable=False, null=True, blank=True,
help_text="(Read-only) last update check date of digest zip " \
"for this storage object instance.")
revision = models.PositiveIntegerField(default=1, help_text="Revision " \
"or version information for this storage object instance.")
metashare_version = models.CharField(max_length=32, editable=False,
default=settings.METASHARE_VERSION,
help_text="(Read-only) META-SHARE version used with the storage object instance.")
def _get_master_copy(self):
return self.copy_status == MASTER
def _set_master_copy(self, value):
if value == True:
self.copy_status = MASTER
else:
self.copy_status = REMOTE
master_copy = property(_get_master_copy, _set_master_copy)
copy_status = models.CharField(default=MASTER, max_length=1, editable=False, choices=COPY_CHOICES,
help_text="Generalized copy status flag for this storage object instance.")
def _get_published(self):
return self.publication_status == PUBLISHED
def _set_published(self, value):
if value == True:
self.publication_status = PUBLISHED
else:
# request to unpublish depends on current state:
# if we are currently published, set to ingested;
# else don't change
if self.publication_status == PUBLISHED:
            self.publication_status = INGESTED
published = property(_get_published, _set_published)
    publication_status = models.CharField(default=INTERNAL, max_length=1, choices=STATUS_CHOICES,
help_text="Generalized publication status flag for this " \
"storage object instance.")
source_node = models.CharField(blank=True, null=True, max_length=32, editable=False,
help_text="(Read-only) id of source node from which the resource " \
"originally stems as set in local_settings.py in CORE_NODES and " \
"PROXIED_NODES; empty if resource stems from this local node")
deleted = models.BooleanField(default=False, help_text="Deletion " \
"status flag for this storage object instance.")
metadata = models.TextField(validators=[_validate_valid_xml],
help_text="XML containing the metadata description for this storage " \
"object instance.")
global_storage = models.TextField(default='not set yet',
help_text="text containing the JSON serialization of global attributes " \
"for this storage object instance.")
local_storage = models.TextField(default='not set yet',
help_text="text containing the JSON serialization of local attributes " \
"for this storage object instance.")
def get_digest_checksum(self):
"""
        Checks if the current digest is still up-to-date, recreates it if
        required, and returns the up-to-date digest checksum.
"""
_expiration_date = _get_expiration_date()
if _expiration_date > self.digest_modified \
and _expiration_date > self.digest_last_checked:
self.update_storage()
return self.digest_checksum
def __unicode__(self):
"""
Re
gnmerritt/casino | casino/handlers.py | Python | mit | 1,275 | 0.000784
from twisted.internet import reactor
from twisted.python import log
from twisted.web.client import Agent
from twisted.web.http_headers import Headers
agent = Agent(reactor)
class LoginHandler(object):
def __init__(self, api_url):
self.api_url = api_url
def handle(self, handler, args):
game, bot = args
def failed(ignored, reason="login failed"):
handler.parent.closeBecause(reason)
def http_succeeded(response):
if response.code == 200:
log.msg("login succeeded")
                try:
                    handler.parent.login_success(game, bot)
except:
log.err()
elif response.code == 401:
failed(response, "Invalid bot key")
elif response.code == 404:
failed(response, "Invalid game")
elif response.code == 410:
failed(response, "Game is full")
else:
failed(response)
url = '{}/api/internal/join/{}?key={}'\
.format(self.api_url, game, bot)
d = agent.request(
'POST', url,
Headers({'User-Agent': ['Plumbing Connector']}), None
)
d.addCallbacks(http_succeeded, failed)
pybursa/homeworks | a_karnauh/hw1/6.py | Python | gpl-2.0 | 354 | 0.04644
#!/usr/bin/python
# -*- coding: utf-8 -*-
from types import *
def typer(x,y):
if type(x) is StringType or type(y) is StringType :
        print u'получена строка'  # "received a string"
    else:
        if x > y:
            print u'больше'  # "greater"
        elif x < y:
            print u'меньше'  # "less"
        else:
            print u'равно'  # "equal"
typer("12", 4)
typer("12", "4")
typer(12, 4)
typer(4, 45)
typer(4, 4)
snowfed/Chessology | run_locally.py | Python | gpl-3.0 | 92 | 0
from bottle import run
from bottle_app import application
run(host='localhost', port=8080)
andaluri/rootio_web | alembic/versions/1cb4253a8bc6_station_analytics_up.py | Python | agpl-3.0 | 472 | 0.006356
"""station_analytics_update
Revision ID: 1cb4253a8bc6
Revises: 2972360b9a6f
Create Date: 2014-04-27 13:01:26.309272
"""
# revision identifiers, used by Alembic.
revision = '1cb4253a8bc6'
down_revision = '2972360b9a6f'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('radio_station', sa.Column('analytic_update_frequency', sa.Float(), nullable=True))
def downgrade():
op.drop_column('radio_station', 'analytic_update_frequency')
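The revision above is applied and reverted with the standard Alembic commands (illustrative, not from the repository):
# alembic upgrade 1cb4253a8bc6    # adds radio_station.analytic_update_frequency
# alembic downgrade 2972360b9a6f  # drops the column again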
rizsotto/Ente | setup.py | Python | bsd-3-clause | 383 | 0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
setup(
name="Ente",
version="0.1",
description="place finder on commoncrawl dataset",
author="László Nagy",
author_email="rizsotto@gmail.com",
license='LICENSE',
url='https://github.com/rizsotto/Ente',
    long_description=open('README.md').read(),
scripts=['bin/ente']
)
moble/scri | scri/LVC/__init__.py | Python | mit | 242 | 0.004132
# Copyright (c) 2018, Michael Boyle
# See LICENSE file for details: <https://github.com/moble/scri/blob/master/LICENSE>
"""Submodule for operating on LVC-format waveform files"""
from .. import __version__
from .file_io import read_from_h5
shaggytwodope/qutebrowser | qutebrowser/misc/__init__.py | Python | gpl-3.0 | 820 | 0
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2016 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Misc. modules."""
dwgill/dspell | dspell/corpus.py | Python | mit | 1,947 | 0.007704
# Copyright (c) 2013 Daniel Gill
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
Created on Oct 24, 2013
@author: Daniel Gill
'''
import re
import os
token_re = r"(\w+'\w+)|(\w+)"
def tokenize(line):
def has_valid_contraction(tup):
return len(tup[0]) > 0
for matching_tuple in re.findall(token_re, line):
string = ""
if has_valid_contraction(matching_tuple):
string = matching_tuple[0]
else:
string = matching_tuple[1]
if len(string) > 1 or string in ['I', 'a']:
yield string.lower()
def process_file(file_path):
with open(name=file_path, mode='r') as open_file:
for line in open_file:
for word in tokenize(line):
yield word
def process_dir(dir_path):
for file_path in os.listdir(dir_path):
for word in process_file(os.path.join(dir_path, file_path)):
yield word
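A small usage sketch (not part of corpus.py above) showing what the tokenizer yields and how the generators compose; the directory path is hypothetical:
list(tokenize("Don't panic, it's only a test line."))
# -> ["don't", 'panic', "it's", 'only', 'a', 'test', 'line']
# from collections import Counter
# word_counts = Counter(process_dir('/path/to/texts'))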
wichovw/tca-gt | server/tca/cellaut.py | Python | mit | 2,703 | 0.009989
# Based on cage v1.1.4
# http://www.alcyone.com/software/cage/
# Copyright (C) 2002-2006 Erik Max Francis <max@alcyone.com>
# GPL License
class Topology:
    """Encapsulation of the shape and dimensionality of a cellular automaton"""
def get(self, address):
raise NotImplementedError
def set(self, address, state):
raise NotImplementedError
def normalize(self, address):
raise NotImplementedError
class Neighborhood:
"""Abstraction of the set of cells adjacent to any given cell"""
def neighbors(self, address):
"""Returns a list of addresses which are neighbors."""
raise NotImplementedError
def states(self, address):
"""Returns the list of cell values for all neighbors"""
return [self.get(x) for x in self.neighbors(address)]
class GridTopology(Topology):
    """A two-dimensional, bounded topology consisting of a rectangular grid
of cells"""
background = 0
border = 0
def __init__(self, size):
self.width, self.height = size
self.buffer = []
for _ in range(self.width):
self.buffer.append([self.background] * self.height)
self.zero = (0, 0)
def normalize(self, address):
x, y = address
if x < 0 or x >= self.width or y < 0 or y >= self.height:
return None
return address
def get(self, address):
addr = self.normalize(address)
if addr:
x, y = addr
return self.buffer[x][y]
else:
return self.border
def set(self, address, state):
addr = self.normalize(address)
if addr:
x, y = addr
self.buffer[x][y] = state
else:
raise IndexError
class ExtendedNeighborhood(Neighborhood):
"""A neighborhood that retrieves a list of states on each direction"""
def states(self, address, max=1):
return [[self.get(i) for i in j] for j in self.neighbors(address, max)]
class Automaton:
"""Abstraction for the actions that can be made over the different cells
and states of a specified map"""
def __init__(self, map):
self.map = map
self.generation = 0
def update(self):
self.generation += 1
class Rule:
"""Definition of rules to follow to change a cell value in an automaton"""
def __init__(self, map, address):
self.populate(map, address)
def populate(self, map, address):
raise NotImplementedError
def apply(self):
raise NotImplementedError
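A minimal sketch (not part of cellaut.py above) of how the abstractions compose; the subclass name and the 4-connected offsets are illustrative assumptions, not taken from the original project:
class VonNeumannGrid(GridTopology, Neighborhood):
    # concrete neighborhood: the four orthogonally adjacent cells
    def neighbors(self, address):
        x, y = address
        return [(x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)]
grid = VonNeumannGrid((3, 3))
grid.set((1, 1), 5)
print(grid.states((0, 1)))  # out-of-bounds neighbors fall back to border -> [0, 5, 0, 0]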
globocom/database-as-a-service | dbaas/physical/tests/test_disk_offering.py | Python | bsd-3-clause | 9,097 | 0.00033
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import logging
from django.core.cache import cache
from django.test import TestCase
from django.contrib import admin
from physical.tests.factory import DiskOfferingFactory, EnvironmentFactory
from physical.errors import NoDiskOfferingGreaterError, NoDiskOfferingLesserError
from system.models import Configuration
from ..admin.disk_offering import DiskOfferingAdmin
from ..forms.disk_offerring import DiskOfferingForm
from ..models import DiskOffering
LOG = logging.getLogger(__name__)
SEARCH_FIELDS = ('name', )
LIST_FIELDS = ('name', 'size_gb', 'selected_environments')
SAVE_ON_TOP = True
UNICODE_FORMAT = '{}'
class DiskOfferingTestCase(TestCase):
def create_basic_disks(self):
for disk_offering in DiskOffering.objects.all():
for plan in disk_offering.plans.all():
plan.databaseinfras.all().delete()
disk_offering.plans.all().delete()
disk_offering.delete()
cache.clear()
self.bigger = DiskOfferingFactory()
self.bigger.size_kb *= 30
self.bigger.environments.add(self.environment)
self.bigger.save()
self.medium = DiskOfferingFactory()
self.medium.size_kb *= 20
self.medium.environments.add(self.environment)
self.medium.save()
self.smaller = DiskOfferingFactory()
self.smaller.size_kb *= 10
self.smaller.environments.add(self.environment)
self.smaller.save()
def setUp(self):
self.admin = DiskOfferingAdmin(DiskOffering, admin.sites.AdminSite())
self.auto_resize_max_size_in_gb = Configuration(
name='auto_resize_max_size_in_gb', value=100
)
self.auto_resize_max_size_in_gb.save()
self.environment = EnvironmentFactory()
def tearDown(self):
if self.auto_resize_max_size_in_gb.id:
self.auto_resize_max_size_in_gb.delete()
def test_search_fields(self):
self.assertEqual(SEARCH_FIELDS, self.admin.search_fields)
def test_list_fields(self):
self.assertEqual(LIST_FIELDS, self.admin.list_display)
def test_save_position(self):
self.assertEqual(SAVE_ON_TOP, self.admin.save_on_top)
def test_adding_gb_to_kb(self):
disk_offering_form = DiskOfferingForm(
data={
'name': 'disk_offering_small',
'size_gb': 0.5,
'environments': [self.environment.id]
}
)
self.assertTrue(disk_offering_form.is_valid())
self.admin.save_model(
request=None, obj=disk_offering_form.instance,
form=disk_offering_form, change=None
)
disk_offering = DiskOffering.objects.get(name='disk_offering_small')
        self.assertEqual(disk_offering.size_gb(), 0.5)
self.assertEqual(disk_offering.size_kb, 524288)
def test_editing_gb_to_kb(self):
disk_factory = DiskOfferingFactory()
disk_offering = DiskOffering.objects.get(pk=disk_factory.pk)
self.assertEqual(disk_offering.size_gb(), 1)
self.assertEqual(disk_offering.size_kb, 1048576)
disk_offering_form = DiskOfferingForm(
data={
'name': disk_offering.name,
'size_gb': 1.5,
'environments': [self.environment.id]
},
instance=disk_offering
)
self.assertTrue(disk_offering_form.is_valid())
self.admin.save_model(
request=None, obj=disk_offering,
form=disk_offering_form, change=None
)
self.assertEqual(disk_offering.size_gb(), 1.5)
self.assertEqual(disk_offering.size_kb, 1572864)
def test_edit_initial_values(self):
disk_offering_form = DiskOfferingForm()
self.assertNotIn('name', disk_offering_form.initial)
self.assertIn('size_gb', disk_offering_form.initial)
self.assertIsNone(disk_offering_form.initial['size_gb'])
disk_factory = DiskOfferingFactory()
disk_offering = DiskOffering.objects.get(pk=disk_factory.pk)
disk_offering_form = DiskOfferingForm(instance=disk_offering)
self.assertEqual(
disk_offering_form.initial['name'], disk_offering.name
)
self.assertEqual(
disk_offering_form.initial['size_gb'], disk_offering.size_gb()
)
def test_model_sizes(self):
disk_factory = DiskOfferingFactory()
self.assertEqual(disk_factory.size_kb, 1048576)
self.assertEqual(disk_factory.size_gb(), 1.0)
self.assertEqual(disk_factory.size_bytes(), 1073741824)
disk_offering = DiskOffering()
self.assertIsNone(disk_offering.size_kb)
self.assertIsNone(disk_offering.size_gb())
self.assertIsNone(disk_offering.size_bytes())
def test_model_converter(self):
disk_factory = DiskOfferingFactory()
self.assertEqual(disk_factory.converter_kb_to_gb(1572864), 1.5)
self.assertEqual(disk_factory.converter_kb_to_bytes(524288), 536870912)
self.assertEqual(disk_factory.converter_gb_to_kb(0.75), 786432)
self.assertIsNone(disk_factory.converter_kb_to_gb(0))
self.assertIsNone(disk_factory.converter_kb_to_bytes(0))
self.assertIsNone(disk_factory.converter_gb_to_kb(0))
def test_unicode(self):
disk_offering = DiskOffering()
expected_unicode = UNICODE_FORMAT.format(disk_offering.name)
self.assertEqual(expected_unicode, str(disk_offering))
def test_disk_offering_is_in_admin(self):
self.assertIn(DiskOffering, admin.site._registry)
admin_class = admin.site._registry[DiskOffering]
self.assertIsInstance(admin_class, DiskOfferingAdmin)
def test_can_found_greater_disk(self):
self.create_basic_disks()
found = DiskOffering.first_greater_than(
self.smaller.size_kb, self.environment)
self.assertEqual(self.medium, found)
found = DiskOffering.first_greater_than(
self.medium.size_kb, self.environment)
self.assertEqual(self.bigger, found)
def test_cannot_found_greater_disk(self):
self.create_basic_disks()
self.assertRaises(
NoDiskOfferingGreaterError,
DiskOffering.first_greater_than, self.bigger.size_kb, self.environment
)
def test_can_found_greater_disk_with_exclude(self):
self.create_basic_disks()
found = DiskOffering.first_greater_than(
self.smaller.size_kb, self.environment, exclude_id=self.medium.id
)
self.assertEqual(self.bigger, found)
def test_can_found_disk_for_auto_resize(self):
self.create_basic_disks()
self.auto_resize_max_size_in_gb.value = int(self.bigger.size_gb())
self.auto_resize_max_size_in_gb.save()
found = DiskOffering.last_offering_available_for_auto_resize(
self.environment)
self.assertEqual(self.bigger, found)
self.auto_resize_max_size_in_gb.value = int(self.bigger.size_gb()) - 1
self.auto_resize_max_size_in_gb.save()
found = DiskOffering.last_offering_available_for_auto_resize(
self.environment)
self.assertEqual(self.medium, found)
def test_cannot_found_disk_for_auto_resize(self):
self.create_basic_disks()
self.auto_resize_max_size_in_gb.value = int(self.smaller.size_gb()) - 1
self.auto_resize_max_size_in_gb.save()
self.assertRaises(
NoDiskOfferingLesserError,
DiskOffering.last_offering_available_for_auto_resize, self.environment
)
def test_compare_disks(self):
self.create_basic_disks()
self.assertGreater(self.bigger, self.smaller)
self.assertLess(self.smaller, self.bigger)
self.medium_twice = DiskOfferingFactory()
self.medium_twice.size_kb *= 20
self.medium_twice.save()
self.assertEqual(self.medium, self.medium)
self.assertNotEqual(self.medium, self.medium_twice)
self.medium_twice.delete()
def test_disk_is_last_offering(self):
        self.create_basic_disks()
|
rapodaca/Terasology
|
blender_addons/io_mesh_terasology/__init__.py
|
Python
|
apache-2.0
| 3,098
| 0.037121
|
#!BPY
"""
Name: 'TerasologyBlockShapeExport'
Blender: 260
Group: 'Export'
Tooltip: 'Export a Terasology Block Shape'
"""
bl_info = {
"name": "Terasology Block Shape Export",
"description": "Exporter for producing Terasology Block Shape files",
"author": "Immortius",
"version": (1, 1),
"blender": (2, 6, 0),
"location": "File > Import-Export",
"category": "Import-Export"}
import bpy
import os
import bpy_extras.io_utils
from bpy.props import StringProperty, BoolProperty
class ExportBlockShape(bpy.types.Operator, bpy_extras.io_utils.ExportHelper):
bl_idname = "export_mesh.terasology_block_shape"
bl_label = "Export Terasology Block Shape"
filename_ext = ".groovy"
    filter_glob = StringProperty(default="*.groovy", options={'HIDDEN'})
apply_modifiers = BoolProperty(
name="Apply Modifiers",
description="Apply Modifiers to the exported mesh",
default=True)
for_embed = BoolProperty(
name="For Embed in Jar",
description="Adds the default package to the groovy file",
default=False)
@classmethod
def poll(cls, context):
return context.active_object != None
def execute(self, context):
filepath = self.filepath
filepath = bpy.path.ensure_ext(filepath, self.filename_ext)
from . import export_block_shape
keywords = self.as_keywords(ignore=("filter_glob","check_existing"))
return export_block_shape.save(self, context, **keywords)
def draw(self, context):
layout = self.layout
row = layout.row()
row.prop(self, "apply_modifiers")
row = layout.row()
row.prop(self, "for_embed")
#UI Panel
bpy.types.Object.teraFullSide = BoolProperty(
name="Full Side",
description="Is this side of the block complete",
default = False)
bpy.types.Object.teraAABB = BoolProperty(
name="Is AABB Collider",
description="Is this object used to describe an AABB collider",
default = False)
bpy.types.Scene.teraAuthor = StringProperty(
name="Author",
description="Is this side of the block complete",
default = "")
bpy.types.Scene.teraAutoCollider = BoolProperty(
name="Auto-generate Collider",
description="Automatically generate an AABB collider that encapulates the block",
default = False)
class UIPanel(bpy.types.Panel):
bl_label = "Terasology Properties"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
def draw(self, context):
layout = self.layout
scene = context.scene
if not scene:
return
layout.prop(scene, 'teraAuthor')
layout.prop(scene, 'teraAutoCollider')
ob = context.object
if not ob:
return
if not ob.type == 'MESH':
return
layout.prop(ob, 'teraFullSide')
layout.prop(ob, 'teraAABB')
def menu_export(self, context):
self.layout.operator(ExportBlockShape.bl_idname, text="Terasology Block Shape (.groovy)")
def register():
bpy.utils.register_module(__name__)
bpy.types.INFO_MT_file_export.append(menu_export)
def unregister():
bpy.utils.unregister_module(__name__)
bpy.types.INFO_MT_file_export.remove(menu_export)
if __name__ == "__main__":
register()
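# Illustrative usage (assumption based on the bl_idname declared above): once the
# add-on is registered, the exporter can also be invoked from a script, e.g.
#   bpy.ops.export_mesh.terasology_block_shape(filepath="cube.groovy")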
|
davidbrazdil/nacl
|
toolchain_build/toolchain_build.py
|
Python
|
bsd-3-clause
| 39,585
| 0.004825
|
#!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Recipes for NativeClient toolchain packages.
The real entry plumbing is in toolchain_main.py.
"""
import collections
import fnmatch
import platform
import os
import re
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import pynacl.gsd_storage
import pynacl.platform
import command
import toolchain_main
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
NACL_DIR = os.path.dirname(SCRIPT_DIR)
# See command.GenerateGitPatches for the schema of entries in this dict.
# Additionally, each may contain a 'repo' key whose value is the name
# to use in place of the package name when calling GitUrl (below).
GIT_REVISIONS = {
'binutils': {
'rev': '7deface59753c3b249ac08d854d471951796343f',
'upstream-branch': 'upstream/binutils-2_24-branch',
'upstream-name': 'binutils-2.24',
# This is tag binutils-2_24, but Gerrit won't let us push
# non-annotated tags, and the upstream tag is not annotated.
'upstream-base': '237df3fa4a1d939e6fd1af0c3e5029a25a137310',
},
'gcc': {
'rev': 'faa3cdd0473b7fb82be937e32fd2b474fa0299e6',
'upstream-branch': 'upstream/gcc-4_8-branch',
'upstream-name': 'gcc-4.8.3',
# Upstream tag gcc-4_8_3-release:
'upstream-base': '6bbf0dec66c0e719b06cd2fe67559fda6df09000',
},
'newlib': {
'rev': 'e7b1ccd4b5eec14e840f6bf875f4d6fa4cff045c',
'upstream-branch': 'upstream/master',
'upstream-name': 'newlib-2.1.0',
# Upstream tag newlib_2_1_0:
'upstream-base': '99fc6c167467b41466ec90e8260e9c49cbe3d13c',
},
'gdb': {
'rev': '5deb4793a5e3f2f48d7899f424bb4484686020f8',
'repo': 'binutils',
'upstream-branch': 'upstream/gdb-7.7-branch',
'upstream-name': 'gdb-7.7.1',
# Upstream tag gdb-7.7-release:
'upstream-base': '4bd8fc3a1362970d9800a263987af8093798338b',
},
}
TAR_FILES = {
'gmp': command.path.join('gmp', 'gmp-6.0.0a.tar.bz2'),
'mpfr': command.path.join('mpfr', 'mpfr-3.1.2.tar.bz2'),
'mpc': command.path.join('mpc', 'mpc-1.0.2.tar.gz'),
'isl': command.path.join('cloog', 'isl-0.12.2.tar.bz2'),
'cloog': command.path.join('cloog', 'cloog-0.18.1.tar.gz'),
'expat': command.path.join('expat', 'expat-2.1.0.tar.gz'),
}
GIT_BASE_URL = 'https://chromium.googlesource.com/native_client'
GIT_PUSH_URL = 'ssh://gerrit.chromium.org/native_client'
KNOWN_MIRRORS = [('http://git.chromium.org/native_client', GIT_BASE_URL)]
PUSH_MIRRORS = [('http://git.chromium.org/native_client', GIT_PUSH_URL),
(GIT_BASE_URL, GIT_PUSH_URL)]
def GitUrl(package, push_url=False):
repo = GIT_REVISIONS[package].get('repo', package)
if push_url:
base_url = GIT_PUSH_URL
else:
base_url = GIT_BASE_URL
return '%s/nacl-%s.git' % (base_url, repo)
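# Worked example (derived from the tables above, not present upstream): 'gdb' carries
# a 'repo' key of 'binutils', so both URLs point at the shared binutils mirror:
#   GitUrl('gdb')                -> 'https://chromium.googlesource.com/native_client/nacl-binutils.git'
#   GitUrl('gdb', push_url=True) -> 'ssh://gerrit.chromium.org/native_client/nacl-binutils.git'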
def CollectSources():
sources = {}
for package in TAR_FILES:
tar_file = TAR_FILES[package]
if fnmatch.fnmatch(tar_file, '*.bz2'):
extract = EXTRACT_STRIP_TBZ2
elif fnmatch.fnmatch(tar_file, '*.gz'):
extract = EXTRACT_STRIP_TGZ
else:
raise Exception('unexpected file name pattern in TAR_FILES[%r]' % package)
sources[package] = {
'type': 'source',
'commands': [
            command.Command(extract + [command.path.join('%(abs_top_srcdir)s',
                                                         '..', 'third_party',
                                                         tar_file)],
                            cwd='%(output)s'),
],
}
patch_packages = []
patch_commands = []
for package, info in GIT_REVISIONS.iteritems():
sources[package] = {
'type': 'source',
'commands': command.SyncGitRepoCmds(GitUrl(package), '%(output)s',
info['rev'],
git_cache='%(git_cache_dir)s',
push_url=GitUrl(package, True),
known_mirrors=KNOWN_MIRRORS,
push_mirrors=PUSH_MIRRORS),
}
patch_packages.append(package)
patch_info = {'name': package}
patch_info.update(info)
patch_commands.append(
command.GenerateGitPatches('%(' + package + ')s/.git', patch_info))
sources['patches'] = {
'type': 'build',
'dependencies': patch_packages,
'commands': patch_commands,
}
# The gcc_libs component gets the whole GCC source tree.
sources['gcc_libs'] = sources['gcc']
# The gcc component omits all the source directories that are used solely
# for building target libraries. We don't want those included in the
# input hash calculation so that we don't rebuild the compiler when the
# the only things that have changed are target libraries.
sources['gcc'] = {
'type': 'source',
'dependencies': ['gcc_libs'],
'commands': [command.CopyTree('%(gcc_libs)s', '%(output)s', [
'boehm-gc',
'libada',
'libatomic',
'libffi',
'libgcc',
'libgfortran',
'libgo',
'libgomp',
'libitm',
'libjava',
'libmudflap',
'libobjc',
'libquadmath',
'libsanitizer',
'libssp',
'libstdc++-v3',
])]
}
# We have to populate the newlib source tree with the "exported" form of
# some headers from the native_client source tree. The newlib build
# needs these to be in the expected place. By doing this in the source
# target, these files will be part of the input hash and so we don't need
# to do anything else to keep track of when they might have changed in
# the native_client source tree.
newlib_sys_nacl = command.path.join('%(output)s',
'newlib', 'libc', 'sys', 'nacl')
newlib_unpack = [command.RemoveDirectory(command.path.join(newlib_sys_nacl,
dirname))
for dirname in ['bits', 'sys', 'machine']]
newlib_unpack.append(command.Command([
'python',
command.path.join('%(top_srcdir)s', 'src',
'trusted', 'service_runtime', 'export_header.py'),
command.path.join('%(top_srcdir)s', 'src',
'trusted', 'service_runtime', 'include'),
newlib_sys_nacl,
]))
sources['newlib']['commands'] += newlib_unpack
return sources
# Canonical tuples we use for hosts.
WINDOWS_HOST_TUPLE = pynacl.platform.PlatformTriple('win', 'x86-32')
MAC_HOST_TUPLE = pynacl.platform.PlatformTriple('darwin', 'x86-64')
ARM_HOST_TUPLE = pynacl.platform.PlatformTriple('linux', 'arm')
LINUX_X86_32_TUPLE = pynacl.platform.PlatformTriple('linux', 'x86-32')
LINUX_X86_64_TUPLE = pynacl.platform.PlatformTriple('linux', 'x86-64')
# Map of native host tuple to extra tuples that it cross-builds for.
EXTRA_HOSTS_MAP = {
LINUX_X86_64_TUPLE: [
LINUX_X86_32_TUPLE,
ARM_HOST_TUPLE,
WINDOWS_HOST_TUPLE,
],
}
# Map of native host tuple to host tuples that are "native enough".
# For these hosts, we will do a native-style build even though it's
# not the native tuple, just passing some extra compiler flags.
NATIVE_ENOUGH_MAP = {
LINUX_X86_64_TUPLE: {
LINUX_X86_32_TUPLE: ['-m32'],
},
}
# The list of targets to build toolchains for.
TARGET_LIST = ['arm', 'i686']
# List upload targets for each host we want to upload packages for.
TARGET = collections.namedtuple('TARGET', ['name', 'pkg_prefix'])
HOST_TARGET = collections.namedtuple('HOST_TARGET',
['os', 'arch', 'differ3264', 'targets'])
STANDARD_TARGETS = [TARGET('arm', '')]
LINUX_X86_64_TARGETS = [TARGET('arm', ''), TARGET('i686', 'ng_')]
UPLOAD_HOST_TARGETS = [
    HOST_TARGET('win', 'x86-32', False, STANDARD_TARGETS),
|
CoderHito/wx_demo
|
app/util/component_access_token.py
|
Python
|
mit
| 772
| 0.001319
|
import redis
from app.config import get_config_obj
from app.util.httputil import Http_util
class Component_access_token():
def __init__(self):
self.component_appid = get_config_obj().component_appid
        self.component_appsecret = get_config_obj().component_secret
self.r = redis.Redis(host='localhost', port=6379, db=0)
def get_component_verify_ticket(self):
        # TODO read the saved ticket
component_verify_ticket = self.r.get('component_verify_ticket')
return component_verify_ticket
def get_commponent_access_token(self):
token_json_data = Http_util().post_get_component_access_token(self.get_component_verify_ticket())
        # TODO save the token
return token_json_data.get("component_access_token")
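# Minimal usage sketch (an assumption for illustration, not part of the original module):
# read the cached component_verify_ticket from Redis and exchange it for an access token.
if __name__ == '__main__':
    client = Component_access_token()
    print(client.get_commponent_access_token())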
|
rwl/PyCIM
|
CIM15/CDPSM/Balanced/IEC61970/Wires/EnergyConsumer.py
|
Python
|
mit
| 4,290
| 0.005128
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.CDPSM.Balanced.IEC61970.Core.IdentifiedObject import IdentifiedObject
class EnergyConsumer(IdentifiedObject):
"""Generic user of energy - a point of consumption on the power system model
"""
def __init__(self, customerCount=0, pfixedPct=0.0, qfixedPct=0.0, qfixed=0.0, pfixed=0.0, LoadResponse=None, *args, **kw_args):
"""Initialises a new 'EnergyConsumer' instance.
@param customerCount: Number of individual customers represented by this Demand
@param pfixedPct: Fixed active power as per cent of load group fixed active power. Load sign convention is used, i.e. positive sign means flow out from a node.
@param qfixedPct: Fixed reactive power as per cent of load group fixed reactive power. Load sign convention is used, i.e. positive sign means flow out from a node.
@param qfixed: Reactive power of the load that is a fixed quantity. Load sign convention is used, i.e. positive sign means flow out from a node.
@param pfixed: Active power of the load that is a fixed quantity. Load sign convention is used, i.e. positive sign means flow out from a node.
@param LoadResponse: The load response characteristic of this load.
"""
#: Number of individual customers represented by this Demand
self.customerCount = customerCount
        #: Fixed active power as per cent of load group fixed active power. Load sign convention is used, i.e. positive sign means flow out from a node.
self.pfixedPct = pfixedPct
#: Fixed reactive power as per cent of load group fixed reactive power. Load sign convention is used, i.e. positive sign means flow out from a node.
self.qfixedPct = qfixedPct
#: Reactive power of the load that is a fixed quantity. Load sign convention is used, i.e. positive sign means flow out from a node.
self.qfixed = qfixed
#: Active power of the load that is a fixed quantity. Load sign convention is used, i.e. positive sign means flow out from a node.
self.pfixed = pfixed
self._LoadResponse = None
self.LoadResponse = LoadResponse
super(EnergyConsumer, self).__init__(*args, **kw_args)
_attrs = ["customerCount", "pfixedPct", "qfixedPct", "qfixed", "pfixed"]
_attr_types = {"customerCount": int, "pfixedPct": float, "qfixedPct": float, "qfixed": float, "pfixed": float}
_defaults = {"customerCount": 0, "pfixedPct": 0.0, "qfixedPct": 0.0, "qfixed": 0.0, "pfixed": 0.0}
_enums = {}
_refs = ["LoadResponse"]
_many_refs = []
def getLoadResponse(self):
"""The load response characteristic of this load.
"""
return self._LoadResponse
def setLoadResponse(self, value):
if self._LoadResponse is not None:
filtered = [x for x in self.LoadResponse.EnergyConsumer if x != self]
self._LoadResponse._EnergyConsumer = filtered
self._LoadResponse = value
if self._LoadResponse is not None:
if self not in self._LoadResponse._EnergyConsumer:
self._LoadResponse._EnergyConsumer.append(self)
LoadResponse = property(getLoadResponse, setLoadResponse)
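# Illustrative sketch of the bidirectional bookkeeping done by setLoadResponse above
# (the related characteristic object is assumed; its class is not defined in this file):
#   consumer = EnergyConsumer(pfixed=10.0)
#   consumer.LoadResponse = load_response_characteristic
#   # consumer now also appears in load_response_characteristic.EnergyConsumer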
|
mansweet/GaussianLDA
|
FastGaussianLDA2.py
|
Python
|
apache-2.0
| 27,002
| 0.004037
|
from __future__ import division
import random
import time
from collections import defaultdict, Counter
import gensim
import numpy as np
from numpy import log, pi
from scipy import linalg
from scipy.special import gammaln
import copy
from sklearn.cluster import KMeans
from numba import jit
import cholesky
__author__ = "Michael Mansour, Jared Thompson, Mike Rinehart"
class Wishart(object):
def __init__(self, word_vecs):
self.nu = None
self.kappa = None
self.psi = None
self.mu = None
self.set_params(word_vecs)
# ======================================================================================================================
def set_params(self, word_vecs):
word_vecs = np.vstack(word_vecs.values()) # turn dict of word vectors into a matrix
self.nu = word_vecs.shape[1] # dimensionality of word-vectors
self.kappa = 0.1
# self.psi = word_vecs.T.dot(word_vecs)#, axis=0) # sum of squres -- from Murphy(2012)
# self.psi = np.identity(
# word_vecs.shape[1]) * 3. # changed this to identity matrix as in paper. No intuition here
# self.mu = np.mean(word_vecs, axis=0)
# ======================================================================================================================
class Gauss_LDA(object):
def __init__(self, num_topics, corpus, word_vector_filepath=None,
word_vector_model=None, alpha=0.2, outputfile=None, preprocess=False):
self.doc_topic_CT = None
self.corpus = corpus
self.priors = None
self.word_vecs = {}
self.numtopics = num_topics
self.vocab = set([])
self.topic_params = defaultdict(dict)
self.wordvecFP = word_vector_filepath
self.word_vec_size = None
self.alpha = alpha
self.solver = cholesky.Helper()
self.wvmodel = word_vector_model
self.test_word_topics = defaultdict(list)
self.test_word_topic_count = defaultdict(int)
self.word_topics = {}
self.output_file_name = outputfile
self.preprocess = preprocess
# ======================================================================================================================
def process_corpus(self, documents):
"""
Tokenizes documents into dict of lists of tokens
:param documents: expects list of strings
        :return: dict{document ID: list of tokens}
"""
if not self.preprocess:
temp_corpus = defaultdict(dict)
random.shuffle(documents) # making sure topics are formed semi-randomly
for index, doc in enumerate(documents):
words = doc.split()
temp_corpus[index]['words'] = words
temp_corpus[index]['topics'] = np.empty(len(words)) # Random topic assign
# temp_corpus[index]['topics'] = np.random.randint(0, self.numtopics, size=len(words)) # Random topic assign
for word in words:
self.vocab.add(word)
self.corpus = temp_corpus
print "Done processing corpus with {} documents".format(len(documents))
else: # Docs are tokenized and such, just add it into class
temp_corpus = defaultdict(dict)
for idx, doc in enumerate(documents):
temp_corpus[idx]["words"] = doc
temp_corpus[idx]["topics"] = np.empty(len(doc))
for word in doc:
self.vocab.add((word))
self.corpus = temp_corpus
# ======================================================================================================================
def process_wordvectors(self, filepath=None):
"""
Takes a trained Word2Vec model, tests each word in vocab against it, and only keeps word vectors that
are in your document corpus, and that are in the word2vec corpus.
Decreases memory requirements for holding word vector info.
:param filepath: filepath of word-vector file. Requires 2 things at top of .txt document:
number of tokens trained on & dimensionality of word-vectors
:return: None - sets class-variable (self.word_vecs) to be a dict{word: word-vector}
"""
if filepath:
print "Processing word-vectors, this takes a moment"
self.wvmodel = gensim.models.Word2Vec.load_word2vec_format(fname=filepath, binary=False)
useable_vocab = 0
unusable_vocab = 0
self.word_vec_size = self.wvmodel.vector_size
for word in self.vocab:
try:
self.word_vecs[word] = self.wvmodel[word]
useable_vocab += 1
except KeyError:
unusable_vocab += 1
print "There are {0} words that could be converted to word vectors in your corpus \n" \
"There are {1} words that could NOT be converted to word vectors".format(useable_vocab,
unusable_vocab)
else:
useable_vocab = 0
unusable_vocab = 0
self.word_vec_size = self.wvmodel.vector_size
for word in self.vocab:
try:
self.word_vecs[word] = self.wvmodel[word]
useable_vocab += 1
except KeyError:
unusable_vocab += 1
print "There a
|
re {0} words that could be converted to word vectors in your corpus \n" \
"There are {1} words that could NOT be converted to word vectors".format(useable_vocab,
unusable_vocab)
# ======================================================================================================================
def clean_docs(self):
print "cleaning out docs of words not in your Word2Vec model"
approved_words = set(self.word_vecs.vocab.keys())
for idx, doc in self.corpus.iteritems():
self.corpus[idx] = [word for word in doc if word in approved_words]
print "Done cleaning out docs of bad words"
# ======================================================================================================================
def fit(self, iterations=1, init=True):
if init:
self.init()
init = False
print "Starting fit"
self.display_results()
for i in range(iterations):
self.sample()
print "{0} iterations complete".format(i)
if self.output_file_name: #TODO: fix such that it prints regardless of outputfilename
for k in xrange(self.numtopics):
for param, name in zip(("mean", "cov"),
(self.topic_params[k]["Topic Mean"], self.topic_params[k]["Topic Covariance"])):
self.output_file_name = self.output_file_name + "{}_{}"
results_file = self.output_file_name.format(k, param)
open(results_file, 'w')
np.savetxt(results_file, param)
# ======================================================================================================================
def init(self):
self.process_corpus(self.corpus)
self.process_wordvectors(self.wordvecFP)
self.priors = Wishart(self.word_vecs) # set wishhart priors
self.doc_topic_CT = np.zeros((len(self.corpus.keys()), self.numtopics)) # Init document-topic counts matrix
centroids, km = self.smart_centroids() # Init topic means with KMeans cluster centroids>>faster convergence
# Prior mean
mu_0 = np.zeros(self.word_vec_size)
count = 0
for docID in self.corpus.keys(): # hard setting word-topic assignments as per cluster membership to help model along
for i, word in enumerate(self.corpus[docID]['words']):
                self.corpus[docID]['topics'][i] = self.word_topics[word] # word_topics from KMeans cluster membership
|
GreenDelta/olca-updates
|
update-src/001_db_schema_update_v6_f0..b7/script.py
|
Python
|
mpl-2.0
| 1,555
| 0.036656
|
import org.openlca.core.database.CategoryDao as CategoryDao
dbUtil.createTable("tbl_dq_systems",
"CREATE TABLE tbl_dq_systems ( "
+ "id BIGINT NOT NULL, "
+ "name VARCHAR(255), "
+ "ref_id VARCHAR(36), "
+ "version BIGINT, "
+ "last_change BIGINT, "
+ "f_category BIGINT, "
+ "f_source BIGINT, "
+ "description CLOB(64 K), "
+ "has_uncertainties SMALLINT default 0, "
+ "PRIMARY KEY (id)) ")
dbUtil.createTable("tbl_dq_indicators",
"CREATE TABLE tbl_dq_indicators ( "
+ "id BIGINT NOT NULL, "
+ "name VARC
|
HAR(255), "
+ "position INTEGER NOT NULL, "
+ "f_dq_system BIGINT, "
+ "PRIMARY KEY (id)) ")
dbUtil.createTable("
|
tbl_dq_scores", "CREATE TABLE tbl_dq_scores ( "
+ "id BIGINT NOT NULL, "
+ "position INTEGER NOT NULL, "
+ "description CLOB(64 K), "
+ "label VARCHAR(255), "
+ "uncertainty DOUBLE default 0, "
+ "f_dq_indicator BIGINT, "
+ "PRIMARY KEY (id)) ")
dbUtil.createColumn("tbl_processes", "dq_entry", "dq_entry VARCHAR(50)")
dbUtil.createColumn("tbl_processes", "f_dq_system", "f_dq_system BIGINT")
dbUtil.createColumn("tbl_processes", "f_exchange_dq_system", "f_exchange_dq_system BIGINT")
dbUtil.createColumn("tbl_processes", "f_social_dq_system", "f_social_dq_system BIGINT")
dbUtil.renameColumn("tbl_exchanges", "pedigree_uncertainty", "dq_entry", "VARCHAR(50)")
dbUtil.createColumn("tbl_product_systems", "cutoff", "cutoff DOUBLE")
dao = CategoryDao(db)
roots = dao.getRootCategories()
for category in roots:
dao.update(category)
dbUtil.setVersion(6)
|
ryanmcgrath/drinkkitcom
|
urls.py
|
Python
|
mit
| 1,223
| 0.023712
|
from django.conf.urls.defaults import *
from django.contrib import admin
# Set up the admin shit
admin.autodiscover()
urlpatterns = patterns('',
(r'^knux/', include(admin.site.urls)),
# Viewing and adding tips/locations
(r'^locations/add/$', 'drinkkit.redditors.views.add_location'),
(r'^locations/search/$', 'drinkkit.redditors.views.find_locations'),
(r'^locations/nearby/$', 'drinkkit.redditors.views.nearby_locations'),
(r'^locations/(?P<location_id>[a-zA-Z0-9_.-]+)/add_tip/$', 'drinkkit.redditors.views.add_tip'),
    (r'^locations/(?P<location_id>[a-zA-Z0-9_.-]+)/checkin/$', 'drinkkit.redditors.views.checkin_location'),
(r'^locations/(?P<location_id>[a-zA-Z0-9_.-]+)/$', 'drinkkit.redditors.views.view_location'),
# Query and see who's getting into what
(r'^redditor/(?P<redditor_name>[a-zA-Z0-9_.-]+)/$', 'drinkkit.redditors.views.view_redditor'),
# Registration
(r'^register/$', 'drinkkit.redditors.views.register'),
# User forms - password, logout, login, etc.
(r'^password_reset/$', 'django.contrib.auth.views.password_reset'),
    (r'^unauth/$', 'django.contrib.auth.views.logout_then_login'),
(r'^auth/$', 'django.contrib.auth.views.login'),
(r'^/*', 'drinkkit.redditors.views.home'),
)
|
ActiveState/code
|
recipes/Python/496703_importonce/recipe-496703.py
|
Python
|
mit
| 1,231
| 0.02762
|
def import_once(modulenames, silent=1):
## import_once
## Fedmich Last modified: 3:38 PM 5/15/2006
## version 1.1
## Usage:
## import_once('os')
## import_once( ["os", 'sys'] )
if type(modulenames) is list:
pass
elif type(modulenames) is tuple:
pass
else:
modulenames = [modulenames]
imported = 0
for modulename in modulenames:
print modulename
if globals().has_key(modulename):
if not silent: print """Already imported module "%s"...""" % modulename
imported +=1
else:
try:
if not silent: print """%s is not yet imported so import it now...""" % modulename
globals()[modulename] = __import__(modulename, globals(), locals(), [])
imported += 1
except:
if not silent: print """Error while importing "%s"...""" % mod
|
ulenam
|
e
return (imported == len(modulenames) ) #return true if every modules are successfuly imported
print import_once( ("os", "sys") )
import_once( "sys")
import_once("oyster")
import_once("psyco", silent=0) #silent is used for debugging...
print os
print sys
print os.path.basename(r"c:\WINNT")
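# A minimal alternative sketch (an assumption, not part of the recipe): the same
# "import only once" effect can be had by consulting sys.modules, the cache the
# import machinery itself uses, instead of inspecting globals().
import importlib
import sys

def import_once_modern(name):
    # Return the already-imported module if present, otherwise import it now.
    if name in sys.modules:
        return sys.modules[name]
    return importlib.import_module(name)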
|
dhaase-de/dh-python-dh
|
dh/image/pipeline.py
|
Python
|
mit
| 13,563
| 0.007299
|
"""
Image pipeline viewer.
Note: currently under heavy construction.
"""
import tkinter
import tkinter.ttk
import dh.gui.tk
import dh.image
##
## basic classes
##
class Viewer():
def __init__(self):
self.images = []
self.n = None
self.pipeline = Pipeline()
self.pipeline.add("core.convert")
self.pipeline.add("core.asgray")
#self.pipeline.add("core.invert")
#self.pipeline.add("core.normalize")
self.pipeline.add("core.shift")
#self.pipeline.add("core.fft")
#self.pipeline.add("core.normalize")
self.pipeline.add("core.log")
#self.pipeline.add("core.gamma")
#self.pipeline.add("core.threshold")
#self.pipeline.add("core.rotate")
def select(self, n):
N = len(self.images)
if N == 0:
self.n = None
else:
self.n = n % N
return self.n
def first(self):
self.select(0)
def prev(self):
try:
self.select(self.n - 1)
except TypeError:
pass
def next(self):
try:
self.select(self.n + 1)
except TypeError:
pass
def last(self):
self.select(-1)
def add(self, I):
self.images.append(I.copy())
self.last()
def clear(self):
self.images = []
self.first()
def show(self):
window = _ViewerWindow(self)
window.run()
def view(self, I):
self.add(I)
self.show()
def selectedImage(self):
return self.images[self.n]
def applyPipeline(self):
return self.pipeline(self.selectedImage())
class _ViewerWindow(dh.gui.tk.Application):
def __init__(self, viewer):
super(_ViewerWindow, self).__init__(
title="Viewer",
minSize=(250, 250),
)
self.viewer = viewer
self.updateFilterFrame()
self.updateImage()
def initWidgets(self):
# key bindings
self.bind("<Escape>", lambda _: self.close())
self.bind("<q>", lambda _: self.close())
self.bind("<Left>", lambda _: (self.viewer.prev(), self.updateImage()))
self.bind("<Right>", lambda _: (self.viewer.next(), self.updateImage()))
# main frame
self.mainFrame = tkinter.ttk.Frame(self)
self.mainFrame.pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=tkinter.YES)
# filter frame
self.filterFrame = tkinter.ttk.Frame(self.mainFrame)
self.filterFrame.pack(side=tkinter.LEFT, anchor=tkinter.N, padx=2, pady=2)
# image canvas
self.imageCanvas = dh.gui.tk.ImageCanvas(self.mainFrame)
self.imageCanvas.pack(side=tkinter.LEFT, anchor=tkinter.N, fill=tkinter.BOTH, expand=tkinter.YES)
# status bar
self.statusBar = dh.gui.tk.StatusBar(self)
self.statusBar.pack(side=tkinter.BOTTOM, fill=tkinter.X, expand=tkinter.NO)
def updateFilterFrame(self):
for node in self.viewer.pipeline.nodes:
node.gui(parent=self.filterFrame, onChangeCallback=self.updateImage).pack(fill="x", padx=1, pady=1, expand=True)
def updateImage(self, *args, **kwargs):
with dh.utils.Timer() as t:
            I = self.viewer.applyPipeline()
self.imageCanvas.setImage(I)
self.updateStatusBar("{shape}, {dtype}, {time}ms".format(
shape=I.shape,
dtype=I.dtype,
            time=dh.utils.around(t() * 1000.0),
))
def updateStatusBar(self, text):
self.statusBar.setText(text)
##
## pipeline framework
##
class Pipeline():
def __init__(self):
# nodes
self.nodes = []
self.add("core.source")
def __call__(self, I):
J = I.copy()
for node in self.nodes:
J = node(J)
return J
def add(self, node, position=None):
"""
Inserts processing before the `position`-th slot of the pipeline.
"""
if position is None:
position = len(self.nodes)
if isinstance(node, str):
uid = node
node = Node.instances[uid]
self.nodes.insert(position, node)
def remove(self, position):
del self.nodes[position]
def save(self, filename):
raise NotImplementedError()
def load(self, filename):
raise NotImplementedError()
class Node():
"""
Class for a processing pipeline element (node), which automatically
registers its instances.
"""
# keeps references to all instances of this class
instances = {}
def __init__(self, uid, description=None, tags=None, f=None, parameters=(), cache=False):
# register this instance
if uid not in type(self).instances:
type(self).instances[uid] = self
else:
raise ValueError("Node with uid '{uid}' is already registered".format(uid=uid))
# other properties
self.uid = uid
self.description = description
self.tags = tags
self.f = f
self.parameters = list(parameters)
# cache
self.useCache = cache
self.cache = {}
def __call__(self, *args, **kwargs):
kwargs.update(self.parameterValues())
if self.useCache:
key = dh.utils.ohash((args, kwargs), "hex", 64)
if key not in self.cache:
self.cache[key] = self.f(*args, **kwargs)
return self.cache[key]
else:
return self.f(*args, **kwargs)
def parameterValues(self):
return {parameter.name: parameter() for parameter in self.parameters}
def gui(self, parent, onChangeCallback):
"""
Constructs and returns a GUI frame for this filter.
"""
# master frame
frame = tkinter.ttk.Frame(parent, relief="raised")
# usable part of the frame
innerFrame = tkinter.ttk.Frame(frame)
innerFrame.pack(fill="x", expand=True, padx=6, pady=3)
# header line
header = tkinter.ttk.Frame(innerFrame)
header.pack(side = tkinter.TOP, fill = "x", expand = True)
tkinter.ttk.Label(header, text=self.uid, font="Sans 10 bold", anchor = tkinter.W, justify = tkinter.LEFT).pack(side = tkinter.LEFT, fill = "x", expand = True)
# description line
if self.description is not None:
details = tkinter.ttk.Frame(innerFrame)
details.pack(side = tkinter.TOP, fill = "x", expand = True)
tkinter.ttk.Label(details, text=self.description, font="Sans 8 italic", anchor = tkinter.W, justify = tkinter.LEFT).pack(side = tkinter.LEFT, fill = "x", expand = True)
# parameter frame
parameterFrame = tkinter.ttk.Frame(innerFrame)
parameterFrame.pack(side = tkinter.TOP, fill = "x", expand = True)
for (row, parameter) in enumerate(self.parameters):
(labelFrame, valueFrame) = parameter.gui(parent=parameterFrame, onChangeCallback=onChangeCallback)
labelFrame.grid(row = row, column = 0, padx = 0, sticky = tkinter.W)
valueFrame.grid(row = row, column = 1, padx = 10, sticky = tkinter.W)
#tkinter.ttk.Scale(parameterFrame, from_=0, to=100).grid(row = n, column = 1)
return frame
class SwitchableNode(Node):
"""
Processing node which automatically has one bool parameter to enable or
disable the processing.
"""
def __init__(self, *args, **kwargs):
# parent initialization
super().__init__(*args, **kwargs)
# add "enabled" parameter
self.parameters = [
BoolNodeParameter(
name="enabled",
default=True,
)
] + self.parameters
# wrap function
self.g = self.f
def f(I, enabled, **kwargs):
if enabled:
return self.g(I=I, **kwargs)
else:
return I
self.f = f
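# Note (illustrative): the wrapper above gives every SwitchableNode an implicit boolean
# "enabled" parameter; when it is False, __call__ returns the input image unchanged, so
# a filter can be toggled from the GUI without being removed from the pipeline.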
class NodeParameter():
def __init__(self, name, label=None):
self.name = name
if label is not None:
self.label = label
else:
self.label = name
    def guiLabelFrame(self, parent):
|
Dob3r/python_seleniumwebdriver
|
Tests/test_Browser_logs.py
|
Python
|
apache-2.0
| 819
| 0.004884
|
from termcolor import colored
def test_Browser_logs(app):
wd = app.wd
sidebar = wd.find_element_by_xpath("//td[@id='sidebar']")
sidebar.find_element_by_xpath(".//span[normalize-space(.)='Catalog']").click()
wd.find_element_by_xpath("//i[@class='fa fa-folder']/../a").click()
    products_qty = len(wd.find_elements_by_xpath(("//input[contains(@name,'products')]/../..")))
wd.get_log("browser")
for i in range(1, products_qty + 1):
wd.find_element_by_xpath("//inp
|
ut[contains(@name,'products[%s]')]/../..//a[not(contains(@title,'Edit'))]" % i).click()
log = wd.get_log("browser")
if len(log) != 0:
print(colored("WARNING! LOG EXIST(S)", "red"))
for entry in log:
print(entry)
wd.find_element_by_name("cancel").click()
|
obulpathi/cdn1
|
cdn/manager/default/v1.py
|
Python
|
apache-2.0
| 1,291
| 0
|
# Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cdn.manager import base
JSON_HOME = {
"resources": {
"rel/cdn": {
"href-template": "services{?marker,limit}",
"href-vars": {
"marker": "param/marker",
"limit": "param/limit"
},
"hints": {
"allow": [
"GET"
],
"formats": {
"application/json": {}
}
}
}
}
}
class DefaultV1Controller(base.V1Controller):
def __init__(self, manager):
super(DefaultV1Controller, self).__init__(manager)
self.JSON_HOME = JSON_HOME
def get(self):
return self.JSON_HOME
|
neocogent/electrum
|
electrum/plugins/digitalbitbox/digitalbitbox.py
|
Python
|
mit
| 32,370
| 0.004541
|
# ----------------------------------------------------------------------------------
# Electrum plugin for the Digital Bitbox hardware wallet by Shift Devices AG
# digitalbitbox.com
#
import base64
import binascii
import hashlib
import hmac
import json
import math
import os
import re
import struct
import sys
import time
from electrum.crypto import sha256d, EncodeAES_base64, EncodeAES_bytes, DecodeAES_bytes, hmac_oneshot
from electrum.bitcoin import (TYPE_ADDRESS, push_script, var_int, public_key_to_p2pkh,
is_address)
from electrum.bip32 import BIP32Node
from electrum import ecc
from electrum.ecc import msg_magic
from electrum.wallet import Standard_Wallet
from electrum import constants
from electrum.transaction import Transaction
from electrum.i18n import _
from electrum.keystore import Hardware_KeyStore
from ..hw_wallet import HW_PluginBase
from electrum.util import to_string, UserCancelled, UserFacingException
from electrum.base_wizard import ScriptTypeNotSupported, HWD_SETUP_NEW_WALLET
from electrum.network import Network
from electrum.logging import get_logger
_logger = get_logger(__name__)
try:
import hid
DIGIBOX = True
except ImportError as e:
DIGIBOX = False
# ----------------------------------------------------------------------------------
# USB HID interface
#
def to_hexstr(s):
return binascii.hexlify(s).decode('ascii')
def derive_keys(x):
h = sha256d(x)
h = hashlib.sha512(h).digest()
return (h[:32],h[32:])
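# Note (illustrative): derive_keys() double-SHA256s the secret, expands it with SHA-512
# and splits the 64-byte digest into two 32-byte halves, which this plugin uses as
# separate encryption and authentication keys.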
MIN_MAJOR_VERSION = 5
ENCRYPTION_PRIVKEY_KEY = 'encryptionprivkey'
CHANNEL_ID_KEY = 'comserverchannelid'
class DigitalBitbox_Client():
def __init__(self, plugin, hidDevice):
self.plugin = plugin
self.dbb_hid = hidDevice
self.opened = True
self.password = None
self.isInitialized = False
self.setupRunning = False
self.usbReportSize = 64 # firmware > v2.0.0
def close(self):
if self.opened:
try:
self.dbb_hid.close()
except:
pass
self.opened = False
def timeout(self, cutoff):
pass
def label(self):
return " "
def is_pairable(self):
return True
def is_initialized(self):
return self.dbb_has_password()
def is_paired(self):
return self.password is not None
def has_usable_connection_with_device(self):
try:
self.dbb_has_password()
except BaseException:
return False
return True
def _get_xpub(self, bip32_path):
if self.check_device_dialog():
return self.hid_send_encrypt(('{"xpub": "%s"}' % bip32_path).encode('utf8'))
def get_xpub(self, bip32_path, xtype):
assert xtype in self.plugin.SUPPORTED_XTYPES
reply = self._get_xpub(bip32_path)
if reply:
xpub = reply['xpub']
# Change type of xpub to the requested type. The firmware
# only ever returns the mainnet standard type, but it is agnostic
# to the type when signing.
if xtype != 'standard' or constants.net.TESTNET:
node = BIP32Node.from_xkey(xpub, net=constants.BitcoinMainnet)
xpub = node._replace(xtype=xtype).to_xpub()
return xpub
else:
raise Exception('no reply')
def dbb_has_password(self):
reply = self.hid_send_plain(b'{"ping":""}')
if 'ping' not in reply:
raise UserFacingException(_('Device communication error. Please unplug and replug your Digital Bitbox.'))
if reply['ping'] == 'password':
return True
return False
def stretch_key(self, key: bytes):
return to_hexstr(hashlib.pbkdf2_hmac('sha512', key, b'Digital Bitbox', iterations = 20480))
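    # Note (illustrative): stretch_key() is PBKDF2-HMAC-SHA512 with a fixed salt and
    # 20480 iterations; with the default derived-key length (the digest size) the
    # returned hex string is 128 characters, i.e. 64 bytes of key material.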
def backup_password_dialog(self):
msg = _("Enter the password used when the backup was created:")
while True:
password = self.handler.get_passphrase(msg, False)
if password is None:
return None
if len(password) < 4:
msg = _("Password must have at least 4 characters.") \
+ "\n\n" + _("Enter password:")
elif len(password) > 64:
msg = _("Password must have less than 64 characters.") \
+ "\n\n" + _("Enter password:")
else:
return password.encode('utf8')
def password_dialog(self, msg):
while True:
password = self.handler.get_passphrase(msg, False)
if password is None:
return False
if len(password) < 4:
msg = _("Password must have at least 4 characters.") + \
"\n\n" + _("Enter password:")
elif len(password) > 64:
msg = _("Password must have less than 64 characters.") + \
"\n\n" + _("Enter password:")
else:
self.password = password.encode('utf8')
return True
def check_device_dialog(self):
match = re.search(r'v([0-9])+\.[0-9]+\.[0-9]+', self.dbb_hid.get_serial_number_string())
if match is None:
raise Exception("error detecting firmware version")
major_version = int(match.group(1))
if major_version < MIN_MAJOR_VERSION:
raise Exception("Please upgrade to the newest firmware using the BitBox Desktop app: https://shiftcrypto.ch/start")
# Set password if fresh device
if self.password is None and not self.dbb_has_password():
if not self.setupRunning:
return False # A fresh device cannot connect to an existing wallet
msg = _("An uninitialized Digital Bitbox is detected.") + " " + \
_("Enter a new password below.") + "\n\n" + \
_("REMEMBER THE PASSWORD!") + "\n\n" + \
_("You cannot access your coins or a backup without the password.") + "\n" + \
_("A backup is saved automatically when generating a new wallet.")
if self.password_dialog(msg):
reply = self.hid_send_plain(b'{"password":"' + self.password + b'"}')
else:
return False
# Get password from user if not yet set
msg = _("Enter your Digital Bitbox password:")
while self.password is None:
if not self.password_dialog(msg):
raise UserCancelled()
reply = self.hid_send_encrypt(b'{"led":"blink"}')
if 'error' in reply:
self.password = None
if reply['error']['code'] == 109:
msg = _("Incorrect password entered.") + "\n\n" + \
reply['error']['message'] + "\n\n" + \
_("Enter your Digital Bitbox password:")
else:
# Should never occur
msg = _("Unexpected error occurred.") + "\n\n" + \
reply['error']['message'] + "\n\n" + \
_("Enter your Digital Bitbox password:")
# Initialize device if not yet initialized
if not self.setupRunning:
self.isInitialized = True # Wallet exists. Electrum code later checks if the device matches the wallet
elif not self.isInitialized:
reply = self.hid_send_encrypt(b'{"device":"info"}')
if reply['device']['id'] != "":
self.recover_or_erase_dialog() # Already seeded
else:
self.seed_device_dialog() # Seed if not initialized
self.mobile_pairing_dialog()
return self.isInitialized
def recover_or_erase_dialog(self):
msg = _("The Digital Bitbox is already seeded. Choose an option:") + "\n"
choices = [
(_("Create a wallet using the current seed")),
(_("Load a wallet from the micro SD card (the current seed is overwritten)")),
(_("Erase the Digital Bitbox"))
]
try:
            reply = self.handler.win.query_choice(msg, choices)
|
Arcanewinds/FDL-LunarResources
|
DataPreparation/LOLA_DEM/img_split.py
|
Python
|
gpl-3.0
| 3,075
| 0.014634
|
#Written by Timothy Seabrook
#timothy.seabrook@cs.ox.ac.uk
#This script is used to split the LOLA_DEM South Pole Large Tiles into smaller tiles for ingestion.
import glob, os
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from skimage import io
from osgeo import gdal
full_size = [30400, 30400]
p_size = [3800, 3800]
cut_size = [32,32]
stride = np.divide(cut_size, 2)
thisDir = os.path.dirname(os.path.abspath(__file__))
rootDir = os.path.join(thisDir, os.pardir, os.pardir)
dataDir = os.path.join(rootDir, 'Data')
DEMDir = os.path.join(dataDir, 'LOLA_DEM', 'South_Pole')
DEMLargeDir = os.path.join(DEMDir, 'Large_Tiles')
DEMSmallDir = os.path.join(DEMDir, 'Small_Tiles')
base_filename = "hs-45-45_lola20sp"
#https://stackoverflow.com/questions/273946/how-do-i-resize-an-image-using-pil-and-maintain-its-aspect-ratio
v_pieces = np.floor_divide(full_size[0], p_size[0]) #Number of vertical divisions for large tiles
h_pieces = np.floor_divide(full_size[1], p_size[1]) #Number of horizontal divisions for large tiles
for n in (25,39,43,57,64):
if not os.path.isdir(os.path.join(DEMSmallDir,'P'+str(n+1),'')):
os.mkdir(os.path.join(DEMSmallDir,'P'+str(n+1),''))
curr_filename = os.path.join(DEMLargeDir,base_filename+'_p'+str(n+1)+'.tif')
ds = gdal.Open(curr_filename)
im = np.array(ds.GetRasterBand(1).ReadAsArray())
width = im.shape[1]
height = im.shape[0]
y_ind, x_ind = np.floor_divide(n, v_pieces), np.mod(n, v_pieces)
y_pos, x_pos = [0] * 2, [0] * 2
y_pos[0], x_pos[0] = np.multiply(p_size[0], y_ind), np.multiply(p_size[1], x_ind)
y_pos[1], x_pos[1] = y_pos[0] + p_size[0], x_pos[0] + p_size[1]
h_cuts = np.floor_divide(p_size[1], stride[1]) - (np.floor_divide(cut_size[1], stride[1])) + 1
v_cuts = np.floor_divide(p_size[0], stride[0]) - (np.floor_divide(cut_size[0], stride[0])) + 1
#The below is what was used to generate the tiles found in this github, however they are incorrect.
#The correct formula is given above.
#Once the data provided has been refactored, the below formula will be replaced.
w_cuts = np.multiply(np.floor_divide(width, cut_size[1]), np.divide(cut_size[1], stride[1]))
h_cuts = np.multiply(np.floor_divide(height, cut_size[0]), np.divide(cut_size[0], stride[0]))
for i in range(w_cuts+1):
for j in range(h_cuts+1):
x_off = np.multiply(i, stride[1])
y_off = np.multiply(j, stride[0])
#image = np.asarray(im)
image = im[y_off:y_off+cut_size[0], x_off:x_off+cut_size[1]]
ind = (i*w_cuts + j)
#x = i*cut_size[1]+x_pos[0]
#y = j*cut_size[0]+y_pos[0]
#filename = os.path.join(DEMSmallDir,'P'+str(n+1),base_filename+'_x'+str(x)+'_y'+str(y))
# Once existing data names have been refactored, the below filename will be replaced with the above.
filename = os.path.join(DEMSmallDir,'P'+str(n+1),base_filename+'_cut'+str(ind))
im2 = Image.fromarray(image)
im2.save(filename + '.tif')
|
quaddra/engage
|
install_extension.py
|
Python
|
apache-2.0
| 7,602
| 0.002894
|
#!/usr/bin/env python
"""This utility installs an engage extension into a deployment home.
"""
import os
import os.path
import sys
from optparse import OptionParser
import shutil
import re
import logging
logger = logging.getLogger(__name__)
# enable importing from the python_pkg sub-directory
base_src_dir=os.path.abspath(os.path.dirname(__file__))
python_pkg_dir = os.path.join(base_src_dir, "python_pkg")
assert os.path.exists(python_pkg_dir), "Python package directory %s does not exist" % python_pkg_dir
sys.path.append(python_pkg_dir)
from engage.extensions import installed_extensions, extension_versions
dist_root = os.path.abspath(os.path.dirname(__file__))
dist_root_parent = os.path.abspath(os.path.join(dist_root, ".."))
class EngageExtension(object):
def __init__(self, path, name, version, update):
self.path = path
self.name = name
self.version = version
self.update = update
def _copy_dir(self, src_dirname, target, dry_run=False):
src_dir = os.path.join(self.path, src_dirname)
dest_dir = os.path.join(os.path.join(target, src_dirname),
self.name)
if os.path.exists(src_dir):
logger.info("Copying %s to %s" % (src_dirname, dest_dir))
if os.path.exists(dest_dir):
if self.update:
logger.warn("removing old version of %s" % dest_dir)
if not dry_run:
shutil.rmtree(dest_dir)
else:
raise Exception("Target directory %s already exists" % dest_dir)
if not dry_run:
shutil.copytree(src_dir, dest_dir)
elif self.update and os.path.exists(dest_dir):
logger.warn("removing old version of %s" % dest_dir)
if not dry_run:
shutil.rmtree(dest_dir)
def install(self, dist_root, dry_run=False):
if not dry_run:
logger.info("Running install of %s to %s" % (self.name, dist_root))
else:
logger.info("Dry run install of %s to %s" % (self.name, dist_root))
self._copy_dir("metadata", dist_root, dry_run=dry_run)
dest_engage_pkg_dir = os.path.join(os.path.join(dist_root, "python_pkg"),
"engage")
self._copy_dir("drivers", dest_engage_pkg_dir, dry_run=dry_run)
self._copy_dir("tests", dest_engage_pkg_dir, dry_run=dry_run)
self._copy_dir("mgt_backends", dest_engage_pkg_dir, dry_run=dry_run)
# For the software packages we copy the individual files to the main package
# cache.
src_cache_dir = os.path.join(self.path, "sw_packages")
dest_cache_dir = os.path.join(dist_root, "sw_packages")
if os.path.exists(src_cache_dir):
logger.info("Copying software packages from %s to %s" %
(src_cache_dir, dest_cache_dir))
for fname in os.listdir(src_cache_dir):
src_file = os.path.join(src_cache_dir, fname)
dest_file = os.path.join(dest_cache_dir, fname)
logger.debug("Copying %s to %s" % (fname, dest_file))
shutil.copyfile(src_file, dest_file)
# update the extension file
if self.name not in installed_extensions:
installed_extensions.append(self.name)
extension_versions[self.name] = self.version
extns_file = os.path.join(dest_engage_pkg_dir, "extensions.py")
logger.info("Updating extensions file %s" % extns_file)
with open(extns_file, "rb") as ef:
lines = ef.read().split("\n")
updated_list = False
updated_versions = False
if not dry_run:
with open(extns_file, "wb") as ef:
for line in lines:
if re.match("^installed_extensions = ", line):
ef.write("installed_extensions = %s\n" %
installed_extensions.__repr__())
updated_list = True
elif re.match("^extension_versions = ", line):
ef.write("extension_versions = %s\n" %
extension_versions.__repr__())
updated_versions = True
else:
ef.write(line + "\n")
else:
for line in lines:
if re.match("^installed_extensions = ", line):
sys.stdout.write("installed_extensions = %s\n" %
installed_extensions.__repr__())
updated_list = True
elif re.match("^extension_versions = ", line):
sys.stdout.write("extension_versions = %s\n" %
extension_versions.__repr__())
updated_versions = True
else:
sys.stdout.write(line + "\n")
if ((not updated_list) or (not updated_versions)):
raise Exception("Extension registration file %s did not have correct format, unable to complete update" % extns_file)
logger.info("Successfully installed extension %s" % self.name)
def process_args(argv):
usage = "usage: %prog [options] path_to_extension"
parser = OptionParser(usage=usage)
parser.add_option("--dry-run", action="store_true",
help="If specified, don't make changes, just log what would be done",
default=False)
parser.add_option("--update", "-u", action="store_true",
help="If specified, override any existing version of the extension",
default=False)
(options, args) = parser.parse_args(args=argv)
if len(args)==0:
parser.print_help()
sys.exit(0)
elif len(args) > 1:
parser.error("Expecting exactly one argument, path to extension directory")
extension_path = os.path.abspath(args[0])
if not os.path.exists(extension_path):
parser.error("Extension directory %s does not exist" % extension_path)
extension_name = os.path.basename(extension_path)
if os.path.basename(dist_root_parent)=="src":
parser.error("Cannot install extension into source tree %s, run from distribution tree" % dist_root)
if extension_name in installed_extensions and not options.update:
parser.error("Extension %s already installed" % extension_name)
version_file = os.path.join(extension_path, "version.txt")
if not os.path.exists(version_file):
parser.error("Missing version file %s" % version_file)
with open(version_file, "rb") as vf:
        extension_version = vf.read().rstrip()
ext = EngageExtension(extension_path, extension_name,
extension_version, options.update)
return (ext, options)
def main(argv=sys.argv[1:]):
(ext, opts) = process_args(argv)
ext.install(dist_root, dry_run=opts.dry_run)
return 0
if __name__ == "__main__":
    #formatter = logging.Formatter("[%(levelname)s][%(name)s] %(message)s")
root_logger = logging.getLogger()
root_logger.setLevel(logging.INFO)
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.INFO)
#console_handler.setFormatter(formatter)
root_logger.addHandler(console_handler)
logger.setLevel(logging.INFO)
sys.exit(main())
|