| code (string, length 2–1.05M) | repo_name (string, length 5–104) | path (string, length 4–251) | language (string, 1 class) | license (string, 15 classes) | size (int32, 2–1.05M) |
|---|---|---|---|---|---|
from .. syntax import (Expr, Assign, ExprStmt, ForLoop, If, Return, While, Comment, ParFor,
TypedFn, UntypedFn, Closure, ClosureElt, Select,
Attribute, Const, Index, PrimCall, Tuple, Var,
Alloc, Array, Call, Struct, Shape, Strides, Range, Ravel, Transpose,
AllocArray, ArrayView, Cast, Slice, TupleProj, TypeValue,
Map, Reduce, Scan, OuterMap, IndexMap, IndexReduce, IndexScan )
class SyntaxVisitor(object):
"""
Traverse the statement structure of a syntax block, optionally collecting
values
"""
def visit_if_expr(self, expr):
if isinstance(expr, Expr):
self.visit_expr(expr)
def visit_Var(self, expr):
pass
def visit_Const(self, expr):
pass
def visit_Fn(self, expr):
pass
def visit_ClosureElt(self, expr):
self.visit_expr(expr.closure)
def visit_Closure(self, expr):
self.visit_expr(expr.fn)
self.visit_expr_list(expr.args)
def visit_Tuple(self, expr):
for elt in expr.elts:
self.visit_expr(elt)
def visit_PrimCall(self, expr):
for arg in expr.args:
self.visit_expr(arg)
def visit_Attribute(self, expr):
self.visit_expr(expr.value)
def visit_Index(self, expr):
self.visit_expr(expr.value)
self.visit_expr(expr.index)
def visit_UntypedFn(self, expr):
pass
def visit_TypedFn(self, expr):
pass
def visit_Alloc(self, expr):
self.visit_expr(expr.count)
def visit_Struct(self, expr):
for arg in expr.args:
self.visit_expr(arg)
def visit_Array(self, expr):
for elt in expr.elts:
self.visit_expr(elt)
def visit_ArrayView(self, expr):
self.visit_expr(expr.data)
self.visit_expr(expr.shape)
self.visit_expr(expr.strides)
self.visit_expr(expr.offset)
self.visit_expr(expr.size)
def visit_AllocArray(self, expr):
self.visit_expr(expr.shape)
def visit_Ravel(self, expr):
self.visit_expr(expr.array)
def visit_Reshape(self, expr):
self.visit_expr(expr.array)
self.visit_expr(expr.shape)
def visit_Shape(self, expr):
self.visit_expr(expr.array)
def visit_ConstArray(self, expr):
self.visit_expr(expr.shape)
self.visit_expr(expr.value)
def visit_ConstArrayLike(self, expr):
self.visit_expr(expr.array)
self.visit_expr(expr.value)
def visit_Slice(self, expr):
self.visit_expr(expr.start)
self.visit_expr(expr.stop)
self.visit_expr(expr.step)
def visit_Transpose(self, expr):
self.visit_expr(expr.array)
def visit_IndexMap(self, expr):
self.visit_expr(expr.fn)
self.visit_expr(expr.shape)
def visit_IndexReduce(self, expr):
self.visit_expr(expr.fn)
self.visit_expr(expr.combine)
self.visit_expr(expr.shape)
self.visit_expr(expr.init)
def visit_IndexScan(self, expr):
self.visit_expr(expr.fn)
self.visit_expr(expr.combine)
self.visit_expr(expr.shape)
self.visit_expr(expr.init)
def visit_Map(self, expr):
self.visit_expr(expr.fn)
self.visit_if_expr(expr.axis)
for arg in expr.args:
self.visit_expr(arg)
def visit_OuterMap(self, expr):
self.visit_expr(expr.fn)
self.visit_if_expr(expr.axis)
for arg in expr.args:
self.visit_expr(arg)
def visit_Reduce(self, expr):
self.visit_expr(expr.fn)
self.visit_expr(expr.combine)
self.visit_if_expr(expr.axis)
self.visit_if_expr(expr.init)
for arg in expr.args:
self.visit_expr(arg)
def visit_Scan(self, expr):
self.visit_expr(expr.fn)
self.visit_expr(expr.combine)
self.visit_if_expr(expr.axis)
self.visit_if_expr(expr.init)
for arg in expr.args:
self.visit_expr(arg)
def visit_TupleProj(self, expr):
return self.visit_expr(expr.tuple)
def visit_Call(self, expr):
self.visit_expr(expr.fn)
for arg in expr.args:
self.visit_expr(arg)
def visit_Cast(self, expr):
return self.visit_expr(expr.value)
def visit_Range(self, expr):
self.visit_expr(expr.start)
self.visit_expr(expr.stop)
self.visit_expr(expr.step)
def visit_Select(self, expr):
self.visit_expr(expr.cond)
self.visit_expr(expr.true_value)
self.visit_expr(expr.false_value)
def visit_TypeValue(self, expr):
pass
def visit_generic_expr(self, expr):
for v in expr.children():
self.visit_expr(v)
_expr_method_names = {
Var : 'visit_Var',
Const : 'visit_Const',
PrimCall : 'visit_PrimCall',
Attribute : 'visit_Attribute',
Index : 'visit_Index',
Tuple : 'visit_Tuple',
TupleProj : 'visit_TupleProj',
Slice : 'visit_Slice',
Struct : 'visit_Struct',
AllocArray : 'visit_AllocArray',
ArrayView : 'visit_ArrayView',
Array : 'visit_Array',
Range : 'visit_Range',
Ravel : 'visit_Ravel',
Transpose : 'visit_Transpose',
Shape : 'visit_Shape',
Strides : 'visit_Strides',
Alloc : 'visit_Alloc',
Cast : 'visit_Cast',
Call : 'visit_Call',
Select : 'visit_Select',
Map : 'visit_Map',
OuterMap : 'visit_OuterMap',
IndexMap : 'visit_IndexMap',
Reduce : 'visit_Reduce',
IndexReduce : 'visit_IndexReduce',
Scan : 'visit_Scan',
IndexScan : 'visit_IndexScan',
Closure : 'visit_Closure',
ClosureElt : 'visit_ClosureElt',
UntypedFn : 'visit_UntypedFn',
TypedFn : 'visit_TypedFn',
TypeValue : 'visit_TypeValue',
}
def visit_expr(self, expr):
c = expr.__class__
if c is Var:
return self.visit_Var(expr)
elif c is Const:
return self.visit_Const(expr)
elif c is PrimCall:
return self.visit_PrimCall(expr)
elif c is Tuple:
return self.visit_Tuple(expr)
elif c is TupleProj:
return self.visit_TupleProj(expr)
elif c is Index:
return self.visit_Index(expr)
# try looking up the method in fast-path dictionary
method_name = self._expr_method_names.get(c)
if not method_name:
# ...otherwise, just construct a new string following the visit_Expr pattern
method_name = "visit_" + c.__name__
# if we found a method, call it
if method_name:
      method = getattr(self, method_name, None)
if method:
return method(expr)
for child in expr.children():
self.visit_expr(child)
def visit_expr_list(self, exprs):
return [self.visit_expr(expr) for expr in exprs]
def visit_lhs_Var(self, lhs):
self.visit_Var(lhs)
def visit_lhs_Tuple(self, lhs):
self.visit_Tuple(lhs)
def visit_lhs_Index(self, lhs):
self.visit_Index(lhs)
def visit_lhs_Attribute(self, lhs):
self.visit_Attribute(lhs)
def visit_lhs(self, lhs):
c = lhs.__class__
if c is Var:
return self.visit_lhs_Var(lhs)
elif c is Tuple:
return self.visit_lhs_Tuple(lhs)
elif c is Index:
return self.visit_lhs_Index(lhs)
elif c is Attribute:
return self.visit_lhs_Attribute(lhs)
else:
assert False, "LHS not implemented: %s" % (lhs,)
def visit_block(self, stmts):
for s in stmts:
self.visit_stmt(s)
def visit_Assign(self, stmt):
self.visit_lhs(stmt.lhs)
self.visit_expr(stmt.rhs)
def visit_merge(self, phi_nodes):
for (_, (l,r)) in phi_nodes.iteritems():
self.visit_expr(l)
self.visit_expr(r)
def visit_merge_if(self, phi_nodes):
self.visit_merge(phi_nodes)
def visit_If(self, stmt):
self.visit_expr(stmt.cond)
self.visit_block(stmt.true)
self.visit_block(stmt.false)
self.visit_merge_if(stmt.merge)
def visit_ExprStmt(self, stmt):
self.visit_expr(stmt.value)
def visit_Return(self, stmt):
self.visit_expr(stmt.value)
def visit_merge_loop_start(self, phi_nodes):
pass
def visit_merge_loop_repeat(self, phi_nodes):
self.visit_merge(phi_nodes)
def visit_While(self, stmt):
self.visit_merge_loop_start(stmt.merge)
self.visit_expr(stmt.cond)
self.visit_block(stmt.body)
self.visit_merge_loop_repeat(stmt.merge)
def visit_ForLoop(self, stmt):
self.visit_lhs(stmt.var)
self.visit_expr(stmt.start)
self.visit_merge_loop_start(stmt.merge)
self.visit_expr(stmt.stop)
self.visit_block(stmt.body)
self.visit_expr(stmt.step)
self.visit_merge_loop_repeat(stmt.merge)
def visit_Comment(self, stmt):
pass
def visit_ParFor(self, expr):
self.visit_expr(expr.fn)
self.visit_expr(expr.bounds)
_stmt_method_names = {
Assign : 'visit_Assign',
Return : 'visit_Return',
While : 'visit_While',
ForLoop : 'visit_ForLoop',
If : 'visit_If',
ExprStmt : 'visit_ExprStmt',
ParFor : 'visit_ParFor',
Comment : 'visit_Comment',
}
def visit_stmt(self, stmt):
c = stmt.__class__
if c is Assign:
self.visit_Assign(stmt)
else:
getattr(self, self._stmt_method_names[c])(stmt)
def visit_fn(self, fn):
self.visit_block(fn.body)
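# Hedged usage sketch (not part of the original module): SyntaxVisitor is a
# visitor base class, so "collecting values" normally means overriding one or
# two visit_* hooks and accumulating results as a side effect.  The subclass
# below gathers the names of every Var node reachable from a function body,
# assuming Var nodes expose a `.name` attribute.
class _VarNameCollector(SyntaxVisitor):
  def __init__(self):
    self.names = set()
  def visit_Var(self, expr):
    # Var is a leaf node, so recording its name is all there is to do.
    self.names.add(expr.name)
def _collect_var_names(fn):
  # Walk every statement and expression of `fn`, returning the names seen.
  collector = _VarNameCollector()
  collector.visit_fn(fn)
  return collector.names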
| pombredanne/parakeet | parakeet/analysis/syntax_visitor.py | Python | bsd-3-clause | 9,020 |
from django.contrib.localflavor.gb.forms import GBPostcodeField
from utils import LocalFlavorTestCase
class GBLocalFlavorTests(LocalFlavorTestCase):
def test_GBPostcodeField(self):
error_invalid = [u'Enter a valid postcode.']
valid = {
'BT32 4PX': 'BT32 4PX',
'GIR 0AA': 'GIR 0AA',
'BT324PX': 'BT32 4PX',
' so11aa ': 'SO1 1AA',
' so1 1aa ': 'SO1 1AA',
'G2 3wt': 'G2 3WT',
'EC1A 1BB': 'EC1A 1BB',
'Ec1a1BB': 'EC1A 1BB',
}
invalid = {
'1NV 4L1D': error_invalid,
'1NV4L1D': error_invalid,
' b0gUS': error_invalid,
}
self.assertFieldOutput(GBPostcodeField, valid, invalid)
valid = {}
invalid = {
'1NV 4L1D': [u'Enter a bloody postcode!'],
}
kwargs = {'error_messages': {'invalid': 'Enter a bloody postcode!'}}
self.assertFieldOutput(GBPostcodeField, valid, invalid, field_kwargs=kwargs)
| disqus/django-old | tests/regressiontests/forms/localflavor/gb.py | Python | bsd-3-clause | 1,023 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Snippet.country'
db.delete_column(u'base_snippet', 'country')
def backwards(self, orm):
# Adding field 'Snippet.country'
db.add_column(u'base_snippet', 'country',
self.gf('snippets.base.fields.CountryField')(default='', max_length=16, blank=True),
keep_default=False)
models = {
u'base.clientmatchrule': {
'Meta': {'ordering': "('-modified',)", 'object_name': 'ClientMatchRule'},
'appbuildid': ('snippets.base.fields.RegexField', [], {'max_length': '255', 'blank': 'True'}),
'build_target': ('snippets.base.fields.RegexField', [], {'max_length': '255', 'blank': 'True'}),
'channel': ('snippets.base.fields.RegexField', [], {'max_length': '255', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'distribution': ('snippets.base.fields.RegexField', [], {'max_length': '255', 'blank': 'True'}),
'distribution_version': ('snippets.base.fields.RegexField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_exclusion': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locale': ('snippets.base.fields.RegexField', [], {'max_length': '255', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('snippets.base.fields.RegexField', [], {'max_length': '255', 'blank': 'True'}),
'os_version': ('snippets.base.fields.RegexField', [], {'max_length': '255', 'blank': 'True'}),
'startpage_version': ('snippets.base.fields.RegexField', [], {'max_length': '255', 'blank': 'True'}),
'version': ('snippets.base.fields.RegexField', [], {'max_length': '255', 'blank': 'True'})
},
u'base.jsonsnippet': {
'Meta': {'ordering': "('-modified',)", 'object_name': 'JSONSnippet'},
'client_match_rules': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['base.ClientMatchRule']", 'symmetrical': 'False', 'blank': 'True'}),
'country': ('snippets.base.fields.CountryField', [], {'default': "''", 'max_length': '16', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'icon': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'on_aurora': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'on_beta': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'on_nightly': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'on_release': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'on_startpage_1': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'priority': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'publish_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'publish_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '100'})
},
u'base.jsonsnippetlocale': {
'Meta': {'object_name': 'JSONSnippetLocale'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locale': ('snippets.base.fields.LocaleField', [], {'default': "'en-US'", 'max_length': '32'}),
'snippet': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'locale_set'", 'to': u"orm['base.JSONSnippet']"})
},
u'base.searchprovider': {
'Meta': {'ordering': "('id',)", 'object_name': 'SearchProvider'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'base.snippet': {
'Meta': {'ordering': "('-modified',)", 'object_name': 'Snippet'},
'client_match_rules': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['base.ClientMatchRule']", 'symmetrical': 'False', 'blank': 'True'}),
'countries': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['base.TargetedCountry']", 'symmetrical': 'False', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'exclude_from_search_providers': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['base.SearchProvider']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'on_aurora': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'on_beta': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'on_nightly': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'on_release': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'on_startpage_1': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'on_startpage_2': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'on_startpage_3': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'on_startpage_4': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'priority': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'publish_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'publish_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.SnippetTemplate']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '100'})
},
u'base.snippetlocale': {
'Meta': {'object_name': 'SnippetLocale'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locale': ('snippets.base.fields.LocaleField', [], {'default': "'en-US'", 'max_length': '32'}),
'snippet': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'locale_set'", 'to': u"orm['base.Snippet']"})
},
u'base.snippettemplate': {
'Meta': {'object_name': 'SnippetTemplate'},
'code': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'base.snippettemplatevariable': {
'Meta': {'object_name': 'SnippetTemplateVariable'},
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'variable_set'", 'to': u"orm['base.SnippetTemplate']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'base.targetedcountry': {
'Meta': {'object_name': 'TargetedCountry'},
'code': ('snippets.base.fields.CountryField', [], {'default': "u'us'", 'max_length': '16'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'base.uploadedfile': {
'Meta': {'object_name': 'UploadedFile'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['base']
| bensternthal/snippets-service | snippets/base/migrations/0016_auto__del_field_snippet_country.py | Python | bsd-3-clause | 10,256 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import subprocess
from telemetry.core import exceptions
from telemetry.internal.platform import android_platform_backend as \
android_platform_backend_module
from telemetry.core import util
from telemetry.internal.backends import android_command_line_backend
from telemetry.internal.backends import browser_backend
from telemetry.internal.backends.chrome import chrome_browser_backend
from telemetry.internal import forwarders
from pylib.device import intent
class AndroidBrowserBackend(chrome_browser_backend.ChromeBrowserBackend):
"""The backend for controlling a browser instance running on Android."""
def __init__(self, android_platform_backend, browser_options,
backend_settings, output_profile_path, extensions_to_load,
target_arch):
assert isinstance(android_platform_backend,
android_platform_backend_module.AndroidPlatformBackend)
super(AndroidBrowserBackend, self).__init__(
android_platform_backend,
supports_tab_control=backend_settings.supports_tab_control,
supports_extensions=False, browser_options=browser_options,
output_profile_path=output_profile_path,
extensions_to_load=extensions_to_load)
self._port_keeper = util.PortKeeper()
# Use the port hold by _port_keeper by default.
self._port = self._port_keeper.port
if len(extensions_to_load) > 0:
raise browser_backend.ExtensionsNotSupportedException(
'Android browser does not support extensions.')
# Initialize fields so that an explosion during init doesn't break in Close.
self._backend_settings = backend_settings
self._target_arch = target_arch
self._saved_sslflag = ''
# TODO(wuhu): Move to network controller backend.
self.platform_backend.InstallTestCa()
# Kill old browser.
self._KillBrowser()
if self.device.HasRoot() or self.device.NeedsSU():
if self.browser_options.profile_dir:
self.platform_backend.PushProfile(
self._backend_settings.package,
self.browser_options.profile_dir)
elif not self.browser_options.dont_override_profile:
self.platform_backend.RemoveProfile(
self._backend_settings.package,
self._backend_settings.profile_ignore_list)
if self.browser_options.netsim:
assert self.platform_backend.use_rndis_forwarder, (
'Netsim requires RNDIS forwarding.')
self.wpr_port_pairs = forwarders.PortPairs(
http=forwarders.PortPair(0, 80),
https=forwarders.PortPair(0, 443),
dns=forwarders.PortPair(0, 53))
# Set the debug app if needed.
self.platform_backend.SetDebugApp(self._backend_settings.package)
@property
def log_file_path(self):
return None
@property
def device(self):
return self.platform_backend.device
def _KillBrowser(self):
if self.device.IsUserBuild():
self.platform_backend.StopApplication(self._backend_settings.package)
else:
self.platform_backend.KillApplication(self._backend_settings.package)
def Start(self):
self.device.RunShellCommand('logcat -c')
if self.browser_options.startup_url:
url = self.browser_options.startup_url
elif self.browser_options.profile_dir:
url = None
else:
# If we have no existing tabs start with a blank page since default
# startup with the NTP can lead to race conditions with Telemetry
url = 'about:blank'
self.platform_backend.DismissCrashDialogIfNeeded()
browser_startup_args = self.GetBrowserStartupArgs()
with android_command_line_backend.SetUpCommandLineFlags(
self.device, self._backend_settings, browser_startup_args):
self.device.StartActivity(
intent.Intent(package=self._backend_settings.package,
activity=self._backend_settings.activity,
action=None, data=url, category=None),
blocking=True)
remote_devtools_port = self._backend_settings.GetDevtoolsRemotePort(
self.device)
try:
# Release reserved port right before forwarding host to device.
self._port_keeper.Release()
assert self._port == self._port_keeper.port, (
'Android browser backend must use reserved port by _port_keeper')
self.platform_backend.ForwardHostToDevice(
self._port, remote_devtools_port)
except Exception:
logging.exception('Failed to forward %s to %s.',
str(self._port), str(remote_devtools_port))
logging.warning('Currently forwarding:')
try:
for line in self.device.adb.ForwardList().splitlines():
logging.warning(' %s', line)
except Exception:
logging.warning('Exception raised while listing forwarded '
'connections.')
logging.warning('Host tcp ports in use:')
try:
for line in subprocess.check_output(['netstat', '-t']).splitlines():
logging.warning(' %s', line)
except Exception:
logging.warning('Exception raised while listing tcp ports.')
logging.warning('Device unix domain sockets in use:')
try:
for line in self.device.ReadFile('/proc/net/unix', as_root=True,
force_pull=True).splitlines():
logging.warning(' %s', line)
except Exception:
logging.warning('Exception raised while listing unix domain sockets.')
raise
try:
self._WaitForBrowserToComeUp()
self._InitDevtoolsClientBackend(remote_devtools_port)
except exceptions.BrowserGoneException:
logging.critical('Failed to connect to browser.')
if not (self.device.HasRoot() or self.device.NeedsSU()):
logging.critical(
'Resolve this by either: '
'(1) Flashing to a userdebug build OR '
'(2) Manually enabling web debugging in Chrome at '
'Settings > Developer tools > Enable USB Web debugging.')
self.Close()
raise
except:
self.Close()
raise
def GetBrowserStartupArgs(self):
args = super(AndroidBrowserBackend, self).GetBrowserStartupArgs()
args.append('--enable-remote-debugging')
args.append('--disable-fre')
args.append('--disable-external-intent-requests')
return args
@property
def pid(self):
pids = self.device.GetPids(self._backend_settings.package)
if not pids or self._backend_settings.package not in pids:
raise exceptions.BrowserGoneException(self.browser)
if len(pids[self._backend_settings.package]) > 1:
raise Exception(
'At most one instance of process %s expected but found pids: '
'%s' % (self._backend_settings.package, pids))
return int(pids[self._backend_settings.package][0])
@property
def browser_directory(self):
return None
@property
def profile_directory(self):
return self._backend_settings.profile_dir
@property
def package(self):
return self._backend_settings.package
@property
def activity(self):
return self._backend_settings.activity
def __del__(self):
self.Close()
def Close(self):
super(AndroidBrowserBackend, self).Close()
self.platform_backend.RemoveTestCa()
self._KillBrowser()
self.platform_backend.StopForwardingHost(self._port)
if self._output_profile_path:
self.platform_backend.PullProfile(
self._backend_settings.package, self._output_profile_path)
def IsBrowserRunning(self):
return self.platform_backend.IsAppRunning(self._backend_settings.package)
def GetStandardOutput(self):
return self.platform_backend.GetStandardOutput()
def GetStackTrace(self):
return self.platform_backend.GetStackTrace(self._target_arch)
@property
def should_ignore_certificate_errors(self):
return not self.platform_backend.is_test_ca_installed
| Pluto-tv/chromium-crosswalk | tools/telemetry/telemetry/internal/backends/chrome/android_browser_backend.py | Python | bsd-3-clause | 8,130 |
##########################################################################
#
# Copyright (c) 2009, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import os.path
import math
import unittest
import imath
import IECore
import IECoreScene
class CurveExtrudeOpTest( unittest.TestCase ) :
def testIt( self ) :
c = IECore.Reader.create( "test/IECore/data/cobFiles/torusCurves.cob" ).read()
assert( c.arePrimitiveVariablesValid() )
op = IECoreScene.CurveExtrudeOp()
patchGroup = op(
curves = c,
resolution = imath.V2i( 6, 30 )
)
self.assertEqual( len( patchGroup.children() ), 193 )
for child in patchGroup.children() :
self.assert_( child.arePrimitiveVariablesValid() )
if __name__ == "__main__":
unittest.main()
| appleseedhq/cortex | test/IECoreScene/CurveExtrudeOp.py | Python | bsd-3-clause | 2,392 |
from pydatacsvext import *
| jnmclarty/trump | trump/extensions/source/tx-pydatacsv/__init__.py | Python | bsd-3-clause | 26 |
import sys
import re
from django import template
from django.template import TemplateSyntaxError
from django.template.defaulttags import URLNode
from openflow.common.utils.OptinThemeManager import OptinThemeManager
'''
@author: omoya
OCF Custom URL templatetag to support different themes with transparency
'''
#registering
register = template.Library()
# Regex for token keyword arguments
kwarg_re = re.compile(r"(?:(\w+)=)?(.+)")
def url(parser, token):
bits = token.split_contents()
if len(bits) < 2:
raise TemplateSyntaxError("'%s' takes at least one argument"
" (path to a view)" % bits[0])
viewname = bits[1]
args = []
kwargs = {}
asvar = None
bits = bits[2:]
if len(bits) >= 2 and bits[-2] == 'as':
asvar = bits[-1]
bits = bits[:-2]
# Backwards compatibility: check for the old comma separated format
# {% url urlname arg1,arg2 %}
# Initial check - that the first space separated bit has a comma in it
if bits and ',' in bits[0]:
check_old_format = True
# In order to *really* be old format, there must be a comma
# in *every* space separated bit, except the last.
for bit in bits[1:-1]:
if ',' not in bit:
# No comma in this bit. Either the comma we found
# in bit 1 was a false positive (e.g., comma in a string),
# or there is a syntax problem with missing commas
check_old_format = False
break
else:
# No comma found - must be new format.
check_old_format = False
if check_old_format:
# Confirm that this is old format by trying to parse the first
# argument. An exception will be raised if the comma is
# unexpected (i.e. outside of a static string).
match = kwarg_re.match(bits[0])
if match:
value = match.groups()[1]
try:
parser.compile_filter(value)
except TemplateSyntaxError:
bits = ''.join(bits).split(',')
# Now all the bits are parsed into new format,
# process them as template vars
if len(bits):
for bit in bits:
match = kwarg_re.match(bit)
if not match:
raise TemplateSyntaxError("Malformed arguments to url tag")
name, value = match.groups()
if name:
kwargs[name] = parser.compile_filter(value)
else:
args.append(parser.compile_filter(value))
viewname = OptinThemeManager.getThemeStaticUrl(viewname, args)
return URLNode(viewname, args, kwargs, asvar)
url = register.tag(url)
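# Hedged usage note (not part of the original file): because this module lives
# in a templatetags package and is named url.py, templates load it by module
# name and then use it like Django's built-in {% url %} tag, e.g.
#   {% load url %}
#   {% url some_view_name arg1 arg2 as target %}
#   <a href="{{ target }}">link</a>
# The only behavioural difference is that the view name and args are first
# passed through OptinThemeManager.getThemeStaticUrl, so theme-specific
# overrides take precedence over the default resolution.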
| avlach/univbris-ocf | optin_manager/src/python/openflow/common/utils/templatetags/url.py | Python | bsd-3-clause | 2,734 |
from __future__ import absolute_import
from datetime import datetime
from django.core.urlresolvers import reverse
from sentry.testutils import APITestCase
class OrganizationProjectsSentFirstEventEndpointTest(APITestCase):
def setUp(self):
self.foo = self.create_user("foo@example.com")
self.org = self.create_organization(owner=self.user)
self.team = self.create_team(organization=self.org)
self.url = reverse(
"sentry-api-0-organization-sent-first-event",
kwargs={"organization_slug": self.org.slug},
)
def test_simple_sent_first_event(self):
self.create_project(teams=[self.team], first_event=datetime.now())
self.create_member(organization=self.org, user=self.foo, teams=[self.team])
self.login_as(user=self.foo)
response = self.client.get(self.url)
assert response.status_code == 200
assert response.data["sentFirstEvent"]
def test_simple_no_first_event(self):
self.create_project(teams=[self.team])
self.create_member(organization=self.org, user=self.foo, teams=[self.team])
self.login_as(user=self.foo)
response = self.client.get(self.url)
assert response.status_code == 200
assert not response.data["sentFirstEvent"]
def test_first_event_in_org(self):
self.create_project(teams=[self.team], first_event=datetime.now())
self.create_member(organization=self.org, user=self.foo)
self.login_as(user=self.foo)
response = self.client.get(self.url)
assert response.status_code == 200
assert response.data["sentFirstEvent"]
def test_no_first_event_in_member_projects(self):
self.create_project(teams=[self.team], first_event=datetime.now())
self.create_member(organization=self.org, user=self.foo)
self.login_as(user=self.foo)
response = self.client.get(u"{}?is_member=true".format(self.url))
assert response.status_code == 200
assert not response.data["sentFirstEvent"]
def test_first_event_from_project_ids(self):
project = self.create_project(teams=[self.team], first_event=datetime.now())
self.create_member(organization=self.org, user=self.foo)
self.login_as(user=self.foo)
response = self.client.get(u"{}?project={}".format(self.url, project.id))
assert response.status_code == 200
assert response.data["sentFirstEvent"]
| beeftornado/sentry | tests/sentry/api/endpoints/test_organization_projects_sent_first_event.py | Python | bsd-3-clause | 2,479 |
# encoding: utf-8
"""
attribute/__init__.py
Created by Thomas Mangin on 2009-11-05.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
# Every Attribute should be imported from this file
# as it makes sure that all the registering decorators are run
from exabgp.bgp.message.update.attribute.attribute import Attribute
from exabgp.bgp.message.update.attribute.attributes import Attributes
from exabgp.bgp.message.update.attribute.generic import GenericAttribute
from exabgp.bgp.message.update.attribute.origin import Origin
from exabgp.bgp.message.update.attribute.aspath import ASPath
from exabgp.bgp.message.update.attribute.aspath import AS4Path
from exabgp.bgp.message.update.attribute.nexthop import NextHop
from exabgp.bgp.message.update.attribute.med import MED
from exabgp.bgp.message.update.attribute.localpref import LocalPreference
from exabgp.bgp.message.update.attribute.atomicaggregate import AtomicAggregate
from exabgp.bgp.message.update.attribute.aggregator import Aggregator
from exabgp.bgp.message.update.attribute.aggregator import Aggregator4
from exabgp.bgp.message.update.attribute.community.communities import Communities
from exabgp.bgp.message.update.attribute.community.extended import ExtendedCommunities
from exabgp.bgp.message.update.attribute.originatorid import OriginatorID
from exabgp.bgp.message.update.attribute.clusterlist import ClusterList
from exabgp.bgp.message.update.attribute.clusterlist import ClusterID
from exabgp.bgp.message.update.attribute.mprnlri import MPRNLRI
from exabgp.bgp.message.update.attribute.mprnlri import EMPTY_MPRNLRI
from exabgp.bgp.message.update.attribute.mpurnlri import MPURNLRI
from exabgp.bgp.message.update.attribute.mpurnlri import EMPTY_MPURNLRI
from exabgp.bgp.message.update.attribute.pmsi import PMSI
from exabgp.bgp.message.update.attribute.aigp import AIGP
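# Illustrative sketch only (not exabgp's actual implementation): the
# "registering decorator" pattern referred to above.  Importing a module whose
# classes are wrapped by such a decorator populates a registry as an
# import-time side effect, which is why every Attribute subclass has to be
# imported from this file.
_example_registry = {}
def _example_register(attribute_id):
    def decorator(cls):
        # Record the class under its attribute id so it can be looked up later.
        _example_registry[attribute_id] = cls
        return cls
    return decorator
@_example_register(0xFF)
class _ExampleAttribute(object):
    pass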
| dneiter/exabgp | lib/exabgp/bgp/message/update/attribute/__init__.py | Python | bsd-3-clause | 1,850 |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Takes care of manipulating the chrome's HTTP cache.
"""
from datetime import datetime
import json
import os
import re
import shutil
import subprocess
import sys
import tempfile
import zipfile
_SRC_DIR = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', '..', '..'))
sys.path.append(os.path.join(_SRC_DIR, 'build', 'android'))
from pylib import constants
import device_setup
import options
OPTIONS = options.OPTIONS
# Cache back-end types supported by cachetool.
BACKEND_TYPES = ['simple']
# Default build output directory.
OUT_DIRECTORY = os.getenv('CR_OUT_FULL', os.path.join(
os.path.dirname(__file__), '../../../out/Release'))
# Default cachetool binary location.
CACHETOOL_BIN_PATH = os.path.join(OUT_DIRECTORY, 'cachetool')
# Default content_decoder_tool binary location.
CONTENT_DECODER_TOOL_BIN_PATH = os.path.join(OUT_DIRECTORY,
'content_decoder_tool')
# Regex used to parse HTTP headers line by line.
HEADER_PARSING_REGEX = re.compile(r'^(?P<header>\S+):(?P<value>.*)$')
def _EnsureCleanCacheDirectory(directory_dest_path):
"""Ensure that a cache directory is created and clean.
Args:
directory_dest_path: Path of the cache directory to ensure cleanliness.
"""
if os.path.isdir(directory_dest_path):
shutil.rmtree(directory_dest_path)
elif not os.path.isdir(os.path.dirname(directory_dest_path)):
os.makedirs(os.path.dirname(directory_dest_path))
assert not os.path.exists(directory_dest_path)
def _RemoteCacheDirectory():
"""Returns the path of the cache directory's on the remote device."""
return '/data/data/{}/cache/Cache'.format(
constants.PACKAGE_INFO[OPTIONS.chrome_package_name].package)
def _AdbShell(adb, cmd):
adb.Shell(subprocess.list2cmdline(cmd))
def PullBrowserCache(device):
"""Pulls the browser cache from the device and saves it locally.
Cache is saved with the same file structure as on the device. Timestamps are
  important to preserve because indexing and eviction depend on them.
Returns:
Temporary directory containing all the browser cache.
"""
_INDEX_DIRECTORY_NAME = 'index-dir'
_REAL_INDEX_FILE_NAME = 'the-real-index'
remote_cache_directory = _RemoteCacheDirectory()
save_target = tempfile.mkdtemp(suffix='.cache')
# Pull the cache recursively.
device.adb.Pull(remote_cache_directory, save_target)
# Update the modification time stamp on the local cache copy.
def _UpdateTimestampFromAdbStat(filename, stat):
assert os.path.exists(filename)
os.utime(filename, (stat.st_time, stat.st_time))
for filename, stat in device.adb.Ls(remote_cache_directory):
if filename == '..':
continue
if filename == '.':
cache_directory_stat = stat
continue
original_file = os.path.join(remote_cache_directory, filename)
saved_file = os.path.join(save_target, filename)
_UpdateTimestampFromAdbStat(saved_file, stat)
if filename == _INDEX_DIRECTORY_NAME:
# The directory containing the index was pulled recursively, update the
# timestamps for known files. They are ignored by cache backend, but may
# be useful for debugging.
index_dir_stat = stat
saved_index_dir = os.path.join(save_target, _INDEX_DIRECTORY_NAME)
saved_index_file = os.path.join(saved_index_dir, _REAL_INDEX_FILE_NAME)
for sub_file, sub_stat in device.adb.Ls(original_file):
if sub_file == _REAL_INDEX_FILE_NAME:
_UpdateTimestampFromAdbStat(saved_index_file, sub_stat)
break
_UpdateTimestampFromAdbStat(saved_index_dir, index_dir_stat)
# Store the cache directory modification time. It is important to update it
# after all files in it have been written. The timestamp is compared with
# the contents of the index file when freshness is determined.
_UpdateTimestampFromAdbStat(save_target, cache_directory_stat)
return save_target
def PushBrowserCache(device, local_cache_path):
"""Pushes the browser cache saved locally to the device.
Args:
device: Android device.
local_cache_path: The directory's path containing the cache locally.
"""
remote_cache_directory = _RemoteCacheDirectory()
# Clear previous cache.
_AdbShell(device.adb, ['rm', '-rf', remote_cache_directory])
_AdbShell(device.adb, ['mkdir', remote_cache_directory])
# Push cache content.
device.adb.Push(local_cache_path, remote_cache_directory)
# Command queue to touch all files with correct timestamp.
command_queue = []
# Walk through the local cache to update mtime on the device.
def MirrorMtime(local_path):
cache_relative_path = os.path.relpath(local_path, start=local_cache_path)
remote_path = os.path.join(remote_cache_directory, cache_relative_path)
timestamp = os.stat(local_path).st_mtime
touch_stamp = datetime.fromtimestamp(timestamp).strftime('%Y%m%d.%H%M%S')
command_queue.append(['touch', '-t', touch_stamp, remote_path])
for local_directory_path, dirnames, filenames in os.walk(
local_cache_path, topdown=False):
for filename in filenames:
MirrorMtime(os.path.join(local_directory_path, filename))
for dirname in dirnames:
MirrorMtime(os.path.join(local_directory_path, dirname))
MirrorMtime(local_cache_path)
device_setup.DeviceSubmitShellCommandQueue(device, command_queue)
def ZipDirectoryContent(root_directory_path, archive_dest_path):
"""Zip a directory's content recursively with all the directories'
timestamps preserved.
Args:
root_directory_path: The directory's path to archive.
archive_dest_path: Archive destination's path.
"""
with zipfile.ZipFile(archive_dest_path, 'w') as zip_output:
timestamps = {}
root_directory_stats = os.stat(root_directory_path)
timestamps['.'] = {
'atime': root_directory_stats.st_atime,
'mtime': root_directory_stats.st_mtime}
for directory_path, dirnames, filenames in os.walk(root_directory_path):
for dirname in dirnames:
subdirectory_path = os.path.join(directory_path, dirname)
subdirectory_relative_path = os.path.relpath(subdirectory_path,
root_directory_path)
subdirectory_stats = os.stat(subdirectory_path)
timestamps[subdirectory_relative_path] = {
'atime': subdirectory_stats.st_atime,
'mtime': subdirectory_stats.st_mtime}
for filename in filenames:
file_path = os.path.join(directory_path, filename)
file_archive_name = os.path.join('content',
os.path.relpath(file_path, root_directory_path))
file_stats = os.stat(file_path)
timestamps[file_archive_name[8:]] = {
'atime': file_stats.st_atime,
'mtime': file_stats.st_mtime}
zip_output.write(file_path, arcname=file_archive_name)
zip_output.writestr('timestamps.json',
json.dumps(timestamps, indent=2))
def UnzipDirectoryContent(archive_path, directory_dest_path):
"""Unzip a directory's content recursively with all the directories'
timestamps preserved.
Args:
archive_path: Archive's path to unzip.
directory_dest_path: Directory destination path.
"""
_EnsureCleanCacheDirectory(directory_dest_path)
with zipfile.ZipFile(archive_path) as zip_input:
timestamps = None
for file_archive_name in zip_input.namelist():
if file_archive_name == 'timestamps.json':
timestamps = json.loads(zip_input.read(file_archive_name))
elif file_archive_name.startswith('content/'):
file_relative_path = file_archive_name[8:]
file_output_path = os.path.join(directory_dest_path, file_relative_path)
file_parent_directory_path = os.path.dirname(file_output_path)
if not os.path.exists(file_parent_directory_path):
os.makedirs(file_parent_directory_path)
with open(file_output_path, 'w') as f:
f.write(zip_input.read(file_archive_name))
assert timestamps
for relative_path, stats in timestamps.iteritems():
output_path = os.path.join(directory_dest_path, relative_path)
if not os.path.exists(output_path):
os.makedirs(output_path)
os.utime(output_path, (stats['atime'], stats['mtime']))
def CopyCacheDirectory(directory_src_path, directory_dest_path):
"""Copies a cache directory recursively with all the directories'
timestamps preserved.
Args:
directory_src_path: Path of the cache directory source.
directory_dest_path: Path of the cache directory destination.
"""
assert os.path.isdir(directory_src_path)
_EnsureCleanCacheDirectory(directory_dest_path)
shutil.copytree(directory_src_path, directory_dest_path)
class CacheBackend(object):
"""Takes care of reading and deleting cached keys.
"""
def __init__(self, cache_directory_path, cache_backend_type,
cachetool_bin_path=CACHETOOL_BIN_PATH):
"""Chrome cache back-end constructor.
Args:
cache_directory_path: The directory path where the cache is locally
stored.
cache_backend_type: A cache back-end type in BACKEND_TYPES.
cachetool_bin_path: Path of the cachetool binary.
"""
assert os.path.isdir(cache_directory_path)
assert cache_backend_type in BACKEND_TYPES
assert os.path.isfile(cachetool_bin_path), 'invalid ' + cachetool_bin_path
self._cache_directory_path = cache_directory_path
self._cache_backend_type = cache_backend_type
self._cachetool_bin_path = cachetool_bin_path
# Make sure cache_directory_path is a valid cache.
self._CachetoolCmd('validate')
def ListKeys(self):
"""Lists cache's keys.
Returns:
A list of all keys stored in the cache.
"""
return [k.strip() for k in self._CachetoolCmd('list_keys').split('\n')[:-1]]
def GetStreamForKey(self, key, index):
"""Gets a key's stream.
Args:
key: The key to access the stream.
index: The stream index:
index=0 is the HTTP response header;
index=1 is the transport encoded content;
index=2 is the compiled content.
Returns:
String holding stream binary content.
"""
return self._CachetoolCmd('get_stream', key, str(index))
def DeleteKey(self, key):
"""Deletes a key from the cache.
Args:
      key: The key to delete.
"""
self._CachetoolCmd('delete_key', key)
def _CachetoolCmd(self, operation, *args):
"""Runs the cache editor tool and return the stdout.
Args:
operation: Cachetool operation.
*args: Additional operation argument to append to the command line.
Returns:
Cachetool's stdout string.
"""
editor_tool_cmd = [
self._cachetool_bin_path,
self._cache_directory_path,
self._cache_backend_type,
operation]
editor_tool_cmd.extend(args)
process = subprocess.Popen(editor_tool_cmd, stdout=subprocess.PIPE)
stdout_data, _ = process.communicate()
assert process.returncode == 0
return stdout_data
def GetDecodedContentForKey(self, key):
"""Gets a key's decoded content.
    The HTTP cache stores the transport-layer resource binary in the key's
    stream 1. That content may be encoded with a compression algorithm named
    in the Content-Encoding response header; this method takes care of
    returning the decoded binary content of the resource.
Args:
key: The key to access the decoded content.
Returns:
String holding binary content.
"""
response_headers = self.GetStreamForKey(key, 0)
content_encoding = None
for response_header_line in response_headers.split('\n'):
match = HEADER_PARSING_REGEX.match(response_header_line)
if not match:
continue
if match.group('header').lower() == 'content-encoding':
content_encoding = match.group('value')
break
encoded_content = self.GetStreamForKey(key, 1)
if content_encoding == None:
return encoded_content
cmd = [CONTENT_DECODER_TOOL_BIN_PATH]
cmd.extend([s.strip() for s in content_encoding.split(',')])
process = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
decoded_content, _ = process.communicate(input=encoded_content)
assert process.returncode == 0
return decoded_content
def ApplyUrlWhitelistToCacheArchive(cache_archive_path,
whitelisted_urls,
output_cache_archive_path):
"""Generate a new cache archive containing only whitelisted urls.
Args:
cache_archive_path: Path of the cache archive to apply the white listing.
whitelisted_urls: Set of url to keep in cache.
output_cache_archive_path: Destination path of cache archive containing only
white-listed urls.
"""
cache_temp_directory = tempfile.mkdtemp(suffix='.cache')
try:
UnzipDirectoryContent(cache_archive_path, cache_temp_directory)
backend = CacheBackend(cache_temp_directory, 'simple')
cached_urls = backend.ListKeys()
for cached_url in cached_urls:
if cached_url not in whitelisted_urls:
backend.DeleteKey(cached_url)
for cached_url in backend.ListKeys():
assert cached_url in whitelisted_urls
ZipDirectoryContent(cache_temp_directory, output_cache_archive_path)
finally:
shutil.rmtree(cache_temp_directory)
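# Hedged example call (not part of the original module); the paths and URL
# below are placeholders.
#   ApplyUrlWhitelistToCacheArchive(
#       cache_archive_path='/tmp/original_cache.zip',
#       whitelisted_urls={'https://example.com/app.js'},
#       output_cache_archive_path='/tmp/filtered_cache.zip')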
def ManualTestMain():
import argparse
parser = argparse.ArgumentParser(description='Tests cache back-end.')
parser.add_argument('cache_archive_path', type=str)
parser.add_argument('backend_type', type=str, choices=BACKEND_TYPES)
command_line_args = parser.parse_args()
cache_path = tempfile.mkdtemp()
UnzipDirectoryContent(command_line_args.cache_archive_path, cache_path)
cache_backend = CacheBackend(
cache_directory_path=cache_path,
cache_backend_type=command_line_args.backend_type)
keys = sorted(cache_backend.ListKeys())
selected_key = None
for key in keys:
if key.endswith('.js'):
selected_key = key
break
assert selected_key
print '{}\'s HTTP response header:'.format(selected_key)
print cache_backend.GetStreamForKey(selected_key, 0)
print cache_backend.GetDecodedContentForKey(selected_key)
cache_backend.DeleteKey(keys[1])
assert keys[1] not in cache_backend.ListKeys()
shutil.rmtree(cache_path)
if __name__ == '__main__':
ManualTestMain()
| was4444/chromium.src | tools/android/loading/chrome_cache.py | Python | bsd-3-clause | 14,619 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014, DataCanvasIO
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be
# used to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
# SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
"""
A mini sqoop2 REST api library.
"""
__version__ = "0.0.2"
__author__ = "xiaolin"
import requests
import json
import copy
import urllib
import re
import time
def current_milli_time():
return int(round(time.time() * 1000))
class MySqoop(object):
def __init__(self, host, port):
self._svc_root = "http://%s:%s/sqoop" % (host, port)
def version(self):
return requests.get(self._svc_root + "/version").json()
def framework(self):
return requests.get(self._svc_root + "/v1/framework").json()
def connection(self, xid=None):
if not xid:
return requests.get(self._svc_root + "/v1/connection/all").json()
else:
return requests.get(self._svc_root + "/v1/connection/%s" % str(xid)).json()
def connector(self, cid=None):
if not cid:
return requests.get(self._svc_root + "/v1/connector/all").json()
else:
return requests.get(self._svc_root + "/v1/connector/%s" % str(cid)).json()
def get_connection_by_name(self, name):
conn_dict = {c['name']:c for c in self.connection()['all']}
if name not in conn_dict:
raise Exception("Sqoop2 Connection '%s' not found" % name)
return conn_dict[name]
def get_connection_inputs(self, connection_name):
connection = self.get_connection_by_name(connection_name)
input_dict = {
c['name']:urllib.unquote(c["value"])
for c in connection['connector'][0]['inputs']
if "value" in c
}
return input_dict
def _create_connection(self, name, framework_params, connector_params):
r = self.framework()
framework_import_form = copy.deepcopy(r['con-forms'])
for f in framework_import_form:
for fi in f['inputs']:
if fi['name'] in framework_params:
fi['value'] = urllib.quote(framework_params[fi['name']], '')
connector = self.connector()['all'][0]
connector_forms = copy.deepcopy(connector['con-forms'])
# pp(connector_forms)
for c in connector_forms:
for ci in c['inputs']:
if ci['name'] in connector_params:
ci['value'] = urllib.quote(connector_params[ci['name']], '')
now_time = current_milli_time()
new_d = {
"id": -1,
"enabled": True,
"update-date": now_time,
"creation-date": now_time,
"name": name,
"connector": connector_forms,
"connector-id": connector['id'],
"framework": framework_import_form,
"update-user": None,
"creation-user" : "xiaolin"
}
all_d = { "all": [ new_d ] }
print("=====================")
pp(all_d)
print("=====================")
r = requests.post(self._svc_root + "/v1/connection", data=json.dumps(all_d), headers={'content-type': 'application/json'})
if r.status_code != 200:
print("--------------------")
pp(all_d)
pp(r.status_code)
pp(r.json())
raise Exception("Failed to create a connection")
else:
return r.json()
def create_connection(self, conn_name, conn_str, username, password):
jdbc_cfg = parse_jdbc(conn_str)
jdbc_driver_dict = {
"sqlserver" : "com.microsoft.sqlserver.jdbc.SQLServerDriver",
"postgresql" : "org.postgresql.Driver"
}
if not jdbc_cfg['name'] in jdbc_driver_dict:
raise ValueError("Do not support jdbc driver '%s'" % jdbc_cfg['name'])
fw_ps = { }
co_ps = {
"connection.jdbcDriver": jdbc_driver_dict[jdbc_cfg['name']],
"connection.connectionString": conn_str,
"connection.username": username,
"connection.password": password
}
return self._create_connection(conn_name, fw_ps, co_ps)
def delete_connection_by_id(self, cid):
print "Delete connection %d" % int(cid)
return requests.delete(self._svc_root + "/v1/connection/%d" % int(cid)).json()
def get_job(self, jid=None):
if not jid:
return requests.get(self._svc_root + "/v1/job/all").json()
else:
return requests.get(self._svc_root + "/v1/job/" + str(jid)).json()
def create_job(self, job_name, connection, framework_params, job_params, job_type):
if not job_type.upper() in ["IMPORT", "EXPORT"]:
raise ValueError("Invalid type job type")
job_type = job_type.upper()
r = self.framework()
framework_form = copy.deepcopy(r['job-forms'][job_type])
for f in framework_form:
for fi in f['inputs']:
if fi['name'] in framework_params:
fi['value'] = urllib.quote(framework_params[fi['name']], '')
connector = self.connector(connection['connector'][0]['id'])
connector_job_form = copy.deepcopy(connector['all'][0]['job-forms'][job_type])
for c in connector_job_form:
for ci in c['inputs']:
if ci['name'] in job_params:
ci['value'] = urllib.quote(job_params[ci['name']], '')
now_time = current_milli_time()
new_d = {
'connection-id': connection['id'],
'connector': connector_job_form,
'connector-id': 1,
"creation-date": now_time,
"creation-user": None,
"enabled": True,
"framework": framework_form,
"id": -1,
"name": job_name,
"type": job_type,
"update-date": now_time,
"update-user": None
}
all_d = { "all":[ new_d ] }
r = requests.post(self._svc_root + "/v1/job", data=json.dumps(all_d), headers={'content-type': 'application/json'})
if r.status_code != 200:
pp(all_d)
raise Exception("Failed to create a '%s' job" % job_type)
else:
return r.json()
def create_import_job(self, job_name, connection_id, framework_params, job_params):
connection = self.connection(connection_id)['all'][0]
return self.create_job(job_name, connection, framework_params, job_params, job_type="IMPORT")
def create_export_job(self, job_name, connection_id, framework_params, job_params):
connection = self.connection(connection_id)['all'][0]
return self.create_job(job_name, connection, framework_params, job_params, job_type="EXPORT")
def delete_job(self, jid):
r = requests.delete(self._svc_root + "/v1/job/" + str(jid))
if r.status_code != 200:
raise Exception("Failed to delete a job: '%s', status_code=%s" % (str(jid), r.status_code))
else:
return r.json()
def delete_all_jobs(self):
jobs = self.get_job()
for j in jobs['all']:
self.delete_job(j['id'])
def run_job(self, jid):
r = requests.post(self._svc_root + "/v1/submission/action/" + str(jid))
if r.status_code != 200:
raise Exception("Failed to run a job: '%s'" % str(jid))
else:
return r.json()
def wait_job(self, jid):
while True:
time.sleep(1)
r = requests.get(self._svc_root + "/v1/submission/history/" + str(jid))
if r.status_code != 200:
raise Exception("Failed to run a job: '%s'" % str(jid))
ret = r.json()
print("Job status '%s'" % ret['all'][0]['status'])
if ret['all'][0]['status'] in ['FAILURE_ON_SUBMIT']:
raise Exception("Failed to run a job: '%s'" % str(jid))
if ret['all'][0]['status'] in ['SUCCEEDED', 'UNKNOWN', 'FAILED']:
return r
def pp(j):
print(json.dumps(j, indent=4, sort_keys=True))
def parse_jdbc(name):
pattern = re.compile(r'''
jdbc:
(?P<name>[\w\+]+)://
(?:
(?P<username>[^:/]*)
(?::(?P<password>.*))?
@)?
(?:
(?:
\[(?P<ipv6host>[^/]+)\] |
(?P<ipv4host>[^/:;]+)
)?
(?::(?P<port>[^/]*))?
)?
(?:/(?P<database>[^;]*))?
(?P<meta_params>;.*)?
''', re.X)
meta_pattern = re.compile(r'''
(?P<key>[^=;]+)=(?P<val>[^=;]+)
''', re.VERBOSE)
m = pattern.match(name)
if m is not None:
ret = m.groupdict()
if m.group("meta_params"):
metas = meta_pattern.findall(m.group("meta_params"))
ret['meta'] = {k:v for k,v in metas}
return ret
else:
raise ValueError("Invalid jdbc string")
def pymssql_delete_table(cfg, tablename):
import pymssql
server = cfg['ipv4host']
def get_username(cfg):
username = cfg.get('username', None)
if username:
return username
if 'meta' not in cfg:
return None
return cfg['meta'].get('user', None)
def get_password(cfg):
password = cfg.get('password', None)
if password:
return password
if 'meta' not in cfg:
return None
return cfg['meta'].get('password', None)
user = get_username(cfg)
password = get_password(cfg)
dbname = cfg['meta']['databaseName']
conn = pymssql.connect(server, user, password, dbname, tablename)
cursor = conn.cursor()
# TODO:
cursor.execute("DELETE FROM %s" % tablename)
conn.commit()
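# Hedged usage sketch (not part of the original module): the host, port and
# connection parameters below are placeholders, not a real deployment.
def _example_sqoop_usage():
    sqoop = MySqoop("sqoop-host.example.com", 12000)
    print(sqoop.version())
    # Register a JDBC connection; the server answers with a JSON description
    # of the new connection object.
    sqoop.create_connection("demo-conn",
                            "jdbc:postgresql://dbhost:5432/warehouse",
                            username="etl", password="secret")
    conn = sqoop.get_connection_by_name("demo-conn")
    # Real calls would pass connector-specific inputs instead of empty dicts.
    sqoop.create_import_job("demo-import", conn['id'], {}, {})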
if __name__ == "__main__":
main()
| dkuner/example-modules | modules/modeling/CDH4/demo_hero/sqoop2_importer/pysqoop2.py | Python | bsd-3-clause | 11,284 |
import os
SITE_ID = 1
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.messages',
'testapp',
'feincms',
'feincms.module.page',
'form_designer',
]
MEDIA_ROOT = '/media/'
STATIC_URL = '/static/'
BASEDIR = os.path.dirname(__file__)
MEDIA_ROOT = os.path.join(BASEDIR, 'media/')
STATIC_ROOT = os.path.join(BASEDIR, 'static/')
SECRET_KEY = 'supersikret'
LOGIN_REDIRECT_URL = '/?login=1'
ROOT_URLCONF = 'testapp.urls'
LANGUAGES = (('en', 'English'), ('de', 'German'))
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
DEFAULT_FROM_EMAIL = 'no-reply@example.com'
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
)
|
michaelkuty/form_designer
|
tests/testapp/settings.py
|
Python
|
bsd-3-clause
| 1,646
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.httpclient import AsyncHTTPClient, HTTPRequest
from tornado.websocket import websocket_connect
# Bokeh imports
from bokeh.server.server import Server
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'http_get',
'ManagedServerLoop',
'url',
'websocket_open',
'ws_url',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
def url(server, prefix=""):
return "http://localhost:" + str(server.port) + prefix + "/"
def ws_url(server, prefix=""):
return "ws://localhost:" + str(server.port) + prefix + "/ws"
def http_get(io_loop, url):
result = {}
@gen.coroutine
def handle_request(response):
result['response'] = response
io_loop.stop()
# for some reason passing a loop to AsyncHTTPClient is deprecated
assert io_loop is IOLoop.current()
http_client = AsyncHTTPClient()
headers = dict()
resp = http_client.fetch(url, headers=headers)
io_loop.add_future(resp, handle_request)
io_loop.start()
if 'response' not in result:
raise RuntimeError("Failed to http get")
response = result['response'].result()
if response.error:
raise response.error
else:
return response
def websocket_open(io_loop, url, origin=None):
result = {}
@gen.coroutine
def handle_connection(future):
result['connection'] = future
io_loop.stop()
request = HTTPRequest(url)
if origin is not None:
request.headers['Origin'] = origin
resp = websocket_connect(request)
io_loop.add_future(resp, handle_connection)
io_loop.start()
if 'connection' not in result:
raise RuntimeError("Failed to handle websocket connect")
future = result['connection']
if future.exception():
raise future.exception()
else:
future.result().close()
return None
# lets us use a current IOLoop with "with"
# and ensures the server unlistens
class ManagedServerLoop(object):
def __init__(self, application, **server_kwargs):
loop = IOLoop()
loop.make_current()
server_kwargs['io_loop'] = loop
self._server = Server(application, **server_kwargs)
def __exit__(self, type, value, traceback):
self._server.unlisten()
self._server.stop()
self._server.io_loop.close()
def __enter__(self):
self._server.start()
return self._server
@property
def io_loop(self):
        return self._server.io_loop
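# Illustrative usage (not part of the original module): the class is intended
# to be used as a context manager in tests, along the lines of
#     with ManagedServerLoop(application) as server:
#         response = http_get(server.io_loop, url(server))
# __enter__ starts the server and __exit__ unlistens, stops it and closes the
# private IOLoop created in __init__.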
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
stonebig/bokeh
|
bokeh/server/tests/utils.py
|
Python
|
bsd-3-clause
| 4,151
|
from numpy import asarray, cumprod, convolve, exp, ones
from numpy.random import lognormal, gamma, uniform
from bokeh.layouts import row, column, gridplot
from bokeh.models import ColumnDataSource, Slider, Select
from bokeh.plotting import curdoc, figure
from bokeh.driving import count
BUFSIZE = 200
MA12, MA26, EMA12, EMA26 = '12-tick Moving Avg', '26-tick Moving Avg', '12-tick EMA', '26-tick EMA'
source = ColumnDataSource(dict(
time=[], average=[], low=[], high=[], open=[], close=[],
ma=[], macd=[], macd9=[], macdh=[], color=[]
))
p = figure(plot_height=500, tools="xpan,xwheel_zoom,xbox_zoom,reset", x_axis_type=None, y_axis_location="right")
p.x_range.follow = "end"
p.x_range.follow_interval = 100
p.x_range.range_padding = 0
p.line(x='time', y='average', alpha=0.2, line_width=3, color='navy', source=source)
p.line(x='time', y='ma', alpha=0.8, line_width=2, color='orange', source=source)
p.segment(x0='time', y0='low', x1='time', y1='high', line_width=2, color='black', source=source)
p.segment(x0='time', y0='open', x1='time', y1='close', line_width=8, color='color', source=source)
p2 = figure(plot_height=250, x_range=p.x_range, tools="xpan,xwheel_zoom,xbox_zoom,reset", y_axis_location="right")
p2.line(x='time', y='macd', color='red', source=source)
p2.line(x='time', y='macd9', color='blue', source=source)
p2.segment(x0='time', y0=0, x1='time', y1='macdh', line_width=6, color='black', alpha=0.5, source=source)
mean = Slider(title="mean", value=0, start=-0.01, end=0.01, step=0.001)
stddev = Slider(title="stddev", value=0.04, start=0.01, end=0.1, step=0.01)
mavg = Select(value=MA12, options=[MA12, MA26, EMA12, EMA26])
curdoc().add_root(column(row(mean, stddev, mavg), gridplot([[p], [p2]], toolbar_location="left", plot_width=1000)))
def _create_prices(t):
last_average = 100 if t==0 else source.data['average'][-1]
returns = asarray(lognormal(mean.value, stddev.value, 1))
average = last_average * cumprod(returns)
high = average * exp(abs(gamma(1, 0.03, size=1)))
low = average / exp(abs(gamma(1, 0.03, size=1)))
delta = high - low
open = low + delta * uniform(0.05, 0.95, size=1)
close = low + delta * uniform(0.05, 0.95, size=1)
return open[0], high[0], low[0], close[0], average[0]
def _moving_avg(prices, days=10):
if len(prices) < days: return [100]
return convolve(prices[-days:], ones(days, dtype=float), mode="valid") / days
def _ema(prices, days=10):
if len(prices) < days or days < 2: return [prices[-1]]
a = 2.0 / (days+1)
kernel = ones(days, dtype=float)
kernel[1:] = 1 - a
kernel = a * cumprod(kernel)
    # The 0.8647 factor renormalizes the truncated kernel: stopping the EMA after a finite
    # number of terms leaves a kernel that sums to roughly 1 - exp(-2), i.e. about 0.8647, rather than 1
return convolve(prices[-days:], kernel, mode="valid") / (0.8647)
@count()
def update(t):
open, high, low, close, average = _create_prices(t)
color = "green" if open < close else "red"
new_data = dict(
time=[t],
open=[open],
high=[high],
low=[low],
close=[close],
average=[average],
color=[color],
)
close = source.data['close'] + [close]
ma12 = _moving_avg(close[-12:], 12)[0]
ma26 = _moving_avg(close[-26:], 26)[0]
ema12 = _ema(close[-12:], 12)[0]
ema26 = _ema(close[-26:], 26)[0]
if mavg.value == MA12: new_data['ma'] = [ma12]
elif mavg.value == MA26: new_data['ma'] = [ma26]
elif mavg.value == EMA12: new_data['ma'] = [ema12]
elif mavg.value == EMA26: new_data['ma'] = [ema26]
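    # MACD components computed below: macd is the 12-tick EMA minus the 26-tick EMA,
    # macd9 is a 9-tick EMA of the macd series (the signal line), and macdh is
    # their difference (the histogram).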
macd = ema12 - ema26
new_data['macd'] = [macd]
macd_series = source.data['macd'] + [macd]
macd9 = _ema(macd_series[-26:], 9)[0]
new_data['macd9'] = [macd9]
new_data['macdh'] = [macd - macd9]
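    # stream() appends the new row and, with a rollover of 300, keeps only the
    # most recent 300 points in the ColumnDataSource.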
source.stream(new_data, 300)
curdoc().add_periodic_callback(update, 50)
curdoc().title = "OHLC"
|
phobson/bokeh
|
examples/app/ohlc/main.py
|
Python
|
bsd-3-clause
| 3,828
|
from os.path import join as join_path
import os
abspath = os.path.dirname(os.path.abspath(__file__))
lock_file = '/home/ai/lock'
data_dir = join_path(abspath, '..', '99_data')
train_dir = join_path(data_dir, 'train')
validation_dir = join_path(data_dir, 'validation')
result_dir = join_path(abspath, '..', '90_result')
img_height = 150
img_width = 150
channels = 3
syserr = 'S001'
inputerr = 'B001'
locked = 'B002'
|
mrm-xiefan/lunania-ai
|
vggtest/config.py
|
Python
|
mit
| 420
|
"""This is the actual code we use to score people's solutions
server-side. The interfaces here are not yet stable, but we include
them so that people can reproduce our scoring calculations
independently.
We correspondingly do not currently import this module.
"""
import numpy as np
import requests
import gym
def score_from_remote(url):
result = requests.get(url)
parsed = result.json()
episode_lengths = parsed['episode_lengths']
episode_rewards = parsed['episode_rewards']
timestamps = parsed['timestamps']
# Handle legacy entries where initial_reset_timestamp wasn't set
initial_reset_timestamp = parsed.get('initial_reset_timestamp', timestamps[0])
env_id = parsed['env_id']
spec = gym.spec(env_id)
return score_from_merged(episode_lengths, episode_rewards, timestamps, initial_reset_timestamp, spec.trials, spec.reward_threshold)
def score_from_local(directory):
"""Calculate score from a local results directory"""
results = gym.monitoring.monitor.load_results(directory)
# No scores yet saved
if results is None:
return None
episode_lengths = results['episode_lengths']
episode_rewards = results['episode_rewards']
timestamps = results['timestamps']
initial_reset_timestamp = results['initial_reset_timestamp']
spec = gym.spec(results['env_info']['env_id'])
return score_from_merged(episode_lengths, episode_rewards, timestamps, initial_reset_timestamp, spec.trials, spec.reward_threshold)
def score_from_merged(episode_lengths, episode_rewards, timestamps, initial_reset_timestamp, trials, reward_threshold):
"""Method to calculate the score from merged monitor files.
"""
# Make sure everything is a float -- no pesky ints.
episode_rewards = np.array(episode_rewards, dtype='float64')
episode_t_value = timestep_t_value = mean = error = None
seconds_to_solve = seconds_in_total = None
if len(timestamps) > 0:
# This is: time from the first reset to the end of the last episode
seconds_in_total = timestamps[-1] - initial_reset_timestamp
if len(episode_rewards) >= trials:
means = running_mean(episode_rewards, trials)
if reward_threshold is not None:
# Compute t-value by finding the first index at or above
# the threshold. It comes out as a singleton tuple.
(indexes_above_threshold, ) = np.where(means >= reward_threshold)
if len(indexes_above_threshold) > 0:
# Grab the first episode index that is above the threshold value
episode_t_value = indexes_above_threshold[0]
# Find timestep corresponding to this episode
cumulative_timesteps = np.cumsum(np.insert(episode_lengths, 0, 0))
# Convert that into timesteps
timestep_t_value = cumulative_timesteps[episode_t_value]
# This is: time from the first reset to the end of the first solving episode
seconds_to_solve = timestamps[episode_t_value] - initial_reset_timestamp
# Find the window with the best mean
best_idx = np.argmax(means)
best_rewards = episode_rewards[best_idx:best_idx+trials]
mean = np.mean(best_rewards)
if trials == 1: # avoid NaN
error = 0.
else:
error = np.std(best_rewards) / (np.sqrt(trials) - 1)
return {
'episode_t_value': episode_t_value,
'timestep_t_value': timestep_t_value,
'mean': mean,
'error': error,
'number_episodes': len(episode_rewards),
'number_timesteps': sum(episode_lengths),
'seconds_to_solve': seconds_to_solve,
'seconds_in_total': seconds_in_total,
}
def running_mean(x, N):
x = np.array(x, dtype='float64')
cumsum = np.cumsum(np.insert(x, 0, 0))
return (cumsum[N:] - cumsum[:-N]) / N
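# Illustrative check (not in the original module): with the cumulative-sum trick
# above, running_mean([1, 2, 3, 4], 2) evaluates to array([1.5, 2.5, 3.5]),
# i.e. the mean of every window of N consecutive values.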
def compute_graph_stats(episode_lengths, episode_rewards, timestamps, initial_reset_timestamp, buckets):
"""Method to compute the aggregates for the graphs."""
# Not a dependency of OpenAI Gym generally.
import scipy.stats
num_episodes = len(episode_lengths)
    # Guard against the case of no recorded episodes, which would make scipy.stats.binned_statistic raise an error
if num_episodes == 0:
return None
episode_rewards = np.array(episode_rewards)
episode_lengths = np.array(episode_lengths)
# The index of the start of each episode
x_timestep = np.cumsum(np.insert(episode_lengths, 0, 0))[:-1]
assert len(x_timestep) == num_episodes
# Delta since the beginning of time
x_seconds = [timestamp - initial_reset_timestamp for timestamp in timestamps]
# The index of each episode
x_episode = range(num_episodes)
# Calculate the appropriate x/y statistics
x_timestep_y_reward = scipy.stats.binned_statistic(x_timestep, episode_rewards, 'median', buckets)
x_timestep_y_length = scipy.stats.binned_statistic(x_timestep, episode_lengths, 'median', buckets)
x_episode_y_reward = scipy.stats.binned_statistic(x_episode, episode_rewards, 'median', buckets)
x_episode_y_length = scipy.stats.binned_statistic(x_episode, episode_lengths, 'median', buckets)
x_seconds_y_reward = scipy.stats.binned_statistic(x_seconds, episode_rewards, 'median', buckets)
x_seconds_y_length = scipy.stats.binned_statistic(x_seconds, episode_lengths, 'median', buckets)
return {
'initial_reset_timestamp': initial_reset_timestamp,
'x_timestep_y_reward': graphable_binned_statistic(x_timestep_y_reward),
'x_timestep_y_length': graphable_binned_statistic(x_timestep_y_length),
'x_episode_y_reward': graphable_binned_statistic(x_episode_y_reward),
'x_episode_y_length': graphable_binned_statistic(x_episode_y_length),
'x_seconds_y_length': graphable_binned_statistic(x_seconds_y_length),
'x_seconds_y_reward': graphable_binned_statistic(x_seconds_y_reward),
}
def graphable_binned_statistic(binned):
x = running_mean(binned.bin_edges, 2)
y = binned.statistic
assert len(x) == len(y)
# Get rid of nasty NaNs
valid = np.logical_not(np.isnan(x)) & np.logical_not(np.isnan(y))
x = x[valid]
y = y[valid]
return {
'x': x,
'y': y,
}
|
machinaut/gym
|
gym/scoreboard/scoring.py
|
Python
|
mit
| 6,275
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Fabian Barkhau <fabian.barkhau@gmail.com>
# License: MIT (see LICENSE.TXT file)
import os
import random
from django.http import HttpResponseRedirect
from django.views.decorators.http import require_http_methods
from apps.common.shortcuts import render_response
from apps.site.forms import TeamSelectForm
from config.settings import PROJECT_DIR
@require_http_methods(['GET', 'POST'])
def index(request):
if request.method == 'POST':
form = TeamSelectForm(request.POST)
if form.is_valid():
team_link = form.cleaned_data['team'].link
return HttpResponseRedirect("/%s" % team_link)
else:
form = TeamSelectForm()
imgdir = os.path.join("apps", "site", "static", "site", "splash")
img = random.choice(os.listdir(os.path.join(PROJECT_DIR, imgdir)))
splash_bg = os.path.join("/static", "site", "splash", img)
args = { 'form' : form, "splash_bg" : splash_bg }
return render_response(request, 'site/index.html', args)
@require_http_methods(['GET'])
def terms(request):
return render_response(request, 'site/terms.html', {})
@require_http_methods(['GET'])
def donate(request):
return render_response(request, 'site/donate.html', {})
@require_http_methods(['GET'])
def google_site_verification(request):
return render_response(request, 'google437fc9f7c48a6697.html', {})
|
F483/bikesurf.org
|
apps/site/views.py
|
Python
|
mit
| 1,402
|
from collections import deque
from contextlib import contextmanager
import json
from jsonschema import FormatChecker, ValidationError
from jsonschema.tests.compat import mock, unittest
from jsonschema.validators import (
RefResolutionError, UnknownType, Draft3Validator,
Draft4Validator, RefResolver, create, extend, validator_for, validate,
)
class TestCreateAndExtend(unittest.TestCase):
def setUp(self):
self.meta_schema = {u"properties" : {u"smelly" : {}}}
self.smelly = mock.MagicMock()
self.validators = {u"smelly" : self.smelly}
self.types = {u"dict" : dict}
self.Validator = create(
meta_schema=self.meta_schema,
validators=self.validators,
default_types=self.types,
)
self.validator_value = 12
self.schema = {u"smelly" : self.validator_value}
self.validator = self.Validator(self.schema)
def test_attrs(self):
self.assertEqual(self.Validator.VALIDATORS, self.validators)
self.assertEqual(self.Validator.META_SCHEMA, self.meta_schema)
self.assertEqual(self.Validator.DEFAULT_TYPES, self.types)
def test_init(self):
self.assertEqual(self.validator.schema, self.schema)
def test_iter_errors(self):
instance = "hello"
self.smelly.return_value = []
self.assertEqual(list(self.validator.iter_errors(instance)), [])
error = mock.Mock()
self.smelly.return_value = [error]
self.assertEqual(list(self.validator.iter_errors(instance)), [error])
self.smelly.assert_called_with(
self.validator, self.validator_value, instance, self.schema,
)
def test_if_a_version_is_provided_it_is_registered(self):
with mock.patch("jsonschema.validators.validates") as validates:
validates.side_effect = lambda version : lambda cls : cls
Validator = create(meta_schema={u"id" : ""}, version="my version")
validates.assert_called_once_with("my version")
self.assertEqual(Validator.__name__, "MyVersionValidator")
def test_if_a_version_is_not_provided_it_is_not_registered(self):
with mock.patch("jsonschema.validators.validates") as validates:
create(meta_schema={u"id" : "id"})
self.assertFalse(validates.called)
def test_extend(self):
validators = dict(self.Validator.VALIDATORS)
new = mock.Mock()
Extended = extend(self.Validator, validators={u"a new one" : new})
validators.update([(u"a new one", new)])
self.assertEqual(Extended.VALIDATORS, validators)
self.assertNotIn(u"a new one", self.Validator.VALIDATORS)
self.assertEqual(Extended.META_SCHEMA, self.Validator.META_SCHEMA)
self.assertEqual(Extended.DEFAULT_TYPES, self.Validator.DEFAULT_TYPES)
class TestIterErrors(unittest.TestCase):
def setUp(self):
self.validator = Draft3Validator({})
def test_iter_errors(self):
instance = [1, 2]
schema = {
u"disallow" : u"array",
u"enum" : [["a", "b", "c"], ["d", "e", "f"]],
u"minItems" : 3
}
got = (e.message for e in self.validator.iter_errors(instance, schema))
expected = [
"%r is disallowed for [1, 2]" % (schema["disallow"],),
"[1, 2] is too short",
"[1, 2] is not one of %r" % (schema["enum"],),
]
self.assertEqual(sorted(got), sorted(expected))
def test_iter_errors_multiple_failures_one_validator(self):
instance = {"foo" : 2, "bar" : [1], "baz" : 15, "quux" : "spam"}
schema = {
u"properties" : {
"foo" : {u"type" : "string"},
"bar" : {u"minItems" : 2},
"baz" : {u"maximum" : 10, u"enum" : [2, 4, 6, 8]},
}
}
errors = list(self.validator.iter_errors(instance, schema))
self.assertEqual(len(errors), 4)
class TestValidationErrorMessages(unittest.TestCase):
def message_for(self, instance, schema, *args, **kwargs):
kwargs.setdefault("cls", Draft3Validator)
with self.assertRaises(ValidationError) as e:
validate(instance, schema, *args, **kwargs)
return e.exception.message
def test_single_type_failure(self):
message = self.message_for(instance=1, schema={u"type" : u"string"})
self.assertEqual(message, "1 is not of type %r" % u"string")
def test_single_type_list_failure(self):
message = self.message_for(instance=1, schema={u"type" : [u"string"]})
self.assertEqual(message, "1 is not of type %r" % u"string")
def test_multiple_type_failure(self):
types = u"string", u"object"
message = self.message_for(instance=1, schema={u"type" : list(types)})
self.assertEqual(message, "1 is not of type %r, %r" % types)
def test_object_without_title_type_failure(self):
type = {u"type" : [{u"minimum" : 3}]}
message = self.message_for(instance=1, schema={u"type" : [type]})
self.assertEqual(message, "1 is not of type %r" % (type,))
def test_object_with_name_type_failure(self):
name = "Foo"
schema = {u"type" : [{u"name" : name, u"minimum" : 3}]}
message = self.message_for(instance=1, schema=schema)
self.assertEqual(message, "1 is not of type %r" % (name,))
def test_minimum(self):
message = self.message_for(instance=1, schema={"minimum" : 2})
self.assertEqual(message, "1 is less than the minimum of 2")
def test_maximum(self):
message = self.message_for(instance=1, schema={"maximum" : 0})
self.assertEqual(message, "1 is greater than the maximum of 0")
def test_dependencies_failure_has_single_element_not_list(self):
depend, on = "bar", "foo"
schema = {u"dependencies" : {depend : on}}
message = self.message_for({"bar" : 2}, schema)
self.assertEqual(message, "%r is a dependency of %r" % (on, depend))
def test_additionalItems_single_failure(self):
message = self.message_for(
[2], {u"items" : [], u"additionalItems" : False},
)
self.assertIn("(2 was unexpected)", message)
def test_additionalItems_multiple_failures(self):
message = self.message_for(
[1, 2, 3], {u"items" : [], u"additionalItems" : False}
)
self.assertIn("(1, 2, 3 were unexpected)", message)
def test_additionalProperties_single_failure(self):
additional = "foo"
schema = {u"additionalProperties" : False}
message = self.message_for({additional : 2}, schema)
self.assertIn("(%r was unexpected)" % (additional,), message)
def test_additionalProperties_multiple_failures(self):
schema = {u"additionalProperties" : False}
message = self.message_for(dict.fromkeys(["foo", "bar"]), schema)
self.assertIn(repr("foo"), message)
self.assertIn(repr("bar"), message)
self.assertIn("were unexpected)", message)
def test_invalid_format_default_message(self):
checker = FormatChecker(formats=())
check_fn = mock.Mock(return_value=False)
checker.checks(u"thing")(check_fn)
schema = {u"format" : u"thing"}
message = self.message_for("bla", schema, format_checker=checker)
self.assertIn(repr("bla"), message)
self.assertIn(repr("thing"), message)
self.assertIn("is not a", message)
class TestValidationErrorDetails(unittest.TestCase):
# TODO: These really need unit tests for each individual validator, rather
# than just these higher level tests.
def test_anyOf(self):
instance = 5
schema = {
"anyOf": [
{"minimum": 20},
{"type": "string"}
]
}
validator = Draft4Validator(schema)
errors = list(validator.iter_errors(instance))
self.assertEqual(len(errors), 1)
e = errors[0]
self.assertEqual(e.validator, "anyOf")
self.assertEqual(e.validator_value, schema["anyOf"])
self.assertEqual(e.instance, instance)
self.assertEqual(e.schema, schema)
self.assertIsNone(e.parent)
self.assertEqual(e.path, deque([]))
self.assertEqual(e.relative_path, deque([]))
self.assertEqual(e.absolute_path, deque([]))
self.assertEqual(e.schema_path, deque(["anyOf"]))
self.assertEqual(e.relative_schema_path, deque(["anyOf"]))
self.assertEqual(e.absolute_schema_path, deque(["anyOf"]))
self.assertEqual(len(e.context), 2)
e1, e2 = sorted_errors(e.context)
self.assertEqual(e1.validator, "minimum")
self.assertEqual(e1.validator_value, schema["anyOf"][0]["minimum"])
self.assertEqual(e1.instance, instance)
self.assertEqual(e1.schema, schema["anyOf"][0])
self.assertIs(e1.parent, e)
self.assertEqual(e1.path, deque([]))
self.assertEqual(e1.absolute_path, deque([]))
self.assertEqual(e1.relative_path, deque([]))
self.assertEqual(e1.schema_path, deque([0, "minimum"]))
self.assertEqual(e1.relative_schema_path, deque([0, "minimum"]))
self.assertEqual(
e1.absolute_schema_path, deque(["anyOf", 0, "minimum"]),
)
self.assertFalse(e1.context)
self.assertEqual(e2.validator, "type")
self.assertEqual(e2.validator_value, schema["anyOf"][1]["type"])
self.assertEqual(e2.instance, instance)
self.assertEqual(e2.schema, schema["anyOf"][1])
self.assertIs(e2.parent, e)
self.assertEqual(e2.path, deque([]))
self.assertEqual(e2.relative_path, deque([]))
self.assertEqual(e2.absolute_path, deque([]))
self.assertEqual(e2.schema_path, deque([1, "type"]))
self.assertEqual(e2.relative_schema_path, deque([1, "type"]))
self.assertEqual(e2.absolute_schema_path, deque(["anyOf", 1, "type"]))
self.assertEqual(len(e2.context), 0)
def test_type(self):
instance = {"foo": 1}
schema = {
"type": [
{"type": "integer"},
{
"type": "object",
"properties": {
"foo": {"enum": [2]}
}
}
]
}
validator = Draft3Validator(schema)
errors = list(validator.iter_errors(instance))
self.assertEqual(len(errors), 1)
e = errors[0]
self.assertEqual(e.validator, "type")
self.assertEqual(e.validator_value, schema["type"])
self.assertEqual(e.instance, instance)
self.assertEqual(e.schema, schema)
self.assertIsNone(e.parent)
self.assertEqual(e.path, deque([]))
self.assertEqual(e.relative_path, deque([]))
self.assertEqual(e.absolute_path, deque([]))
self.assertEqual(e.schema_path, deque(["type"]))
self.assertEqual(e.relative_schema_path, deque(["type"]))
self.assertEqual(e.absolute_schema_path, deque(["type"]))
self.assertEqual(len(e.context), 2)
e1, e2 = sorted_errors(e.context)
self.assertEqual(e1.validator, "type")
self.assertEqual(e1.validator_value, schema["type"][0]["type"])
self.assertEqual(e1.instance, instance)
self.assertEqual(e1.schema, schema["type"][0])
self.assertIs(e1.parent, e)
self.assertEqual(e1.path, deque([]))
self.assertEqual(e1.relative_path, deque([]))
self.assertEqual(e1.absolute_path, deque([]))
self.assertEqual(e1.schema_path, deque([0, "type"]))
self.assertEqual(e1.relative_schema_path, deque([0, "type"]))
self.assertEqual(e1.absolute_schema_path, deque(["type", 0, "type"]))
self.assertFalse(e1.context)
self.assertEqual(e2.validator, "enum")
self.assertEqual(e2.validator_value, [2])
self.assertEqual(e2.instance, 1)
self.assertEqual(e2.schema, {u"enum" : [2]})
self.assertIs(e2.parent, e)
self.assertEqual(e2.path, deque(["foo"]))
self.assertEqual(e2.relative_path, deque(["foo"]))
self.assertEqual(e2.absolute_path, deque(["foo"]))
self.assertEqual(
e2.schema_path, deque([1, "properties", "foo", "enum"]),
)
self.assertEqual(
e2.relative_schema_path, deque([1, "properties", "foo", "enum"]),
)
self.assertEqual(
e2.absolute_schema_path,
deque(["type", 1, "properties", "foo", "enum"]),
)
self.assertFalse(e2.context)
def test_single_nesting(self):
instance = {"foo" : 2, "bar" : [1], "baz" : 15, "quux" : "spam"}
schema = {
"properties" : {
"foo" : {"type" : "string"},
"bar" : {"minItems" : 2},
"baz" : {"maximum" : 10, "enum" : [2, 4, 6, 8]},
}
}
validator = Draft3Validator(schema)
errors = validator.iter_errors(instance)
e1, e2, e3, e4 = sorted_errors(errors)
self.assertEqual(e1.path, deque(["bar"]))
self.assertEqual(e2.path, deque(["baz"]))
self.assertEqual(e3.path, deque(["baz"]))
self.assertEqual(e4.path, deque(["foo"]))
self.assertEqual(e1.relative_path, deque(["bar"]))
self.assertEqual(e2.relative_path, deque(["baz"]))
self.assertEqual(e3.relative_path, deque(["baz"]))
self.assertEqual(e4.relative_path, deque(["foo"]))
self.assertEqual(e1.absolute_path, deque(["bar"]))
self.assertEqual(e2.absolute_path, deque(["baz"]))
self.assertEqual(e3.absolute_path, deque(["baz"]))
self.assertEqual(e4.absolute_path, deque(["foo"]))
self.assertEqual(e1.validator, "minItems")
self.assertEqual(e2.validator, "enum")
self.assertEqual(e3.validator, "maximum")
self.assertEqual(e4.validator, "type")
def test_multiple_nesting(self):
instance = [1, {"foo" : 2, "bar" : {"baz" : [1]}}, "quux"]
schema = {
"type" : "string",
"items" : {
"type" : ["string", "object"],
"properties" : {
"foo" : {"enum" : [1, 3]},
"bar" : {
"type" : "array",
"properties" : {
"bar" : {"required" : True},
"baz" : {"minItems" : 2},
}
}
}
}
}
validator = Draft3Validator(schema)
errors = validator.iter_errors(instance)
e1, e2, e3, e4, e5, e6 = sorted_errors(errors)
self.assertEqual(e1.path, deque([]))
self.assertEqual(e2.path, deque([0]))
self.assertEqual(e3.path, deque([1, "bar"]))
self.assertEqual(e4.path, deque([1, "bar", "bar"]))
self.assertEqual(e5.path, deque([1, "bar", "baz"]))
self.assertEqual(e6.path, deque([1, "foo"]))
self.assertEqual(e1.schema_path, deque(["type"]))
self.assertEqual(e2.schema_path, deque(["items", "type"]))
self.assertEqual(
list(e3.schema_path), ["items", "properties", "bar", "type"],
)
self.assertEqual(
list(e4.schema_path),
["items", "properties", "bar", "properties", "bar", "required"],
)
self.assertEqual(
list(e5.schema_path),
["items", "properties", "bar", "properties", "baz", "minItems"]
)
self.assertEqual(
list(e6.schema_path), ["items", "properties", "foo", "enum"],
)
self.assertEqual(e1.validator, "type")
self.assertEqual(e2.validator, "type")
self.assertEqual(e3.validator, "type")
self.assertEqual(e4.validator, "required")
self.assertEqual(e5.validator, "minItems")
self.assertEqual(e6.validator, "enum")
def test_recursive(self):
schema = {
"definitions": {
"node": {
"anyOf": [{
"type": "object",
"required": ["name", "children"],
"properties": {
"name": {
"type": "string",
},
"children": {
"type": "object",
"patternProperties": {
"^.*$": {
"$ref": "#/definitions/node",
},
},
},
},
}],
},
},
"type": "object",
"required": ["root"],
"properties": {
"root": {"$ref": "#/definitions/node"},
}
}
instance = {
"root": {
"name": "root",
"children": {
"a": {
"name": "a",
"children": {
"ab": {
"name": "ab",
# missing "children"
}
}
},
},
},
}
validator = Draft4Validator(schema)
e, = validator.iter_errors(instance)
self.assertEqual(e.absolute_path, deque(["root"]))
self.assertEqual(
e.absolute_schema_path, deque(["properties", "root", "anyOf"]),
)
e1, = e.context
self.assertEqual(e1.absolute_path, deque(["root", "children", "a"]))
self.assertEqual(
e1.absolute_schema_path, deque(
[
"properties",
"root",
"anyOf",
0,
"properties",
"children",
"patternProperties",
"^.*$",
"anyOf",
],
),
)
e2, = e1.context
self.assertEqual(
e2.absolute_path, deque(
["root", "children", "a", "children", "ab"],
),
)
self.assertEqual(
e2.absolute_schema_path, deque(
[
"properties",
"root",
"anyOf",
0,
"properties",
"children",
"patternProperties",
"^.*$",
"anyOf",
0,
"properties",
"children",
"patternProperties",
"^.*$",
"anyOf"
],
),
)
def test_additionalProperties(self):
instance = {"bar": "bar", "foo": 2}
schema = {
"additionalProperties" : {"type": "integer", "minimum": 5}
}
validator = Draft3Validator(schema)
errors = validator.iter_errors(instance)
e1, e2 = sorted_errors(errors)
self.assertEqual(e1.path, deque(["bar"]))
self.assertEqual(e2.path, deque(["foo"]))
self.assertEqual(e1.validator, "type")
self.assertEqual(e2.validator, "minimum")
def test_patternProperties(self):
instance = {"bar": 1, "foo": 2}
schema = {
"patternProperties" : {
"bar": {"type": "string"},
"foo": {"minimum": 5}
}
}
validator = Draft3Validator(schema)
errors = validator.iter_errors(instance)
e1, e2 = sorted_errors(errors)
self.assertEqual(e1.path, deque(["bar"]))
self.assertEqual(e2.path, deque(["foo"]))
self.assertEqual(e1.validator, "type")
self.assertEqual(e2.validator, "minimum")
def test_additionalItems(self):
instance = ["foo", 1]
schema = {
"items": [],
"additionalItems" : {"type": "integer", "minimum": 5}
}
validator = Draft3Validator(schema)
errors = validator.iter_errors(instance)
e1, e2 = sorted_errors(errors)
self.assertEqual(e1.path, deque([0]))
self.assertEqual(e2.path, deque([1]))
self.assertEqual(e1.validator, "type")
self.assertEqual(e2.validator, "minimum")
def test_additionalItems_with_items(self):
instance = ["foo", "bar", 1]
schema = {
"items": [{}],
"additionalItems" : {"type": "integer", "minimum": 5}
}
validator = Draft3Validator(schema)
errors = validator.iter_errors(instance)
e1, e2 = sorted_errors(errors)
self.assertEqual(e1.path, deque([1]))
self.assertEqual(e2.path, deque([2]))
self.assertEqual(e1.validator, "type")
self.assertEqual(e2.validator, "minimum")
class ValidatorTestMixin(object):
def setUp(self):
self.instance = mock.Mock()
self.schema = {}
self.resolver = mock.Mock()
self.validator = self.validator_class(self.schema)
def test_valid_instances_are_valid(self):
errors = iter([])
with mock.patch.object(
self.validator, "iter_errors", return_value=errors,
):
self.assertTrue(
self.validator.is_valid(self.instance, self.schema)
)
def test_invalid_instances_are_not_valid(self):
errors = iter([mock.Mock()])
with mock.patch.object(
self.validator, "iter_errors", return_value=errors,
):
self.assertFalse(
self.validator.is_valid(self.instance, self.schema)
)
def test_non_existent_properties_are_ignored(self):
instance, my_property, my_value = mock.Mock(), mock.Mock(), mock.Mock()
validate(instance=instance, schema={my_property : my_value})
def test_it_creates_a_ref_resolver_if_not_provided(self):
self.assertIsInstance(self.validator.resolver, RefResolver)
def test_it_delegates_to_a_ref_resolver(self):
resolver = RefResolver("", {})
schema = {"$ref" : mock.Mock()}
@contextmanager
def resolving():
yield {"type": "integer"}
with mock.patch.object(resolver, "resolving") as resolve:
resolve.return_value = resolving()
with self.assertRaises(ValidationError):
self.validator_class(schema, resolver=resolver).validate(None)
resolve.assert_called_once_with(schema["$ref"])
def test_is_type_is_true_for_valid_type(self):
self.assertTrue(self.validator.is_type("foo", "string"))
def test_is_type_is_false_for_invalid_type(self):
self.assertFalse(self.validator.is_type("foo", "array"))
def test_is_type_evades_bool_inheriting_from_int(self):
self.assertFalse(self.validator.is_type(True, "integer"))
self.assertFalse(self.validator.is_type(True, "number"))
def test_is_type_raises_exception_for_unknown_type(self):
with self.assertRaises(UnknownType):
self.validator.is_type("foo", object())
class TestDraft3Validator(ValidatorTestMixin, unittest.TestCase):
validator_class = Draft3Validator
def test_is_type_is_true_for_any_type(self):
self.assertTrue(self.validator.is_valid(mock.Mock(), {"type": "any"}))
def test_is_type_does_not_evade_bool_if_it_is_being_tested(self):
self.assertTrue(self.validator.is_type(True, "boolean"))
self.assertTrue(self.validator.is_valid(True, {"type": "any"}))
def test_non_string_custom_types(self):
schema = {'type': [None]}
cls = self.validator_class(schema, types={None: type(None)})
cls.validate(None, schema)
class TestDraft4Validator(ValidatorTestMixin, unittest.TestCase):
validator_class = Draft4Validator
class TestBuiltinFormats(unittest.TestCase):
"""
The built-in (specification-defined) formats do not raise type errors.
If an instance or value is not a string, it should be ignored.
"""
for format in FormatChecker.checkers:
def test(self, format=format):
v = Draft4Validator({"format": format}, format_checker=FormatChecker())
v.validate(123)
name = "test_{0}_ignores_non_strings".format(format)
test.__name__ = name
setattr(TestBuiltinFormats, name, test)
del test # Ugh py.test. Stop discovering top level tests.
class TestValidatorFor(unittest.TestCase):
def test_draft_3(self):
schema = {"$schema" : "http://json-schema.org/draft-03/schema"}
self.assertIs(validator_for(schema), Draft3Validator)
schema = {"$schema" : "http://json-schema.org/draft-03/schema#"}
self.assertIs(validator_for(schema), Draft3Validator)
def test_draft_4(self):
schema = {"$schema" : "http://json-schema.org/draft-04/schema"}
self.assertIs(validator_for(schema), Draft4Validator)
schema = {"$schema" : "http://json-schema.org/draft-04/schema#"}
self.assertIs(validator_for(schema), Draft4Validator)
def test_custom_validator(self):
Validator = create(meta_schema={"id" : "meta schema id"}, version="12")
schema = {"$schema" : "meta schema id"}
self.assertIs(validator_for(schema), Validator)
def test_validator_for_jsonschema_default(self):
self.assertIs(validator_for({}), Draft4Validator)
def test_validator_for_custom_default(self):
self.assertIs(validator_for({}, default=None), None)
class TestValidate(unittest.TestCase):
def test_draft3_validator_is_chosen(self):
schema = {"$schema" : "http://json-schema.org/draft-03/schema#"}
with mock.patch.object(Draft3Validator, "check_schema") as chk_schema:
validate({}, schema)
chk_schema.assert_called_once_with(schema)
# Make sure it works without the empty fragment
schema = {"$schema" : "http://json-schema.org/draft-03/schema"}
with mock.patch.object(Draft3Validator, "check_schema") as chk_schema:
validate({}, schema)
chk_schema.assert_called_once_with(schema)
def test_draft4_validator_is_chosen(self):
schema = {"$schema" : "http://json-schema.org/draft-04/schema#"}
with mock.patch.object(Draft4Validator, "check_schema") as chk_schema:
validate({}, schema)
chk_schema.assert_called_once_with(schema)
def test_draft4_validator_is_the_default(self):
with mock.patch.object(Draft4Validator, "check_schema") as chk_schema:
validate({}, {})
chk_schema.assert_called_once_with({})
class TestRefResolver(unittest.TestCase):
base_uri = ""
stored_uri = "foo://stored"
stored_schema = {"stored" : "schema"}
def setUp(self):
self.referrer = {}
self.store = {self.stored_uri : self.stored_schema}
self.resolver = RefResolver(self.base_uri, self.referrer, self.store)
def test_it_does_not_retrieve_schema_urls_from_the_network(self):
ref = Draft3Validator.META_SCHEMA["id"]
with mock.patch.object(self.resolver, "resolve_remote") as remote:
with self.resolver.resolving(ref) as resolved:
self.assertEqual(resolved, Draft3Validator.META_SCHEMA)
self.assertFalse(remote.called)
def test_it_resolves_local_refs(self):
ref = "#/properties/foo"
self.referrer["properties"] = {"foo" : object()}
with self.resolver.resolving(ref) as resolved:
self.assertEqual(resolved, self.referrer["properties"]["foo"])
def test_it_resolves_local_refs_with_id(self):
schema = {"id": "foo://bar/schema#", "a": {"foo": "bar"}}
resolver = RefResolver.from_schema(schema)
with resolver.resolving("#/a") as resolved:
self.assertEqual(resolved, schema["a"])
with resolver.resolving("foo://bar/schema#/a") as resolved:
self.assertEqual(resolved, schema["a"])
def test_it_retrieves_stored_refs(self):
with self.resolver.resolving(self.stored_uri) as resolved:
self.assertIs(resolved, self.stored_schema)
self.resolver.store["cached_ref"] = {"foo" : 12}
with self.resolver.resolving("cached_ref#/foo") as resolved:
self.assertEqual(resolved, 12)
def test_it_retrieves_unstored_refs_via_requests(self):
ref = "http://bar#baz"
schema = {"baz" : 12}
with mock.patch("jsonschema.validators.requests") as requests:
requests.get.return_value.json.return_value = schema
with self.resolver.resolving(ref) as resolved:
self.assertEqual(resolved, 12)
requests.get.assert_called_once_with("http://bar")
def test_it_retrieves_unstored_refs_via_urlopen(self):
ref = "http://bar#baz"
schema = {"baz" : 12}
with mock.patch("jsonschema.validators.requests", None):
with mock.patch("jsonschema.validators.urlopen") as urlopen:
urlopen.return_value.read.return_value = (
json.dumps(schema).encode("utf8"))
with self.resolver.resolving(ref) as resolved:
self.assertEqual(resolved, 12)
urlopen.assert_called_once_with("http://bar")
def test_it_can_construct_a_base_uri_from_a_schema(self):
schema = {"id" : "foo"}
resolver = RefResolver.from_schema(schema)
self.assertEqual(resolver.base_uri, "foo")
with resolver.resolving("") as resolved:
self.assertEqual(resolved, schema)
with resolver.resolving("#") as resolved:
self.assertEqual(resolved, schema)
with resolver.resolving("foo") as resolved:
self.assertEqual(resolved, schema)
with resolver.resolving("foo#") as resolved:
self.assertEqual(resolved, schema)
def test_it_can_construct_a_base_uri_from_a_schema_without_id(self):
schema = {}
resolver = RefResolver.from_schema(schema)
self.assertEqual(resolver.base_uri, "")
with resolver.resolving("") as resolved:
self.assertEqual(resolved, schema)
with resolver.resolving("#") as resolved:
self.assertEqual(resolved, schema)
def test_custom_uri_scheme_handlers(self):
schema = {"foo": "bar"}
ref = "foo://bar"
foo_handler = mock.Mock(return_value=schema)
resolver = RefResolver("", {}, handlers={"foo": foo_handler})
with resolver.resolving(ref) as resolved:
self.assertEqual(resolved, schema)
foo_handler.assert_called_once_with(ref)
def test_cache_remote_on(self):
ref = "foo://bar"
foo_handler = mock.Mock()
resolver = RefResolver(
"", {}, cache_remote=True, handlers={"foo" : foo_handler},
)
with resolver.resolving(ref):
pass
with resolver.resolving(ref):
pass
foo_handler.assert_called_once_with(ref)
def test_cache_remote_off(self):
ref = "foo://bar"
foo_handler = mock.Mock()
resolver = RefResolver(
"", {}, cache_remote=False, handlers={"foo" : foo_handler},
)
with resolver.resolving(ref):
pass
with resolver.resolving(ref):
pass
self.assertEqual(foo_handler.call_count, 2)
def test_if_you_give_it_junk_you_get_a_resolution_error(self):
ref = "foo://bar"
foo_handler = mock.Mock(side_effect=ValueError("Oh no! What's this?"))
resolver = RefResolver("", {}, handlers={"foo" : foo_handler})
with self.assertRaises(RefResolutionError) as err:
with resolver.resolving(ref):
pass
self.assertEqual(str(err.exception), "Oh no! What's this?")
def sorted_errors(errors):
def key(error):
return (
[str(e) for e in error.path],
[str(e) for e in error.schema_path]
)
return sorted(errors, key=key)
|
vivekgalatage/libtracing
|
third-party/jsonschema/jsonschema/tests/test_validators.py
|
Python
|
mit
| 32,355
|
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
from base64 import b64decode, b64encode
import datetime
import decimal
from enum import Enum
import json
import re
try:
from urllib import quote
except ImportError:
from urllib.parse import quote
import chardet
import isodate
from .exceptions import (
ValidationError,
SerializationError,
DeserializationError,
raise_with_traceback)
try:
basestring
except NameError:
basestring = str
class Model(object):
"""Mixin for all client request body/response body models to support
serialization and deserialization.
"""
_subtype_map = {}
_attribute_map = {}
_validation = {}
def __init__(self, *args, **kwargs):
"""Allow attribute setting via kwargs on initialization."""
for k in kwargs:
setattr(self, k, kwargs[k])
def __eq__(self, other):
"""Compare objects by comparing all attributes."""
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
return False
def __ne__(self, other):
"""Compare objects by comparing all attributes."""
return not self.__eq__(other)
def __str__(self):
return str(self.__dict__)
@classmethod
def _get_subtype_map(cls):
attr = '_subtype_map'
parents = cls.__bases__
for base in parents:
if hasattr(base, attr) and base._subtype_map:
return base._subtype_map
return {}
@classmethod
def _classify(cls, response, objects):
"""Check the class _subtype_map for any child classes.
        We want to ignore any inherited _subtype_maps.
"""
try:
map = cls.__dict__.get('_subtype_map', {})
for _type, _classes in map.items():
classification = response.get(_type)
try:
return objects[_classes[classification]]
except KeyError:
pass
for c in _classes:
try:
_cls = objects[_classes[c]]
return _cls._classify(response, objects)
except (KeyError, TypeError):
continue
raise TypeError("Object cannot be classified futher.")
except AttributeError:
raise TypeError("Object cannot be classified futher.")
class Serializer(object):
"""Request object model serializer."""
basic_types = {str: 'str', int: 'int', bool: 'bool', float: 'float'}
days = {0: "Mon", 1: "Tue", 2: "Wed", 3: "Thu",
4: "Fri", 5: "Sat", 6: "Sun"}
months = {1: "Jan", 2: "Feb", 3: "Mar", 4: "Apr", 5: "May", 6: "Jun",
7: "Jul", 8: "Aug", 9: "Sep", 10: "Oct", 11: "Nov", 12: "Dec"}
validation = {
"min_length": lambda x, y: len(x) < y,
"max_length": lambda x, y: len(x) > y,
"minimum": lambda x, y: x < y,
"maximum": lambda x, y: x > y,
"minimum_ex": lambda x, y: x <= y,
"maximum_ex": lambda x, y: x >= y,
"min_items": lambda x, y: len(x) < y,
"max_items": lambda x, y: len(x) > y,
"pattern": lambda x, y: not re.match(y, x),
"unique": lambda x, y: len(x) != len(set(x)),
"multiple": lambda x, y: x % y != 0
}
    flatten = re.compile(r"(?<!\\)\.")
def __init__(self):
self.serialize_type = {
'iso-8601': Serializer.serialize_iso,
'rfc-1123': Serializer.serialize_rfc,
'duration': Serializer.serialize_duration,
'date': Serializer.serialize_date,
'decimal': Serializer.serialize_decimal,
'long': Serializer.serialize_long,
'bytearray': Serializer.serialize_bytearray,
'object': self.serialize_object,
'[]': self.serialize_iter,
'{}': self.serialize_dict
}
def _serialize(self, target_obj, data_type=None, **kwargs):
"""Serialize data into a string according to type.
:param target_obj: The data to be serialized.
:param str data_type: The type to be serialized from.
:rtype: str, dict
:raises: SerializationError if serialization fails.
"""
if target_obj is None:
return None
serialized = {}
attr_name = None
class_name = target_obj.__class__.__name__
if data_type:
return self.serialize_data(
target_obj, data_type, **kwargs)
if not hasattr(target_obj, "_attribute_map"):
data_type = type(target_obj).__name__
if data_type in self.basic_types.values():
return self.serialize_data(
target_obj, data_type, **kwargs)
try:
attributes = target_obj._attribute_map
self._classify_data(target_obj, class_name, serialized)
for attr, map in attributes.items():
attr_name = attr
try:
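                    # The attribute map key may describe a nested location,
                    # e.g. "properties.name": unescaped dots split the path
                    # (see the `flatten` pattern above) and "\." stands for a
                    # literal dot inside a single key segment.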
                    keys = self.flatten.split(map['key'])
keys = [k.replace('\\.', '.') for k in keys]
attr_type = map['type']
orig_attr = getattr(target_obj, attr)
validation = target_obj._validation.get(attr_name, {})
orig_attr = self.validate(
orig_attr, attr_name, **validation)
new_attr = self.serialize_data(
orig_attr, attr_type, **kwargs)
for k in reversed(keys):
unflattened = {k: new_attr}
new_attr = unflattened
_new_attr = new_attr
_serialized = serialized
for k in keys:
if k not in _serialized:
_serialized.update(_new_attr)
_new_attr = _new_attr[k]
_serialized = _serialized[k]
except ValueError:
continue
except (AttributeError, KeyError, TypeError) as err:
msg = "Attribute {} in object {} cannot be serialized.".format(
attr_name, class_name)
raise_with_traceback(SerializationError, msg, err)
else:
return serialized
def _classify_data(self, target_obj, class_name, serialized):
"""Check whether this object is a child and therefor needs to be
classified in the message.
"""
try:
for _type, _classes in target_obj._get_subtype_map().items():
for ref, name in _classes.items():
if name == class_name:
serialized[_type] = ref
except AttributeError:
pass # TargetObj has no _subtype_map so we don't need to classify.
def body(self, data, data_type, **kwargs):
"""Serialize data intended for a request body.
:param data: The data to be serialized.
:param str data_type: The type to be serialized from.
:rtype: dict
:raises: SerializationError if serialization fails.
:raises: ValueError if data is None
"""
if data is None:
raise ValidationError("required", "body", True)
return self._serialize(data, data_type, **kwargs)
def url(self, name, data, data_type, **kwargs):
"""Serialize data intended for a URL path.
:param data: The data to be serialized.
:param str data_type: The type to be serialized from.
:rtype: str
:raises: TypeError if serialization fails.
:raises: ValueError if data is None
"""
data = self.validate(data, name, required=True, **kwargs)
try:
output = self.serialize_data(data, data_type, **kwargs)
if data_type == 'bool':
output = json.dumps(output)
if kwargs.get('skip_quote') is True:
output = str(output)
else:
output = quote(str(output), safe='')
except SerializationError:
raise TypeError("{} must be type {}.".format(name, data_type))
else:
return output
def query(self, name, data, data_type, **kwargs):
"""Serialize data intended for a URL query.
:param data: The data to be serialized.
:param str data_type: The type to be serialized from.
:rtype: str
:raises: TypeError if serialization fails.
:raises: ValueError if data is None
"""
data = self.validate(data, name, required=True, **kwargs)
try:
if data_type in ['[str]']:
data = ["" if d is None else d for d in data]
output = self.serialize_data(data, data_type, **kwargs)
if data_type == 'bool':
output = json.dumps(output)
if kwargs.get('skip_quote') is True:
output = str(output)
else:
output = quote(str(output), safe='')
except SerializationError:
raise TypeError("{} must be type {}.".format(name, data_type))
else:
return str(output)
def header(self, name, data, data_type, **kwargs):
"""Serialize data intended for a request header.
:param data: The data to be serialized.
:param str data_type: The type to be serialized from.
:rtype: str
:raises: TypeError if serialization fails.
:raises: ValueError if data is None
"""
data = self.validate(data, name, required=True, **kwargs)
try:
if data_type in ['[str]']:
data = ["" if d is None else d for d in data]
output = self.serialize_data(data, data_type, **kwargs)
if data_type == 'bool':
output = json.dumps(output)
except SerializationError:
raise TypeError("{} must be type {}.".format(name, data_type))
else:
return str(output)
def validate(self, data, name, **kwargs):
"""Validate that a piece of data meets certain conditions"""
required = kwargs.get('required', False)
if required and data is None:
raise ValidationError("required", name, True)
elif data is None:
return
elif kwargs.get('readonly'):
return
try:
for key, value in kwargs.items():
validator = self.validation.get(key, lambda x, y: False)
if validator(data, value):
raise ValidationError(key, name, value)
except TypeError:
raise ValidationError("unknown", name)
else:
return data
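    # Illustrative example (not part of the original class): with the lambdas in
    # `validation` above, validate("abc", "name", min_length=5) would raise
    # ValidationError("min_length", "name", 5), while validate(None, "name")
    # simply returns None because the value is optional.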
def serialize_data(self, data, data_type, **kwargs):
"""Serialize generic data according to supplied data type.
:param data: The data to be serialized.
:param str data_type: The type to be serialized from.
:param bool required: Whether it's essential that the data not be
empty or None
:raises: AttributeError if required data is None.
:raises: ValueError if data is None
:raises: SerializationError if serialization fails.
"""
if data is None:
raise ValueError("No value for given attribute")
try:
if data_type in self.basic_types.values():
return self.serialize_basic(data, data_type)
elif data_type in self.serialize_type:
return self.serialize_type[data_type](data, **kwargs)
elif isinstance(data, Enum):
return data.value
iter_type = data_type[0] + data_type[-1]
if iter_type in self.serialize_type:
return self.serialize_type[iter_type](
data, data_type[1:-1], **kwargs)
except (ValueError, TypeError) as err:
msg = "Unable to serialize value: {!r} as type: {!r}."
raise_with_traceback(
SerializationError, msg.format(data, data_type), err)
else:
return self._serialize(data, **kwargs)
def serialize_basic(self, data, data_type):
"""Serialize basic builting data type.
Serializes objects to str, int, float or bool.
:param data: Object to be serialized.
:param str data_type: Type of object in the iterable.
"""
if data_type == 'str':
return self.serialize_unicode(data)
return eval(data_type)(data)
def serialize_unicode(self, data):
"""Special handling for serializing unicode strings in Py2.
Encode to UTF-8 if unicode, otherwise handle as a str.
:param data: Object to be serialized.
:rtype: str
"""
try:
if isinstance(data, unicode):
return data.encode(encoding='utf-8')
except NameError:
return str(data)
else:
return str(data)
def serialize_iter(self, data, iter_type, div=None, **kwargs):
"""Serialize iterable.
:param list attr: Object to be serialized.
:param str iter_type: Type of object in the iterable.
:param bool required: Whether the objects in the iterable must
not be None or empty.
:param str div: If set, this str will be used to combine the elements
in the iterable into a combined string. Default is 'None'.
:rtype: list, str
"""
serialized = []
for d in data:
try:
serialized.append(
self.serialize_data(d, iter_type, **kwargs))
except ValueError:
serialized.append(None)
if div:
serialized = ['' if s is None else s for s in serialized]
serialized = div.join(serialized)
return serialized
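    # Illustrative example (not part of the original class):
    # serialize_iter(["a", "b"], "str", div=",") returns "a,b", whereas without
    # `div` the result stays a list of serialized elements.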
def serialize_dict(self, attr, dict_type, **kwargs):
"""Serialize a dictionary of objects.
:param dict attr: Object to be serialized.
:param str dict_type: Type of object in the dictionary.
:param bool required: Whether the objects in the dictionary must
not be None or empty.
:rtype: dict
"""
serialized = {}
for key, value in attr.items():
try:
serialized[str(key)] = self.serialize_data(
value, dict_type, **kwargs)
except ValueError:
serialized[str(key)] = None
return serialized
def serialize_object(self, attr, **kwargs):
"""Serialize a generic object.
        This will be handled as a dictionary. If the object passed in is not
a basic type (str, int, float, dict, list) it will simply be
cast to str.
:param dict attr: Object to be serialized.
:rtype: dict or str
"""
obj_type = type(attr)
if obj_type in self.basic_types:
return self.serialize_basic(attr, self.basic_types[obj_type])
if obj_type == dict:
serialized = {}
for key, value in attr.items():
try:
serialized[str(key)] = self.serialize_object(
value, **kwargs)
except ValueError:
serialized[str(key)] = None
return serialized
if obj_type == list:
serialized = []
for obj in attr:
try:
serialized.append(self.serialize_object(
obj, **kwargs))
except ValueError:
pass
return serialized
else:
return str(attr)
@staticmethod
def serialize_bytearray(attr, **kwargs):
"""Serialize bytearray into base-64 string.
:param attr: Object to be serialized.
:rtype: str
"""
return b64encode(attr).decode()
@staticmethod
def serialize_decimal(attr, **kwargs):
"""Serialize Decimal object to float.
:param attr: Object to be serialized.
:rtype: float
"""
return float(attr)
@staticmethod
def serialize_long(attr, **kwargs):
"""Serialize long (Py2) or int (Py3).
:param attr: Object to be serialized.
:rtype: int/long
"""
try:
return long(attr)
except NameError:
return int(attr)
@staticmethod
def serialize_date(attr, **kwargs):
"""Serialize Date object into ISO-8601 formatted string.
:param Date attr: Object to be serialized.
:rtype: str
"""
t = "{:04}-{:02}-{:02}".format(attr.year, attr.month, attr.day)
return t
@staticmethod
def serialize_duration(attr, **kwargs):
"""Serialize TimeDelta object into ISO-8601 formatted string.
:param TimeDelta attr: Object to be serialized.
:rtype: str
"""
return isodate.duration_isoformat(attr)
@staticmethod
def serialize_rfc(attr, **kwargs):
"""Serialize Datetime object into RFC-1123 formatted string.
:param Datetime attr: Object to be serialized.
:rtype: str
:raises: TypeError if format invalid.
"""
try:
utc = attr.utctimetuple()
except AttributeError:
raise TypeError("RFC1123 object must be valid Datetime object.")
return "{}, {:02} {} {:04} {:02}:{:02}:{:02} GMT".format(
Serializer.days[utc.tm_wday], utc.tm_mday,
Serializer.months[utc.tm_mon], utc.tm_year,
utc.tm_hour, utc.tm_min, utc.tm_sec)
@staticmethod
def serialize_iso(attr, **kwargs):
"""Serialize Datetime object into ISO-8601 formatted string.
:param Datetime attr: Object to be serialized.
:rtype: str
:raises: SerializationError if format invalid.
"""
if isinstance(attr, str):
attr = isodate.parse_datetime(attr)
try:
utc = attr.utctimetuple()
if utc.tm_year > 9999 or utc.tm_year < 1:
raise OverflowError("Hit max or min date")
microseconds = str(float(attr.microsecond)*1e-6)[1:].ljust(4, '0')
date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format(
utc.tm_year, utc.tm_mon, utc.tm_mday,
utc.tm_hour, utc.tm_min, utc.tm_sec)
return date + microseconds + 'Z'
except (ValueError, OverflowError) as err:
msg = "Unable to serialize datetime object."
raise_with_traceback(SerializationError, msg, err)
class Deserializer(object):
"""Response object model deserializer.
:param dict classes: Class type dictionary for deserializing
complex types.
"""
basic_types = {str: 'str', int: 'int', bool: 'bool', float: 'float'}
    valid_date = re.compile(
        r'\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}'
        r'\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?')
flatten = re.compile(r"(?<!\\)\.")
def __init__(self, classes={}):
self.deserialize_type = {
'iso-8601': Deserializer.deserialize_iso,
'rfc-1123': Deserializer.deserialize_rfc,
'duration': Deserializer.deserialize_duration,
'date': Deserializer.deserialize_date,
'decimal': Deserializer.deserialize_decimal,
'long': Deserializer.deserialize_long,
'bytearray': Deserializer.deserialize_bytearray,
'object': self.deserialize_object,
'[]': self.deserialize_iter,
'{}': self.deserialize_dict
}
self.dependencies = dict(classes)
def __call__(self, target_obj, response_data):
"""Call the deserializer to process a REST response.
:param str target_obj: Target data type to deserialize to.
:param requests.Response response_data: REST response object.
:raises: DeserializationError if deserialization fails.
:return: Deserialized object.
"""
data = self._unpack_content(response_data)
response, class_name = self._classify_target(target_obj, data)
if isinstance(response, basestring):
return self.deserialize_data(data, response)
elif isinstance(response, Enum) or class_name == 'EnumMeta':
return self.deserialize_enum(data, response)
if data is None:
return data
try:
attributes = response._attribute_map
d_attrs = {}
for attr, map in attributes.items():
attr_type = map['type']
key = map['key']
working_data = data
while '.' in key:
dict_keys = self.flatten.split(key)
if len(dict_keys) == 1:
key = dict_keys[0].replace('\\.', '.')
break
working_key = dict_keys[0].replace('\\.', '.')
working_data = working_data.get(working_key, data)
key = '.'.join(dict_keys[1:])
raw_value = working_data.get(key)
value = self.deserialize_data(raw_value, attr_type)
d_attrs[attr] = value
except (AttributeError, TypeError, KeyError) as err:
msg = "Unable to deserialize to object: " + class_name
raise_with_traceback(DeserializationError, msg, err)
else:
return self._instantiate_model(response, d_attrs)
def _classify_target(self, target, data):
"""Check to see whether the deserialization target object can
be classified into a subclass.
Once classification has been determined, initialize object.
:param str target: The target object type to deserialize to.
        :param str/dict data: The response data to deserialize.
"""
if target is None:
return None, None
if isinstance(target, basestring):
try:
target = self.dependencies[target]
except KeyError:
return target, target
try:
target = target._classify(data, self.dependencies)
except (TypeError, AttributeError):
pass # Target has no subclasses, so can't classify further.
return target, target.__class__.__name__
def _unpack_content(self, raw_data):
"""Extract data from the body of a REST response object.
:param raw_data: Data to be processed. This could be a
         requests.Response object, in which case the json content will
be returned.
"""
if raw_data and isinstance(raw_data, bytes):
data = raw_data.decode(
encoding=chardet.detect(raw_data)['encoding'])
else:
data = raw_data
if hasattr(raw_data, 'content'):
if not raw_data.content:
return None
if isinstance(raw_data.content, bytes):
encoding = chardet.detect(raw_data.content)["encoding"]
data = raw_data.content.decode(encoding=encoding)
else:
data = raw_data.content
try:
return json.loads(data)
except (ValueError, TypeError):
return data
return data
def _instantiate_model(self, response, attrs):
"""Instantiate a response model passing in deserialized args.
:param response: The response model class.
        :param attrs: The deserialized response attributes.
"""
if callable(response):
subtype = response._get_subtype_map()
try:
readonly = [k for k, v in response._validation.items()
if v.get('readonly')]
const = [k for k, v in response._validation.items()
if v.get('constant')]
kwargs = {k: v for k, v in attrs.items()
if k not in subtype and k not in readonly + const}
response_obj = response(**kwargs)
for attr in readonly:
setattr(response_obj, attr, attrs.get(attr))
return response_obj
except TypeError as err:
msg = "Unable to deserialize {} into model {}. ".format(
kwargs, response)
raise DeserializationError(msg + str(err))
else:
try:
for attr, value in attrs.items():
setattr(response, attr, value)
return response
except Exception as exp:
msg = "Unable to populate response model. "
msg += "Type: {}, Error: {}".format(type(response), exp)
raise DeserializationError(msg)
def deserialize_data(self, data, data_type):
"""Process data for deserialization according to data type.
:param str data: The response string to be deserialized.
:param str data_type: The type to deserialize to.
:raises: DeserializationError if deserialization fails.
:return: Deserialized object.
"""
if data is None:
return data
try:
if not data_type:
return data
if data_type in self.basic_types.values():
return self.deserialize_basic(data, data_type)
if data_type in self.deserialize_type:
data_val = self.deserialize_type[data_type](data)
return data_val
iter_type = data_type[0] + data_type[-1]
if iter_type in self.deserialize_type:
return self.deserialize_type[iter_type](data, data_type[1:-1])
obj_type = self.dependencies[data_type]
if issubclass(obj_type, Enum):
return self.deserialize_enum(data, obj_type)
except (ValueError, TypeError, AttributeError) as err:
msg = "Unable to deserialize response data."
msg += " Data: {}, {}".format(data, data_type)
raise_with_traceback(DeserializationError, msg, err)
else:
return self(obj_type, data)
def deserialize_iter(self, attr, iter_type):
"""Deserialize an iterable.
:param list attr: Iterable to be deserialized.
:param str iter_type: The type of object in the iterable.
:rtype: list
"""
if not attr and not isinstance(attr, list):
return None
return [self.deserialize_data(a, iter_type) for a in attr]
def deserialize_dict(self, attr, dict_type):
"""Deserialize a dictionary.
:param dict/list attr: Dictionary to be deserialized. Also accepts
a list of key, value pairs.
:param str dict_type: The object type of the items in the dictionary.
:rtype: dict
"""
if isinstance(attr, list):
return {str(x['key']): self.deserialize_data(
x['value'], dict_type) for x in attr}
return {str(k): self.deserialize_data(
v, dict_type) for k, v in attr.items()}
def deserialize_object(self, attr, **kwargs):
"""Deserialize a generic object.
This will be handled as a dictionary.
:param dict attr: Dictionary to be deserialized.
:rtype: dict
:raises: TypeError if non-builtin datatype encountered.
"""
if attr is None:
return None
if isinstance(attr, basestring):
return self.deserialize_basic(attr, 'str')
obj_type = type(attr)
if obj_type in self.basic_types:
return self.deserialize_basic(attr, self.basic_types[obj_type])
if obj_type == dict:
deserialized = {}
for key, value in attr.items():
try:
deserialized[str(key)] = self.deserialize_object(
value, **kwargs)
except ValueError:
deserialized[str(key)] = None
return deserialized
if obj_type == list:
deserialized = []
for obj in attr:
try:
deserialized.append(self.deserialize_object(
obj, **kwargs))
except ValueError:
pass
return deserialized
else:
error = "Cannot deserialize generic object with type: "
raise TypeError(error + str(obj_type))
def deserialize_basic(self, attr, data_type):
"""Deserialize baisc builtin data type from string.
Will attempt to convert to str, int, float and bool.
This function will also accept '1', '0', 'true' and 'false' as
valid bool values.
:param str attr: response string to be deserialized.
:param str data_type: deserialization data type.
:rtype: str, int, float or bool
:raises: TypeError if string format is not valid.
"""
if data_type == 'bool':
if attr in [True, False, 1, 0]:
return bool(attr)
elif isinstance(attr, basestring):
if attr.lower() in ['true', '1']:
return True
elif attr.lower() in ['false', '0']:
return False
raise TypeError("Invalid boolean value: {}".format(attr))
if data_type == 'str':
return self.deserialize_unicode(attr)
return eval(data_type)(attr)
def deserialize_unicode(self, data):
"""Preserve unicode objects in Python 2, otherwise return data
as a string.
:param str data: response string to be deserialized.
:rtype: str or unicode
"""
try:
if isinstance(data, unicode):
return data
except NameError:
return str(data)
else:
return str(data)
def deserialize_enum(self, data, enum_obj):
"""Deserialize string into enum object.
:param str data: response string to be deserialized.
:param Enum enum_obj: Enum object to deserialize to.
:rtype: Enum
:raises: DeserializationError if string is not valid enum value.
"""
if isinstance(data, int):
            # Workaround. We might consider removing it in the future.
# https://github.com/Azure/azure-rest-api-specs/issues/141
try:
return list(enum_obj.__members__.values())[data]
except IndexError:
error = "{!r} is not a valid index for enum {!r}"
raise DeserializationError(error.format(data, enum_obj))
try:
return enum_obj(str(data))
except ValueError:
for enum_value in enum_obj:
if enum_value.value.lower() == str(data).lower():
return enum_value
error = "{!r} is not valid value for enum {!r}"
raise DeserializationError(error.format(data, enum_obj))
@staticmethod
def deserialize_bytearray(attr):
"""Deserialize string into bytearray.
:param str attr: response string to be deserialized.
:rtype: bytearray
:raises: TypeError if string format invalid.
"""
return bytearray(b64decode(attr))
@staticmethod
def deserialize_decimal(attr):
"""Deserialize string into Decimal object.
:param str attr: response string to be deserialized.
:rtype: Decimal
:raises: DeserializationError if string format invalid.
"""
try:
return decimal.Decimal(attr)
except decimal.DecimalException as err:
msg = "Invalid decimal {}".format(attr)
raise_with_traceback(DeserializationError, msg, err)
@staticmethod
def deserialize_long(attr):
"""Deserialize string into long (Py2) or int (Py3).
:param str attr: response string to be deserialized.
:rtype: long or int
:raises: ValueError if string format invalid.
"""
try:
return long(attr)
except NameError:
return int(attr)
@staticmethod
def deserialize_duration(attr):
"""Deserialize ISO-8601 formatted string into TimeDelta object.
:param str attr: response string to be deserialized.
:rtype: TimeDelta
:raises: DeserializationError if string format invalid.
"""
try:
duration = isodate.parse_duration(attr)
        except (ValueError, OverflowError, AttributeError) as err:
msg = "Cannot deserialize duration object."
raise_with_traceback(DeserializationError, msg, err)
else:
return duration
@staticmethod
def deserialize_date(attr):
"""Deserialize ISO-8601 formatted string into Date object.
:param str attr: response string to be deserialized.
:rtype: Date
:raises: DeserializationError if string format invalid.
"""
return isodate.parse_date(attr)
@staticmethod
def deserialize_rfc(attr):
"""Deserialize RFC-1123 formatted string into Datetime object.
:param str attr: response string to be deserialized.
:rtype: Datetime
:raises: DeserializationError if string format invalid.
"""
try:
date_obj = datetime.datetime.strptime(
attr, "%a, %d %b %Y %H:%M:%S %Z")
date_obj = date_obj.replace(tzinfo=UTC())
except ValueError as err:
msg = "Cannot deserialize to rfc datetime object."
raise_with_traceback(DeserializationError, msg, err)
else:
return date_obj
@staticmethod
def deserialize_iso(attr):
"""Deserialize ISO-8601 formatted string into Datetime object.
:param str attr: response string to be deserialized.
:rtype: Datetime
:raises: DeserializationError if string format invalid.
"""
try:
attr = attr.upper()
match = Deserializer.valid_date.match(attr)
if not match:
raise ValueError("Invalid datetime string: " + attr)
check_decimal = attr.split('.')
if len(check_decimal) > 1:
decimal = ""
for digit in check_decimal[1]:
if digit.isdigit():
decimal += digit
else:
break
if len(decimal) > 6:
attr = attr.replace(decimal, decimal[0:-1])
date_obj = isodate.parse_datetime(attr)
test_utc = date_obj.utctimetuple()
if test_utc.tm_year > 9999 or test_utc.tm_year < 1:
raise OverflowError("Hit max or min date")
        except (ValueError, OverflowError, AttributeError) as err:
msg = "Cannot deserialize datetime object."
raise_with_traceback(DeserializationError, msg, err)
else:
return date_obj
class UTC(datetime.tzinfo):
"""Time Zone info for handling UTC"""
def utcoffset(self, dt):
"""UTF offset for UTC is 0."""
return datetime.timedelta(hours=0, minutes=0)
def tzname(self, dt):
"""Timestamp representation."""
return "Z"
def dst(self, dt):
"""No daylight saving for UTC."""
return datetime.timedelta(0)
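# ---------------------------------------------------------------------------
# Editor's illustrative sketch, not part of the original msrest source:
# Deserializer.deserialize_data dispatches on the type string: basic type
# names ('str', 'int', ...), registered converters ('iso-8601', 'long', ...)
# and bracketed container syntax ('[int]' for lists, '{str}' for dicts).
# A minimal demonstration, assuming the module's imports (isodate, chardet)
# resolve; wrapped in a private function so nothing runs at import time.
def _example_deserialize_data():
    """Deserialize a bool, a list of ints and an ISO-8601 timestamp."""
    d = Deserializer()
    flag = d.deserialize_data('true', 'bool')                      # True
    nums = d.deserialize_data(['1', '2', '3'], '[int]')            # [1, 2, 3]
    when = d.deserialize_data('2017-05-09T14:56:00Z', 'iso-8601')  # aware UTC datetime
    return flag, nums, when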
|
BurtBiel/autorest
|
ClientRuntimes/Python/msrest/msrest/serialization.py
|
Python
|
mit
| 36,865
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Azure Killer
# version 0.1
# Clean Azure resources automatically
# Feiyue Yu
import os
import json
import datetime
# Please set parameters
# Subscription ID
subscription = '0b1f6471-1bf0-4dda-aec3-cb9272f09590'
# Prefix of resource group that will be deleted
prefixes = ['cli_test', 'clitest']
# Maximum survival time, in days
TTL = 1
def main():
print('Azure Killer, version 0.1')
print('Configuration:')
print(' Subscription: ' + subscription)
# print(' Resource group prefix: ' + str(prefixes))
# print(' Maximum survival time: %d days' % TTL)
print()
cmd = 'az group list --subscription %s --query [].name'
result = os.popen(cmd % subscription).read()
rgs = json.loads(result)
for rg in rgs:
clean_rg(rg)
def clean_rg(rg):
"""
Clean resource group.
:param rg: Resource group name
:return:
"""
print('Processing resource group: ' + rg)
cmd = 'az group delete -y -g %s --subscription %s' % (rg, subscription)
print(cmd)
os.popen(cmd)
def old_enough(dates):
"""
    Whether the most recent date in the list is more than TTL days old.
:param dates: Array of dates
:return: bool
"""
if not dates:
print('Duration: too old')
return True
date = dates[-1]
date = datetime.datetime.strptime(date, '%Y-%m-%dT%H:%M:%S.%f+00:00')
now = datetime.datetime.utcnow()
duration = now - date
print('Duration: ' + str(duration))
return duration.days > TTL
def target_rg(rg):
"""
    Whether rg starts with one of the configured prefixes.
:param rg: Resource group name
:return: bool
"""
return any(rg.startswith(prefix) for prefix in prefixes)
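# Editor's illustrative sketch, not part of the original script: main() above
# deletes every resource group it lists, and the helpers old_enough() and
# target_rg() are defined but never called. A hypothetical filtered variant
# could combine them as below; the caller must supply the dates consumed by
# old_enough() (e.g. deployment timestamps), which the original does not show
# how to fetch.
def clean_filtered(rgs, dates_for_rg):
    """Delete only groups that match a target prefix and exceed the TTL."""
    for rg in rgs:
        if target_rg(rg) and old_enough(dates_for_rg.get(rg, [])):
            clean_rg(rg)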
if __name__ == '__main__':
main()
|
yugangw-msft/azure-cli
|
scripts/live_test/clean.py
|
Python
|
mit
| 2,035
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-05-09 21:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contentcuration', '0067_auto_20170427_1442'),
]
operations = [
migrations.AddField(
model_name='channel',
name='thumbnail_encoding',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='contentnode',
name='thumbnail_encoding',
field=models.TextField(blank=True, null=True),
),
]
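# Editor's note, not part of the generated migration: the operations above add
# a nullable TextField named 'thumbnail_encoding' to both the Channel and
# ContentNode models. It is applied with the standard Django workflow, e.g.
#   python manage.py migrate contentcuration 0068_auto_20170509_1456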
|
fle-internal/content-curation
|
contentcuration/contentcuration/migrations/0068_auto_20170509_1456.py
|
Python
|
mit
| 654
|
import gc
import sys
import unittest
import collections
import weakref
import operator
import contextlib
import copy
from test import support
# Used in ReferencesTestCase.test_ref_created_during_del() .
ref_from_del = None
class C:
def method(self):
pass
class Callable:
bar = None
def __call__(self, x):
self.bar = x
def create_function():
def f(): pass
return f
def create_bound_method():
return C().method
class TestBase(unittest.TestCase):
def setUp(self):
self.cbcalled = 0
def callback(self, ref):
self.cbcalled += 1
class ReferencesTestCase(TestBase):
def test_basic_ref(self):
self.check_basic_ref(C)
self.check_basic_ref(create_function)
self.check_basic_ref(create_bound_method)
# Just make sure the tp_repr handler doesn't raise an exception.
# Live reference:
o = C()
wr = weakref.ref(o)
repr(wr)
# Dead reference:
del o
repr(wr)
def test_basic_callback(self):
self.check_basic_callback(C)
self.check_basic_callback(create_function)
self.check_basic_callback(create_bound_method)
def test_multiple_callbacks(self):
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del o
self.assertTrue(ref1() is None,
"expected reference to be invalidated")
self.assertTrue(ref2() is None,
"expected reference to be invalidated")
self.assertTrue(self.cbcalled == 2,
"callback not called the right number of times")
def test_multiple_selfref_callbacks(self):
# Make sure all references are invalidated before callbacks are called
#
# What's important here is that we're using the first
# reference in the callback invoked on the second reference
# (the most recently created ref is cleaned up first). This
# tests that all references to the object are invalidated
# before any of the callbacks are invoked, so that we only
# have one invocation of _weakref.c:cleanup_helper() active
# for a particular object at a time.
#
def callback(object, self=self):
self.ref()
c = C()
self.ref = weakref.ref(c, callback)
ref1 = weakref.ref(c, callback)
del c
def test_proxy_ref(self):
o = C()
o.bar = 1
ref1 = weakref.proxy(o, self.callback)
ref2 = weakref.proxy(o, self.callback)
del o
def check(proxy):
proxy.bar
self.assertRaises(ReferenceError, check, ref1)
self.assertRaises(ReferenceError, check, ref2)
self.assertRaises(ReferenceError, bool, weakref.proxy(C()))
self.assertEqual(self.cbcalled, 2)
def check_basic_ref(self, factory):
o = factory()
ref = weakref.ref(o)
self.assertTrue(ref() is not None,
"weak reference to live object should be live")
o2 = ref()
self.assertTrue(o is o2,
"<ref>() should return original object if live")
def check_basic_callback(self, factory):
self.cbcalled = 0
o = factory()
ref = weakref.ref(o, self.callback)
del o
self.assertTrue(self.cbcalled == 1,
"callback did not properly set 'cbcalled'")
self.assertTrue(ref() is None,
"ref2 should be dead after deleting object reference")
def test_ref_reuse(self):
o = C()
ref1 = weakref.ref(o)
# create a proxy to make sure that there's an intervening creation
# between these two; it should make no difference
proxy = weakref.proxy(o)
ref2 = weakref.ref(o)
self.assertTrue(ref1 is ref2,
"reference object w/out callback should be re-used")
o = C()
proxy = weakref.proxy(o)
ref1 = weakref.ref(o)
ref2 = weakref.ref(o)
self.assertTrue(ref1 is ref2,
"reference object w/out callback should be re-used")
self.assertTrue(weakref.getweakrefcount(o) == 2,
"wrong weak ref count for object")
del proxy
self.assertTrue(weakref.getweakrefcount(o) == 1,
"wrong weak ref count for object after deleting proxy")
def test_proxy_reuse(self):
o = C()
proxy1 = weakref.proxy(o)
ref = weakref.ref(o)
proxy2 = weakref.proxy(o)
self.assertTrue(proxy1 is proxy2,
"proxy object w/out callback should have been re-used")
def test_basic_proxy(self):
o = C()
self.check_proxy(o, weakref.proxy(o))
L = collections.UserList()
p = weakref.proxy(L)
self.assertFalse(p, "proxy for empty UserList should be false")
p.append(12)
self.assertEqual(len(L), 1)
self.assertTrue(p, "proxy for non-empty UserList should be true")
p[:] = [2, 3]
self.assertEqual(len(L), 2)
self.assertEqual(len(p), 2)
self.assertIn(3, p, "proxy didn't support __contains__() properly")
p[1] = 5
self.assertEqual(L[1], 5)
self.assertEqual(p[1], 5)
L2 = collections.UserList(L)
p2 = weakref.proxy(L2)
self.assertEqual(p, p2)
## self.assertEqual(repr(L2), repr(p2))
L3 = collections.UserList(range(10))
p3 = weakref.proxy(L3)
self.assertEqual(L3[:], p3[:])
self.assertEqual(L3[5:], p3[5:])
self.assertEqual(L3[:5], p3[:5])
self.assertEqual(L3[2:5], p3[2:5])
def test_proxy_unicode(self):
# See bug 5037
class C(object):
def __str__(self):
return "string"
def __bytes__(self):
return b"bytes"
instance = C()
self.assertIn("__bytes__", dir(weakref.proxy(instance)))
self.assertEqual(bytes(weakref.proxy(instance)), b"bytes")
def test_proxy_index(self):
class C:
def __index__(self):
return 10
o = C()
p = weakref.proxy(o)
self.assertEqual(operator.index(p), 10)
def test_proxy_div(self):
class C:
def __floordiv__(self, other):
return 42
def __ifloordiv__(self, other):
return 21
o = C()
p = weakref.proxy(o)
self.assertEqual(p // 5, 42)
p //= 5
self.assertEqual(p, 21)
# The PyWeakref_* C API is documented as allowing either NULL or
# None as the value for the callback, where either means "no
# callback". The "no callback" ref and proxy objects are supposed
# to be shared so long as they exist by all callers so long as
# they are active. In Python 2.3.3 and earlier, this guarantee
# was not honored, and was broken in different ways for
# PyWeakref_NewRef() and PyWeakref_NewProxy(). (Two tests.)
def test_shared_ref_without_callback(self):
self.check_shared_without_callback(weakref.ref)
def test_shared_proxy_without_callback(self):
self.check_shared_without_callback(weakref.proxy)
def check_shared_without_callback(self, makeref):
o = Object(1)
p1 = makeref(o, None)
p2 = makeref(o, None)
self.assertTrue(p1 is p2, "both callbacks were None in the C API")
del p1, p2
p1 = makeref(o)
p2 = makeref(o, None)
self.assertTrue(p1 is p2, "callbacks were NULL, None in the C API")
del p1, p2
p1 = makeref(o)
p2 = makeref(o)
self.assertTrue(p1 is p2, "both callbacks were NULL in the C API")
del p1, p2
p1 = makeref(o, None)
p2 = makeref(o)
self.assertTrue(p1 is p2, "callbacks were None, NULL in the C API")
def test_callable_proxy(self):
o = Callable()
ref1 = weakref.proxy(o)
self.check_proxy(o, ref1)
self.assertTrue(type(ref1) is weakref.CallableProxyType,
"proxy is not of callable type")
ref1('twinkies!')
self.assertTrue(o.bar == 'twinkies!',
"call through proxy not passed through to original")
ref1(x='Splat.')
self.assertTrue(o.bar == 'Splat.',
"call through proxy not passed through to original")
# expect due to too few args
self.assertRaises(TypeError, ref1)
# expect due to too many args
self.assertRaises(TypeError, ref1, 1, 2, 3)
def check_proxy(self, o, proxy):
o.foo = 1
self.assertTrue(proxy.foo == 1,
"proxy does not reflect attribute addition")
o.foo = 2
self.assertTrue(proxy.foo == 2,
"proxy does not reflect attribute modification")
del o.foo
self.assertTrue(not hasattr(proxy, 'foo'),
"proxy does not reflect attribute removal")
proxy.foo = 1
self.assertTrue(o.foo == 1,
"object does not reflect attribute addition via proxy")
proxy.foo = 2
self.assertTrue(
o.foo == 2,
"object does not reflect attribute modification via proxy")
del proxy.foo
self.assertTrue(not hasattr(o, 'foo'),
"object does not reflect attribute removal via proxy")
def test_proxy_deletion(self):
# Test clearing of SF bug #762891
class Foo:
result = None
def __delitem__(self, accessor):
self.result = accessor
g = Foo()
f = weakref.proxy(g)
del f[0]
self.assertEqual(f.result, 0)
def test_proxy_bool(self):
# Test clearing of SF bug #1170766
class List(list): pass
lyst = List()
self.assertEqual(bool(weakref.proxy(lyst)), bool(lyst))
def test_getweakrefcount(self):
o = C()
ref1 = weakref.ref(o)
ref2 = weakref.ref(o, self.callback)
self.assertTrue(weakref.getweakrefcount(o) == 2,
"got wrong number of weak reference objects")
proxy1 = weakref.proxy(o)
proxy2 = weakref.proxy(o, self.callback)
self.assertTrue(weakref.getweakrefcount(o) == 4,
"got wrong number of weak reference objects")
del ref1, ref2, proxy1, proxy2
self.assertTrue(weakref.getweakrefcount(o) == 0,
"weak reference objects not unlinked from"
" referent when discarded.")
# assumes ints do not support weakrefs
self.assertTrue(weakref.getweakrefcount(1) == 0,
"got wrong number of weak reference objects for int")
def test_getweakrefs(self):
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del ref1
self.assertTrue(weakref.getweakrefs(o) == [ref2],
"list of refs does not match")
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del ref2
self.assertTrue(weakref.getweakrefs(o) == [ref1],
"list of refs does not match")
del ref1
self.assertTrue(weakref.getweakrefs(o) == [],
"list of refs not cleared")
# assumes ints do not support weakrefs
self.assertTrue(weakref.getweakrefs(1) == [],
"list of refs does not match for int")
def test_newstyle_number_ops(self):
class F(float):
pass
f = F(2.0)
p = weakref.proxy(f)
self.assertTrue(p + 1.0 == 3.0)
self.assertTrue(1.0 + p == 3.0) # this used to SEGV
def test_callbacks_protected(self):
# Callbacks protected from already-set exceptions?
# Regression test for SF bug #478534.
class BogusError(Exception):
pass
data = {}
def remove(k):
del data[k]
def encapsulate():
f = lambda : ()
data[weakref.ref(f, remove)] = None
raise BogusError
try:
encapsulate()
except BogusError:
pass
else:
self.fail("exception not properly restored")
try:
encapsulate()
except BogusError:
pass
else:
self.fail("exception not properly restored")
def test_sf_bug_840829(self):
# "weakref callbacks and gc corrupt memory"
# subtype_dealloc erroneously exposed a new-style instance
# already in the process of getting deallocated to gc,
# causing double-deallocation if the instance had a weakref
# callback that triggered gc.
# If the bug exists, there probably won't be an obvious symptom
# in a release build. In a debug build, a segfault will occur
# when the second attempt to remove the instance from the "list
# of all objects" occurs.
import gc
class C(object):
pass
c = C()
wr = weakref.ref(c, lambda ignore: gc.collect())
del c
# There endeth the first part. It gets worse.
del wr
c1 = C()
c1.i = C()
wr = weakref.ref(c1.i, lambda ignore: gc.collect())
c2 = C()
c2.c1 = c1
del c1 # still alive because c2 points to it
# Now when subtype_dealloc gets called on c2, it's not enough just
# that c2 is immune from gc while the weakref callbacks associated
# with c2 execute (there are none in this 2nd half of the test, btw).
# subtype_dealloc goes on to call the base classes' deallocs too,
# so any gc triggered by weakref callbacks associated with anything
# torn down by a base class dealloc can also trigger double
# deallocation of c2.
del c2
def test_callback_in_cycle_1(self):
import gc
class J(object):
pass
class II(object):
def acallback(self, ignore):
self.J
I = II()
I.J = J
I.wr = weakref.ref(J, I.acallback)
# Now J and II are each in a self-cycle (as all new-style class
# objects are, since their __mro__ points back to them). I holds
# both a weak reference (I.wr) and a strong reference (I.J) to class
# J. I is also in a cycle (I.wr points to a weakref that references
# I.acallback). When we del these three, they all become trash, but
# the cycles prevent any of them from getting cleaned up immediately.
# Instead they have to wait for cyclic gc to deduce that they're
# trash.
#
# gc used to call tp_clear on all of them, and the order in which
# it does that is pretty accidental. The exact order in which we
# built up these things manages to provoke gc into running tp_clear
# in just the right order (I last). Calling tp_clear on II leaves
# behind an insane class object (its __mro__ becomes NULL). Calling
# tp_clear on J breaks its self-cycle, but J doesn't get deleted
# just then because of the strong reference from I.J. Calling
# tp_clear on I starts to clear I's __dict__, and just happens to
# clear I.J first -- I.wr is still intact. That removes the last
# reference to J, which triggers the weakref callback. The callback
# tries to do "self.J", and instances of new-style classes look up
# attributes ("J") in the class dict first. The class (II) wants to
# search II.__mro__, but that's NULL. The result was a segfault in
# a release build, and an assert failure in a debug build.
del I, J, II
gc.collect()
def test_callback_in_cycle_2(self):
import gc
# This is just like test_callback_in_cycle_1, except that II is an
# old-style class. The symptom is different then: an instance of an
# old-style class looks in its own __dict__ first. 'J' happens to
# get cleared from I.__dict__ before 'wr', and 'J' was never in II's
# __dict__, so the attribute isn't found. The difference is that
# the old-style II doesn't have a NULL __mro__ (it doesn't have any
# __mro__), so no segfault occurs. Instead it got:
# test_callback_in_cycle_2 (__main__.ReferencesTestCase) ...
# Exception exceptions.AttributeError:
# "II instance has no attribute 'J'" in <bound method II.acallback
# of <?.II instance at 0x00B9B4B8>> ignored
class J(object):
pass
class II:
def acallback(self, ignore):
self.J
I = II()
I.J = J
I.wr = weakref.ref(J, I.acallback)
del I, J, II
gc.collect()
def test_callback_in_cycle_3(self):
import gc
# This one broke the first patch that fixed the last two. In this
# case, the objects reachable from the callback aren't also reachable
# from the object (c1) *triggering* the callback: you can get to
# c1 from c2, but not vice-versa. The result was that c2's __dict__
# got tp_clear'ed by the time the c2.cb callback got invoked.
class C:
def cb(self, ignore):
self.me
self.c1
self.wr
c1, c2 = C(), C()
c2.me = c2
c2.c1 = c1
c2.wr = weakref.ref(c1, c2.cb)
del c1, c2
gc.collect()
def test_callback_in_cycle_4(self):
import gc
# Like test_callback_in_cycle_3, except c2 and c1 have different
# classes. c2's class (C) isn't reachable from c1 then, so protecting
# objects reachable from the dying object (c1) isn't enough to stop
# c2's class (C) from getting tp_clear'ed before c2.cb is invoked.
# The result was a segfault (C.__mro__ was NULL when the callback
# tried to look up self.me).
class C(object):
def cb(self, ignore):
self.me
self.c1
self.wr
class D:
pass
c1, c2 = D(), C()
c2.me = c2
c2.c1 = c1
c2.wr = weakref.ref(c1, c2.cb)
del c1, c2, C, D
gc.collect()
def test_callback_in_cycle_resurrection(self):
import gc
# Do something nasty in a weakref callback: resurrect objects
# from dead cycles. For this to be attempted, the weakref and
# its callback must also be part of the cyclic trash (else the
# objects reachable via the callback couldn't be in cyclic trash
# to begin with -- the callback would act like an external root).
# But gc clears trash weakrefs with callbacks early now, which
# disables the callbacks, so the callbacks shouldn't get called
# at all (and so nothing actually gets resurrected).
alist = []
class C(object):
def __init__(self, value):
self.attribute = value
def acallback(self, ignore):
alist.append(self.c)
c1, c2 = C(1), C(2)
c1.c = c2
c2.c = c1
c1.wr = weakref.ref(c2, c1.acallback)
c2.wr = weakref.ref(c1, c2.acallback)
def C_went_away(ignore):
alist.append("C went away")
wr = weakref.ref(C, C_went_away)
del c1, c2, C # make them all trash
self.assertEqual(alist, []) # del isn't enough to reclaim anything
gc.collect()
# c1.wr and c2.wr were part of the cyclic trash, so should have
# been cleared without their callbacks executing. OTOH, the weakref
# to C is bound to a function local (wr), and wasn't trash, so that
# callback should have been invoked when C went away.
self.assertEqual(alist, ["C went away"])
# The remaining weakref should be dead now (its callback ran).
self.assertEqual(wr(), None)
del alist[:]
gc.collect()
self.assertEqual(alist, [])
def test_callbacks_on_callback(self):
import gc
# Set up weakref callbacks *on* weakref callbacks.
alist = []
def safe_callback(ignore):
alist.append("safe_callback called")
class C(object):
def cb(self, ignore):
alist.append("cb called")
c, d = C(), C()
c.other = d
d.other = c
callback = c.cb
c.wr = weakref.ref(d, callback) # this won't trigger
d.wr = weakref.ref(callback, d.cb) # ditto
external_wr = weakref.ref(callback, safe_callback) # but this will
self.assertTrue(external_wr() is callback)
# The weakrefs attached to c and d should get cleared, so that
# C.cb is never called. But external_wr isn't part of the cyclic
# trash, and no cyclic trash is reachable from it, so safe_callback
# should get invoked when the bound method object callback (c.cb)
# -- which is itself a callback, and also part of the cyclic trash --
# gets reclaimed at the end of gc.
del callback, c, d, C
self.assertEqual(alist, []) # del isn't enough to clean up cycles
gc.collect()
self.assertEqual(alist, ["safe_callback called"])
self.assertEqual(external_wr(), None)
del alist[:]
gc.collect()
self.assertEqual(alist, [])
def test_gc_during_ref_creation(self):
self.check_gc_during_creation(weakref.ref)
def test_gc_during_proxy_creation(self):
self.check_gc_during_creation(weakref.proxy)
def check_gc_during_creation(self, makeref):
thresholds = gc.get_threshold()
gc.set_threshold(1, 1, 1)
gc.collect()
class A:
pass
def callback(*args):
pass
referenced = A()
a = A()
a.a = a
a.wr = makeref(referenced)
try:
# now make sure the object and the ref get labeled as
# cyclic trash:
a = A()
weakref.ref(referenced, callback)
finally:
gc.set_threshold(*thresholds)
def test_ref_created_during_del(self):
# Bug #1377858
# A weakref created in an object's __del__() would crash the
# interpreter when the weakref was cleaned up since it would refer to
# non-existent memory. This test should not segfault the interpreter.
class Target(object):
def __del__(self):
global ref_from_del
ref_from_del = weakref.ref(self)
w = Target()
def test_init(self):
# Issue 3634
# <weakref to class>.__init__() doesn't check errors correctly
r = weakref.ref(Exception)
self.assertRaises(TypeError, r.__init__, 0, 0, 0, 0, 0)
# No exception should be raised here
gc.collect()
def test_classes(self):
# Check that classes are weakrefable.
class A(object):
pass
l = []
weakref.ref(int)
a = weakref.ref(A, l.append)
A = None
gc.collect()
self.assertEqual(a(), None)
self.assertEqual(l, [a])
class SubclassableWeakrefTestCase(TestBase):
def test_subclass_refs(self):
class MyRef(weakref.ref):
def __init__(self, ob, callback=None, value=42):
self.value = value
super().__init__(ob, callback)
def __call__(self):
self.called = True
return super().__call__()
o = Object("foo")
mr = MyRef(o, value=24)
self.assertTrue(mr() is o)
self.assertTrue(mr.called)
self.assertEqual(mr.value, 24)
del o
self.assertTrue(mr() is None)
self.assertTrue(mr.called)
def test_subclass_refs_dont_replace_standard_refs(self):
class MyRef(weakref.ref):
pass
o = Object(42)
r1 = MyRef(o)
r2 = weakref.ref(o)
self.assertTrue(r1 is not r2)
self.assertEqual(weakref.getweakrefs(o), [r2, r1])
self.assertEqual(weakref.getweakrefcount(o), 2)
r3 = MyRef(o)
self.assertEqual(weakref.getweakrefcount(o), 3)
refs = weakref.getweakrefs(o)
self.assertEqual(len(refs), 3)
self.assertTrue(r2 is refs[0])
self.assertIn(r1, refs[1:])
self.assertIn(r3, refs[1:])
def test_subclass_refs_dont_conflate_callbacks(self):
class MyRef(weakref.ref):
pass
o = Object(42)
r1 = MyRef(o, id)
r2 = MyRef(o, str)
self.assertTrue(r1 is not r2)
refs = weakref.getweakrefs(o)
self.assertIn(r1, refs)
self.assertIn(r2, refs)
def test_subclass_refs_with_slots(self):
class MyRef(weakref.ref):
__slots__ = "slot1", "slot2"
def __new__(type, ob, callback, slot1, slot2):
return weakref.ref.__new__(type, ob, callback)
def __init__(self, ob, callback, slot1, slot2):
self.slot1 = slot1
self.slot2 = slot2
def meth(self):
return self.slot1 + self.slot2
o = Object(42)
r = MyRef(o, None, "abc", "def")
self.assertEqual(r.slot1, "abc")
self.assertEqual(r.slot2, "def")
self.assertEqual(r.meth(), "abcdef")
self.assertFalse(hasattr(r, "__dict__"))
def test_subclass_refs_with_cycle(self):
# Bug #3110
# An instance of a weakref subclass can have attributes.
# If such a weakref holds the only strong reference to the object,
# deleting the weakref will delete the object. In this case,
# the callback must not be called, because the ref object is
# being deleted.
class MyRef(weakref.ref):
pass
# Use a local callback, for "regrtest -R::"
# to detect refcounting problems
def callback(w):
self.cbcalled += 1
o = C()
r1 = MyRef(o, callback)
r1.o = o
del o
del r1 # Used to crash here
self.assertEqual(self.cbcalled, 0)
# Same test, with two weakrefs to the same object
# (since code paths are different)
o = C()
r1 = MyRef(o, callback)
r2 = MyRef(o, callback)
r1.r = r2
r2.o = o
del o
del r2
del r1 # Used to crash here
self.assertEqual(self.cbcalled, 0)
class Object:
def __init__(self, arg):
self.arg = arg
def __repr__(self):
return "<Object %r>" % self.arg
def __eq__(self, other):
if isinstance(other, Object):
return self.arg == other.arg
return NotImplemented
def __lt__(self, other):
if isinstance(other, Object):
return self.arg < other.arg
return NotImplemented
def __hash__(self):
return hash(self.arg)
class RefCycle:
def __init__(self):
self.cycle = self
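# Editor's illustrative sketch, not part of the upstream test suite: the
# MappingTestCase checks below depend on values that are kept alive only by
# reference cycles disappearing from a WeakValueDictionary when the cyclic
# garbage collector runs, not at the moment the last name is deleted.
def _cycle_len_demo():
    """Return the mapping's length before and after gc.collect()."""
    d = weakref.WeakValueDictionary((i, RefCycle()) for i in range(5))
    before = len(d)   # entries may linger until the collector runs
    gc.collect()
    after = len(d)    # 0: the cyclic values have been reclaimed
    return before, after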
class MappingTestCase(TestBase):
COUNT = 10
def check_len_cycles(self, dict_type, cons):
N = 20
items = [RefCycle() for i in range(N)]
dct = dict_type(cons(o) for o in items)
# Keep an iterator alive
it = dct.items()
try:
next(it)
except StopIteration:
pass
del items
gc.collect()
n1 = len(dct)
del it
gc.collect()
n2 = len(dct)
# one item may be kept alive inside the iterator
self.assertIn(n1, (0, 1))
self.assertEqual(n2, 0)
def test_weak_keyed_len_cycles(self):
self.check_len_cycles(weakref.WeakKeyDictionary, lambda k: (k, 1))
def test_weak_valued_len_cycles(self):
self.check_len_cycles(weakref.WeakValueDictionary, lambda k: (1, k))
def check_len_race(self, dict_type, cons):
# Extended sanity checks for len() in the face of cyclic collection
self.addCleanup(gc.set_threshold, *gc.get_threshold())
for th in range(1, 100):
N = 20
gc.collect(0)
gc.set_threshold(th, th, th)
items = [RefCycle() for i in range(N)]
dct = dict_type(cons(o) for o in items)
del items
# All items will be collected at next garbage collection pass
it = dct.items()
try:
next(it)
except StopIteration:
pass
n1 = len(dct)
del it
n2 = len(dct)
self.assertGreaterEqual(n1, 0)
self.assertLessEqual(n1, N)
self.assertGreaterEqual(n2, 0)
self.assertLessEqual(n2, n1)
def test_weak_keyed_len_race(self):
self.check_len_race(weakref.WeakKeyDictionary, lambda k: (k, 1))
def test_weak_valued_len_race(self):
self.check_len_race(weakref.WeakValueDictionary, lambda k: (1, k))
def test_weak_values(self):
#
# This exercises d.copy(), d.items(), d[], del d[], len(d).
#
dict, objects = self.make_weak_valued_dict()
for o in objects:
self.assertEqual(weakref.getweakrefcount(o), 1)
self.assertTrue(o is dict[o.arg],
"wrong object returned by weak dict!")
items1 = list(dict.items())
items2 = list(dict.copy().items())
items1.sort()
items2.sort()
self.assertEqual(items1, items2,
"cloning of weak-valued dictionary did not work!")
del items1, items2
self.assertEqual(len(dict), self.COUNT)
del objects[0]
self.assertEqual(len(dict), self.COUNT - 1,
"deleting object did not cause dictionary update")
del objects, o
self.assertEqual(len(dict), 0,
"deleting the values did not clear the dictionary")
# regression on SF bug #447152:
dict = weakref.WeakValueDictionary()
self.assertRaises(KeyError, dict.__getitem__, 1)
dict[2] = C()
self.assertRaises(KeyError, dict.__getitem__, 2)
def test_weak_keys(self):
#
# This exercises d.copy(), d.items(), d[] = v, d[], del d[],
# len(d), k in d.
#
dict, objects = self.make_weak_keyed_dict()
for o in objects:
self.assertTrue(weakref.getweakrefcount(o) == 1,
"wrong number of weak references to %r!" % o)
self.assertTrue(o.arg is dict[o],
"wrong object returned by weak dict!")
items1 = dict.items()
items2 = dict.copy().items()
self.assertEqual(set(items1), set(items2),
"cloning of weak-keyed dictionary did not work!")
del items1, items2
self.assertEqual(len(dict), self.COUNT)
del objects[0]
self.assertTrue(len(dict) == (self.COUNT - 1),
"deleting object did not cause dictionary update")
del objects, o
self.assertTrue(len(dict) == 0,
"deleting the keys did not clear the dictionary")
o = Object(42)
dict[o] = "What is the meaning of the universe?"
self.assertIn(o, dict)
self.assertNotIn(34, dict)
def test_weak_keyed_iters(self):
dict, objects = self.make_weak_keyed_dict()
self.check_iters(dict)
# Test keyrefs()
refs = dict.keyrefs()
self.assertEqual(len(refs), len(objects))
objects2 = list(objects)
for wr in refs:
ob = wr()
self.assertIn(ob, dict)
self.assertIn(ob, dict)
self.assertEqual(ob.arg, dict[ob])
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
# Test iterkeyrefs()
objects2 = list(objects)
self.assertEqual(len(list(dict.keyrefs())), len(objects))
for wr in dict.keyrefs():
ob = wr()
self.assertIn(ob, dict)
self.assertIn(ob, dict)
self.assertEqual(ob.arg, dict[ob])
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
def test_weak_valued_iters(self):
dict, objects = self.make_weak_valued_dict()
self.check_iters(dict)
# Test valuerefs()
refs = dict.valuerefs()
self.assertEqual(len(refs), len(objects))
objects2 = list(objects)
for wr in refs:
ob = wr()
self.assertEqual(ob, dict[ob.arg])
self.assertEqual(ob.arg, dict[ob.arg].arg)
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
# Test itervaluerefs()
objects2 = list(objects)
self.assertEqual(len(list(dict.itervaluerefs())), len(objects))
for wr in dict.itervaluerefs():
ob = wr()
self.assertEqual(ob, dict[ob.arg])
self.assertEqual(ob.arg, dict[ob.arg].arg)
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
def check_iters(self, dict):
# item iterator:
items = list(dict.items())
for item in dict.items():
items.remove(item)
self.assertFalse(items, "items() did not touch all items")
# key iterator, via __iter__():
keys = list(dict.keys())
for k in dict:
keys.remove(k)
self.assertFalse(keys, "__iter__() did not touch all keys")
# key iterator, via iterkeys():
keys = list(dict.keys())
for k in dict.keys():
keys.remove(k)
self.assertFalse(keys, "iterkeys() did not touch all keys")
# value iterator:
values = list(dict.values())
for v in dict.values():
values.remove(v)
self.assertFalse(values,
"itervalues() did not touch all values")
def check_weak_destroy_while_iterating(self, dict, objects, iter_name):
n = len(dict)
it = iter(getattr(dict, iter_name)())
next(it) # Trigger internal iteration
# Destroy an object
del objects[-1]
gc.collect() # just in case
# We have removed either the first consumed object, or another one
self.assertIn(len(list(it)), [len(objects), len(objects) - 1])
del it
# The removal has been committed
self.assertEqual(len(dict), n - 1)
def check_weak_destroy_and_mutate_while_iterating(self, dict, testcontext):
# Check that we can explicitly mutate the weak dict without
# interfering with delayed removal.
# `testcontext` should create an iterator, destroy one of the
# weakref'ed objects and then return a new key/value pair corresponding
# to the destroyed object.
with testcontext() as (k, v):
self.assertNotIn(k, dict)
with testcontext() as (k, v):
self.assertRaises(KeyError, dict.__delitem__, k)
self.assertNotIn(k, dict)
with testcontext() as (k, v):
self.assertRaises(KeyError, dict.pop, k)
self.assertNotIn(k, dict)
with testcontext() as (k, v):
dict[k] = v
self.assertEqual(dict[k], v)
ddict = copy.copy(dict)
with testcontext() as (k, v):
dict.update(ddict)
self.assertEqual(dict, ddict)
with testcontext() as (k, v):
dict.clear()
self.assertEqual(len(dict), 0)
def test_weak_keys_destroy_while_iterating(self):
# Issue #7105: iterators shouldn't crash when a key is implicitly removed
dict, objects = self.make_weak_keyed_dict()
self.check_weak_destroy_while_iterating(dict, objects, 'keys')
self.check_weak_destroy_while_iterating(dict, objects, 'items')
self.check_weak_destroy_while_iterating(dict, objects, 'values')
self.check_weak_destroy_while_iterating(dict, objects, 'keyrefs')
dict, objects = self.make_weak_keyed_dict()
@contextlib.contextmanager
def testcontext():
try:
it = iter(dict.items())
next(it)
# Schedule a key/value for removal and recreate it
v = objects.pop().arg
gc.collect() # just in case
yield Object(v), v
finally:
it = None # should commit all removals
self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext)
def test_weak_values_destroy_while_iterating(self):
# Issue #7105: iterators shouldn't crash when a key is implicitly removed
dict, objects = self.make_weak_valued_dict()
self.check_weak_destroy_while_iterating(dict, objects, 'keys')
self.check_weak_destroy_while_iterating(dict, objects, 'items')
self.check_weak_destroy_while_iterating(dict, objects, 'values')
self.check_weak_destroy_while_iterating(dict, objects, 'itervaluerefs')
self.check_weak_destroy_while_iterating(dict, objects, 'valuerefs')
dict, objects = self.make_weak_valued_dict()
@contextlib.contextmanager
def testcontext():
try:
it = iter(dict.items())
next(it)
# Schedule a key/value for removal and recreate it
k = objects.pop().arg
gc.collect() # just in case
yield k, Object(k)
finally:
it = None # should commit all removals
self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext)
def test_make_weak_keyed_dict_from_dict(self):
o = Object(3)
dict = weakref.WeakKeyDictionary({o:364})
self.assertEqual(dict[o], 364)
def test_make_weak_keyed_dict_from_weak_keyed_dict(self):
o = Object(3)
dict = weakref.WeakKeyDictionary({o:364})
dict2 = weakref.WeakKeyDictionary(dict)
self.assertEqual(dict[o], 364)
def make_weak_keyed_dict(self):
dict = weakref.WeakKeyDictionary()
objects = list(map(Object, range(self.COUNT)))
for o in objects:
dict[o] = o.arg
return dict, objects
def test_make_weak_valued_dict_from_dict(self):
o = Object(3)
dict = weakref.WeakValueDictionary({364:o})
self.assertEqual(dict[364], o)
def test_make_weak_valued_dict_from_weak_valued_dict(self):
o = Object(3)
dict = weakref.WeakValueDictionary({364:o})
dict2 = weakref.WeakValueDictionary(dict)
self.assertEqual(dict[364], o)
def make_weak_valued_dict(self):
dict = weakref.WeakValueDictionary()
objects = list(map(Object, range(self.COUNT)))
for o in objects:
dict[o.arg] = o
return dict, objects
def check_popitem(self, klass, key1, value1, key2, value2):
weakdict = klass()
weakdict[key1] = value1
weakdict[key2] = value2
self.assertEqual(len(weakdict), 2)
k, v = weakdict.popitem()
self.assertEqual(len(weakdict), 1)
if k is key1:
self.assertTrue(v is value1)
else:
self.assertTrue(v is value2)
k, v = weakdict.popitem()
self.assertEqual(len(weakdict), 0)
if k is key1:
self.assertTrue(v is value1)
else:
self.assertTrue(v is value2)
def test_weak_valued_dict_popitem(self):
self.check_popitem(weakref.WeakValueDictionary,
"key1", C(), "key2", C())
def test_weak_keyed_dict_popitem(self):
self.check_popitem(weakref.WeakKeyDictionary,
C(), "value 1", C(), "value 2")
def check_setdefault(self, klass, key, value1, value2):
self.assertTrue(value1 is not value2,
"invalid test"
" -- value parameters must be distinct objects")
weakdict = klass()
o = weakdict.setdefault(key, value1)
self.assertTrue(o is value1)
self.assertIn(key, weakdict)
self.assertTrue(weakdict.get(key) is value1)
self.assertTrue(weakdict[key] is value1)
o = weakdict.setdefault(key, value2)
self.assertTrue(o is value1)
self.assertIn(key, weakdict)
self.assertTrue(weakdict.get(key) is value1)
self.assertTrue(weakdict[key] is value1)
def test_weak_valued_dict_setdefault(self):
self.check_setdefault(weakref.WeakValueDictionary,
"key", C(), C())
def test_weak_keyed_dict_setdefault(self):
self.check_setdefault(weakref.WeakKeyDictionary,
C(), "value 1", "value 2")
def check_update(self, klass, dict):
#
# This exercises d.update(), len(d), d.keys(), k in d,
# d.get(), d[].
#
weakdict = klass()
weakdict.update(dict)
self.assertEqual(len(weakdict), len(dict))
for k in weakdict.keys():
self.assertIn(k, dict, "mysterious new key appeared in weak dict")
v = dict.get(k)
self.assertTrue(v is weakdict[k])
self.assertTrue(v is weakdict.get(k))
for k in dict.keys():
self.assertIn(k, weakdict, "original key disappeared in weak dict")
v = dict[k]
self.assertTrue(v is weakdict[k])
self.assertTrue(v is weakdict.get(k))
def test_weak_valued_dict_update(self):
self.check_update(weakref.WeakValueDictionary,
{1: C(), 'a': C(), C(): C()})
def test_weak_keyed_dict_update(self):
self.check_update(weakref.WeakKeyDictionary,
{C(): 1, C(): 2, C(): 3})
def test_weak_keyed_delitem(self):
d = weakref.WeakKeyDictionary()
o1 = Object('1')
o2 = Object('2')
d[o1] = 'something'
d[o2] = 'something'
self.assertEqual(len(d), 2)
del d[o1]
self.assertEqual(len(d), 1)
self.assertEqual(list(d.keys()), [o2])
def test_weak_valued_delitem(self):
d = weakref.WeakValueDictionary()
o1 = Object('1')
o2 = Object('2')
d['something'] = o1
d['something else'] = o2
self.assertEqual(len(d), 2)
del d['something']
self.assertEqual(len(d), 1)
self.assertTrue(list(d.items()) == [('something else', o2)])
def test_weak_keyed_bad_delitem(self):
d = weakref.WeakKeyDictionary()
o = Object('1')
# An attempt to delete an object that isn't there should raise
# KeyError. It didn't before 2.3.
self.assertRaises(KeyError, d.__delitem__, o)
self.assertRaises(KeyError, d.__getitem__, o)
# If a key isn't of a weakly referencable type, __getitem__ and
# __setitem__ raise TypeError. __delitem__ should too.
self.assertRaises(TypeError, d.__delitem__, 13)
self.assertRaises(TypeError, d.__getitem__, 13)
self.assertRaises(TypeError, d.__setitem__, 13, 13)
def test_weak_keyed_cascading_deletes(self):
# SF bug 742860. For some reason, before 2.3 __delitem__ iterated
# over the keys via self.data.iterkeys(). If things vanished from
# the dict during this (or got added), that caused a RuntimeError.
d = weakref.WeakKeyDictionary()
mutate = False
class C(object):
def __init__(self, i):
self.value = i
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
if mutate:
# Side effect that mutates the dict, by removing the
# last strong reference to a key.
del objs[-1]
return self.value == other.value
objs = [C(i) for i in range(4)]
for o in objs:
d[o] = o.value
del o # now the only strong references to keys are in objs
# Find the order in which iterkeys sees the keys.
objs = list(d.keys())
# Reverse it, so that the iteration implementation of __delitem__
# has to keep looping to find the first object we delete.
objs.reverse()
# Turn on mutation in C.__eq__. The first time thru the loop,
# under the iterkeys() business the first comparison will delete
# the last item iterkeys() would see, and that causes a
# RuntimeError: dictionary changed size during iteration
# when the iterkeys() loop goes around to try comparing the next
# key. After this was fixed, it just deletes the last object *our*
# "for o in obj" loop would have gotten to.
mutate = True
count = 0
for o in objs:
count += 1
del d[o]
self.assertEqual(len(d), 0)
self.assertEqual(count, 2)
from test import mapping_tests
class WeakValueDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
"""Check that WeakValueDictionary conforms to the mapping protocol"""
__ref = {"key1":Object(1), "key2":Object(2), "key3":Object(3)}
type2test = weakref.WeakValueDictionary
def _reference(self):
return self.__ref.copy()
class WeakKeyDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
"""Check that WeakKeyDictionary conforms to the mapping protocol"""
__ref = {Object("key1"):1, Object("key2"):2, Object("key3"):3}
type2test = weakref.WeakKeyDictionary
def _reference(self):
return self.__ref.copy()
libreftest = """ Doctest for examples in the library reference: weakref.rst
>>> import weakref
>>> class Dict(dict):
... pass
...
>>> obj = Dict(red=1, green=2, blue=3) # this object is weak referencable
>>> r = weakref.ref(obj)
>>> print(r() is obj)
True
>>> import weakref
>>> class Object:
... pass
...
>>> o = Object()
>>> r = weakref.ref(o)
>>> o2 = r()
>>> o is o2
True
>>> del o, o2
>>> print(r())
None
>>> import weakref
>>> class ExtendedRef(weakref.ref):
... def __init__(self, ob, callback=None, **annotations):
... super().__init__(ob, callback)
... self.__counter = 0
... for k, v in annotations.items():
... setattr(self, k, v)
... def __call__(self):
... '''Return a pair containing the referent and the number of
... times the reference has been called.
... '''
... ob = super().__call__()
... if ob is not None:
... self.__counter += 1
... ob = (ob, self.__counter)
... return ob
...
>>> class A: # not in docs from here, just testing the ExtendedRef
... pass
...
>>> a = A()
>>> r = ExtendedRef(a, foo=1, bar="baz")
>>> r.foo
1
>>> r.bar
'baz'
>>> r()[1]
1
>>> r()[1]
2
>>> r()[0] is a
True
>>> import weakref
>>> _id2obj_dict = weakref.WeakValueDictionary()
>>> def remember(obj):
... oid = id(obj)
... _id2obj_dict[oid] = obj
... return oid
...
>>> def id2obj(oid):
... return _id2obj_dict[oid]
...
>>> a = A() # from here, just testing
>>> a_id = remember(a)
>>> id2obj(a_id) is a
True
>>> del a
>>> try:
... id2obj(a_id)
... except KeyError:
... print('OK')
... else:
... print('WeakValueDictionary error')
OK
"""
__test__ = {'libreftest' : libreftest}
def test_main():
support.run_unittest(
ReferencesTestCase,
MappingTestCase,
WeakValueDictionaryTestCase,
WeakKeyDictionaryTestCase,
SubclassableWeakrefTestCase,
)
support.run_doctest(sys.modules[__name__])
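# Editor's illustrative sketch, not part of the upstream test suite: the
# "shared ref/proxy without callback" tests above reduce to this observable
# behaviour: repeated weakref.ref(o) calls with no callback return the same
# canonical ref object, while refs created with callbacks stay distinct.
def _shared_ref_demo():
    """Return (shared, distinct) flags for callback-less vs callback refs."""
    o = Object("demo")
    shared = weakref.ref(o) is weakref.ref(o)                # True
    distinct = weakref.ref(o, id) is not weakref.ref(o, id)  # True
    return shared, distinct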
if __name__ == "__main__":
test_main()
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-3.3.0/Lib/test/test_weakref.py
|
Python
|
mit
| 47,954
|
import sys
import os
import time
import string
import __builtin__
from panda3d.core import *
from direct.showbase.MessengerGlobal import *
from direct.showbase.DirectObject import DirectObject
from direct.showbase.EventManagerGlobal import *
from direct.task.MiniTask import MiniTask, MiniTaskManager
from direct.directnotify.DirectNotifyGlobal import *
class LogAndOutput:
def __init__(self, orig, log):
self.orig = orig
self.log = log
self.console = False
def write(self, str):
self.log.write(str)
self.log.flush()
if self.console:
self.orig.write(str)
self.orig.flush()
def flush(self):
self.log.flush()
self.orig.flush()
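# Editor's illustrative sketch, not part of the original launcher source:
# LogAndOutput is a simple "tee": every write() goes to the log file, and also
# to the real console stream once .console is switched on. A hypothetical
# standalone use, independent of the launcher (the log file name is made up):
def _example_tee_logging(logfile_name='launcher_demo.log'):
    log = open(logfile_name, 'a')
    tee = LogAndOutput(sys.__stdout__, log)
    tee.write('only in the log file\n')
    tee.console = True
    tee.write('in the log file and on the console\n')
    log.close()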
class LauncherBase(DirectObject):
GameName = 'game'
ArgCount = 6
LauncherPhases = [1,
2,
3,
4]
TmpOverallMap = [0.25,
0.25,
0.25,
0.25]
BANDWIDTH_ARRAY = [1800,
3600,
4200,
6600,
8000,
12000,
16000,
24000,
32000,
48000,
72000,
96000,
128000,
192000,
250000,
500000,
750000,
1000000,
1250000,
1500000,
1750000,
2000000,
3000000,
4000000,
6000000,
8000000,
10000000,
12000000,
14000000,
16000000,
24000000,
32000000,
48000000,
64000000,
96000000,
128000000,
256000000,
512000000,
1024000000]
win32con_FILE_PERSISTENT_ACLS = 8
InstallDirKey = 'INSTALL_DIR'
GameLogFilenameKey = 'GAMELOG_FILENAME'
PandaWindowOpenKey = 'PANDA_WINDOW_OPEN'
PandaErrorCodeKey = 'PANDA_ERROR_CODE'
NewInstallationKey = 'IS_NEW_INSTALLATION'
LastLoginKey = 'LAST_LOGIN'
UserLoggedInKey = 'USER_LOGGED_IN'
PaidUserLoggedInKey = 'PAID_USER_LOGGED_IN'
ReferrerKey = 'REFERRER_CODE'
PeriodTimeRemainingKey = 'PERIOD_TIME_REMAINING'
PeriodNameKey = 'PERIOD_NAME'
SwidKey = 'SWID'
PatchCDKey = 'FROM_CD'
DISLTokenKey = 'DISLTOKEN'
ProxyServerKey = 'PROXY_SERVER'
ProxyDirectHostsKey = 'PROXY_DIRECT_HOSTS'
launcherFileDbFilename = 'launcherFileDb'
webLauncherFlag = False
def __init__(self):
self.started = False
self.taskMgrStarted = False
self._downloadComplete = False
self.pandaErrorCode = 0
self.WIN32 = os.name == 'nt'
if self.WIN32:
self.VISTA = sys.getwindowsversion()[3] == 2 and sys.getwindowsversion()[0] == 6
else:
self.VISTA = 0
ltime = time.localtime()
logSuffix = '%02d%02d%02d_%02d%02d%02d' % (ltime[0] - 2000,
ltime[1],
ltime[2],
ltime[3],
ltime[4],
ltime[5])
logPrefix = ''
if not self.WIN32:
logPrefix = os.environ.get('LOGFILE_PREFIX', '')
logfile = logPrefix + self.getLogFileName() + '-' + logSuffix + '.log'
self.errorfile = 'errorCode'
log = open(logfile, 'a')
logOut = LogAndOutput(sys.__stdout__, log)
logErr = LogAndOutput(sys.__stderr__, log)
sys.stdout = logOut
sys.stderr = logErr
if sys.platform == 'darwin':
os.system('/usr/sbin/system_profiler >>' + logfile)
elif sys.platform == 'linux2':
os.system('cat /proc/cpuinfo >>' + logfile)
os.system('cat /proc/meminfo >>' + logfile)
os.system('/sbin/ifconfig -a >>' + logfile)
print '\n\nStarting %s...' % self.GameName
print 'Current time: ' + time.asctime(time.localtime(time.time())) + ' ' + time.tzname[0]
print 'sys.path = ', sys.path
print 'sys.argv = ', sys.argv
if len(sys.argv) >= self.ArgCount:
Configrc_args = sys.argv[self.ArgCount - 1]
print "generating configrc using: '" + Configrc_args + "'"
else:
Configrc_args = ''
print 'generating standard configrc'
os.environ['CONFIG_CONFIG'] = ':_:configdir_.:configpath_:configname_Configrc.exe:configexe_1:configargs_-stdout ' + Configrc_args
cpMgr = ConfigPageManager.getGlobalPtr()
cpMgr.reloadImplicitPages()
launcherConfig = getConfigExpress()
__builtin__.config = launcherConfig
if config.GetBool('log-private-info', 0):
print 'os.environ = ', os.environ
elif '__COMPAT_LAYER' in os.environ:
print '__COMPAT_LAYER = %s' % (os.environ['__COMPAT_LAYER'],)
self.miniTaskMgr = MiniTaskManager()
self.VerifyFiles = self.getVerifyFiles()
self.setServerVersion(launcherConfig.GetString('server-version', 'no_version_set'))
self.ServerVersionSuffix = launcherConfig.GetString('server-version-suffix', '')
self.UserUpdateDelay = launcherConfig.GetFloat('launcher-user-update-delay', 0.5)
self.TELEMETRY_BANDWIDTH = launcherConfig.GetInt('launcher-telemetry-bandwidth', 2000)
self.INCREASE_THRESHOLD = launcherConfig.GetFloat('launcher-increase-threshold', 0.75)
self.DECREASE_THRESHOLD = launcherConfig.GetFloat('launcher-decrease-threshold', 0.5)
self.BPS_WINDOW = launcherConfig.GetFloat('launcher-bps-window', 8.0)
self.DECREASE_BANDWIDTH = launcherConfig.GetBool('launcher-decrease-bandwidth', 1)
self.MAX_BANDWIDTH = launcherConfig.GetInt('launcher-max-bandwidth', 0)
self.nout = MultiplexStream()
Notify.ptr().setOstreamPtr(self.nout, 0)
self.nout.addFile(Filename(logfile))
if launcherConfig.GetBool('console-output', 0):
self.nout.addStandardOutput()
sys.stdout.console = True
sys.stderr.console = True
self.notify = directNotify.newCategory('Launcher')
self.clock = TrueClock.getGlobalPtr()
self.logPrefix = logPrefix
self.testServerFlag = self.getTestServerFlag()
self.notify.info('isTestServer: %s' % self.testServerFlag)
downloadServerString = launcherConfig.GetString('download-server', '')
if downloadServerString:
self.notify.info('Overriding downloadServer to %s.' % downloadServerString)
else:
downloadServerString = self.getValue('DOWNLOAD_SERVER', '')
self.notify.info('Download Server List %s' % downloadServerString)
self.downloadServerList = []
for name in downloadServerString.split(';'):
url = URLSpec(name, 1)
self.downloadServerList.append(url)
self.nextDownloadServerIndex = 0
self.getNextDownloadServer()
self.gameServer = self.getGameServer()
self.notify.info('Game Server %s' % self.gameServer)
self.downloadServerRetries = 3
self.multifileRetries = 1
self.curMultifileRetry = 0
self.downloadServerRetryPause = 1
self.bandwidthIndex = len(self.BANDWIDTH_ARRAY) - 1
self.everIncreasedBandwidth = 0
self.goUserName = ''
self.downloadPercentage = 90
self.decompressPercentage = 5
self.extractPercentage = 4
self.lastLauncherMsg = None
self.topDir = Filename.fromOsSpecific(self.getValue(self.InstallDirKey, '.'))
self.setRegistry(self.GameLogFilenameKey, logfile)
tmpVal = self.getValue(self.PatchCDKey)
        if tmpVal is None:
self.fromCD = 0
else:
self.fromCD = tmpVal
self.notify.info('patch directory is ' + `(self.fromCD)`)
self.dbDir = self.topDir
self.patchDir = self.topDir
self.mfDir = self.topDir
self.contentDir = 'content/'
self.clientDbFilename = 'client.ddb'
self.compClientDbFilename = self.clientDbFilename + '.pz'
self.serverDbFilename = 'server.ddb'
self.compServerDbFilename = self.serverDbFilename + '.pz'
self.serverDbFilePath = self.contentDir + self.compServerDbFilename
self.clientStarterDbFilePath = self.contentDir + self.compClientDbFilename
self.progressFilename = 'progress'
self.overallComplete = 0
self.progressSoFar = 0
self.patchExtension = 'pch'
self.scanForHacks()
self.firstPhase = self.LauncherPhases[0]
self.finalPhase = self.LauncherPhases[-1]
self.showPhase = 3.5
self.numPhases = len(self.LauncherPhases)
self.phaseComplete = {}
self.phaseNewDownload = {}
self.phaseOverallMap = {}
tmpOverallMap = self.TmpOverallMap
tmpPhase3Map = [0.001,
0.996,
0.0,
0.0,
0.003]
phaseIdx = 0
for phase in self.LauncherPhases:
percentPhaseCompleteKey = 'PERCENT_PHASE_COMPLETE_' + `phase`
self.setRegistry(percentPhaseCompleteKey, 0)
self.phaseComplete[phase] = 0
self.phaseNewDownload[phase] = 0
self.phaseOverallMap[phase] = tmpOverallMap[phaseIdx]
phaseIdx += 1
self.patchList = []
self.reextractList = []
self.byteRate = 0
self.byteRateRequested = 0
self.resetBytesPerSecond()
self.dldb = None
self.currentMfname = None
self.currentPhaseIndex = 0
self.currentPhase = self.LauncherPhases[self.currentPhaseIndex]
        self.currentPhaseName = self.Localizer.LauncherPhaseNames[self.currentPhase]
if self.getServerVersion() == 'no_version_set':
self.setPandaErrorCode(10)
self.notify.info('Aborting, Configrc did not run!')
sys.exit()
self.launcherMessage(self.Localizer.LauncherStartingMessage)
self.http = HTTPClient()
if self.http.getProxySpec() == '':
self.http.setProxySpec(self.getValue(self.ProxyServerKey, ''))
self.http.setDirectHostSpec(self.getValue(self.ProxyDirectHostsKey, ''))
self.notify.info('Proxy spec is: %s' % self.http.getProxySpec())
if self.http.getDirectHostSpec() != '':
self.notify.info('Direct hosts list is: %s' % self.http.getDirectHostSpec())
self.httpChannel = self.http.makeChannel(0)
self.httpChannel.setDownloadThrottle(1)
connOk = 0
while not connOk:
proxies = self.http.getProxiesForUrl(self.downloadServer)
if proxies == 'DIRECT':
self.notify.info('No proxy for download.')
else:
self.notify.info('Download proxy: %s' % proxies)
testurl = self.addDownloadVersion(self.launcherFileDbFilename)
connOk = self.httpChannel.getHeader(DocumentSpec(testurl))
statusCode = self.httpChannel.getStatusCode()
statusString = self.httpChannel.getStatusString()
if not connOk:
self.notify.warning('Could not contact download server at %s' % testurl.cStr())
self.notify.warning('Status code = %s %s' % (statusCode, statusString))
if statusCode == 407 or statusCode == 1407 or statusCode == HTTPChannel.SCSocksNoAcceptableLoginMethod:
self.setPandaErrorCode(3)
elif statusCode == 404:
self.setPandaErrorCode(13)
elif statusCode < 100:
self.setPandaErrorCode(4)
elif statusCode > 1000:
self.setPandaErrorCode(9)
else:
self.setPandaErrorCode(6)
if not self.getNextDownloadServer():
sys.exit()
self.notify.info('Download server: %s' % self.downloadServer.cStr())
if self.notify.getDebug():
self.accept('page_up', self.increaseBandwidth)
self.accept('page_down', self.decreaseBandwidth)
self.httpChannel.setPersistentConnection(1)
self.foreground()
self.prepareClient()
self.setBandwidth()
self.downloadLauncherFileDb()
return
def getTime(self):
return self.clock.getShortTime()
def isDummy(self):
return 0
def getNextDownloadServer(self):
if self.nextDownloadServerIndex >= len(self.downloadServerList):
self.downloadServer = None
return 0
self.downloadServer = self.downloadServerList[self.nextDownloadServerIndex]
self.notify.info('Using download server %s.' % self.downloadServer.cStr())
self.nextDownloadServerIndex += 1
return 1
def getProductName(self):
config = getConfigExpress()
productName = config.GetString('product-name', '')
if productName and productName != 'DisneyOnline-US':
productName = '_%s' % productName
else:
productName = ''
return productName
def background(self):
self.notify.info('background: Launcher now operating in background')
self.backgrounded = 1
def foreground(self):
self.notify.info('foreground: Launcher now operating in foreground')
self.backgrounded = 0
def setRegistry(self, key, value):
self.notify.info('DEPRECATED setRegistry: %s = %s' % (key, value))
def getRegistry(self, key):
self.notify.info('DEPRECATED getRegistry: %s' % key)
return None
def handleInitiateFatalError(self, errorCode):
self.notify.warning('handleInitiateFatalError: ' + errorToText(errorCode))
sys.exit()
def handleDecompressFatalError(self, task, errorCode):
self.notify.warning('handleDecompressFatalError: ' + errorToText(errorCode))
self.miniTaskMgr.remove(task)
self.handleGenericMultifileError()
def handleDecompressWriteError(self, task, errorCode):
self.notify.warning('handleDecompressWriteError: ' + errorToText(errorCode))
self.miniTaskMgr.remove(task)
self.handleGenericMultifileError()
def handleDecompressZlibError(self, task, errorCode):
self.notify.warning('handleDecompressZlibError: ' + errorToText(errorCode))
self.miniTaskMgr.remove(task)
self.handleGenericMultifileError()
def handleExtractFatalError(self, task, errorCode):
self.notify.warning('handleExtractFatalError: ' + errorToText(errorCode))
self.miniTaskMgr.remove(task)
self.handleGenericMultifileError()
def handleExtractWriteError(self, task, errorCode):
self.notify.warning('handleExtractWriteError: ' + errorToText(errorCode))
self.miniTaskMgr.remove(task)
self.handleGenericMultifileError()
def handlePatchFatalError(self, task, errorCode):
self.notify.warning('handlePatchFatalError: ' + errorToText(errorCode))
self.miniTaskMgr.remove(task)
self.handleGenericMultifileError()
def handlePatchWriteError(self, task, errorCode):
self.notify.warning('handlePatchWriteError: ' + errorToText(errorCode))
self.miniTaskMgr.remove(task)
self.handleGenericMultifileError()
def handleDownloadFatalError(self, task):
self.notify.warning('handleDownloadFatalError: status code = %s %s' % (self.httpChannel.getStatusCode(), self.httpChannel.getStatusString()))
self.miniTaskMgr.remove(task)
statusCode = self.httpChannel.getStatusCode()
if statusCode == 404:
self.setPandaErrorCode(5)
elif statusCode < 100:
self.setPandaErrorCode(4)
else:
self.setPandaErrorCode(6)
if not self.getNextDownloadServer():
sys.exit()
def handleDownloadWriteError(self, task):
self.notify.warning('handleDownloadWriteError.')
self.miniTaskMgr.remove(task)
self.setPandaErrorCode(2)
sys.exit()
def handleGenericMultifileError(self):
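        """
        Recover from a failed multifile transfer by marking the current
        multifile incomplete and redownloading it, up to multifileRetries
        attempts; after that, record panda error code 6 and exit.
        """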
if not self.currentMfname:
sys.exit()
if self.curMultifileRetry < self.multifileRetries:
self.notify.info('recover attempt: %s / %s' % (self.curMultifileRetry, self.multifileRetries))
self.curMultifileRetry += 1
self.notify.info('downloadPatchDone: Recovering from error.' + ' Deleting files in: ' + self.currentMfname)
self.dldb.setClientMultifileIncomplete(self.currentMfname)
self.dldb.setClientMultifileSize(self.currentMfname, 0)
self.notify.info('downloadPatchDone: Recovering from error.' + ' redownloading: ' + self.currentMfname)
self.httpChannel.reset()
self.getMultifile(self.currentMfname)
else:
self.setPandaErrorCode(6)
self.notify.info('handleGenericMultifileError: Failed to download multifile')
sys.exit()
def foregroundSleep(self):
if not self.backgrounded:
time.sleep(self.ForegroundSleepTime)
def forceSleep(self):
if not self.backgrounded:
time.sleep(3.0)
def addDownloadVersion(self, serverFilePath):
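        """
        Build the full URL for serverFilePath on the current download
        server, using the CD download path when installing from CD.
        """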
url = URLSpec(self.downloadServer)
origPath = url.getPath()
if origPath and origPath[-1] == '/':
origPath = origPath[:-1]
if self.fromCD:
url.setPath(self.getCDDownloadPath(origPath, serverFilePath))
else:
url.setPath(self.getDownloadPath(origPath, serverFilePath))
self.notify.info('***' + url.cStr())
return url
def download(self, serverFilePath, localFilename, callback, callbackProgress):
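        """
        Begin an asynchronous download of serverFilePath to localFilename;
        downloadTask polls the HTTP channel and invokes callback when the
        transfer completes.
        """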
self.launcherMessage(self.Localizer.LauncherDownloadFile % {'name': self.currentPhaseName,
'current': self.currentPhaseIndex,
'total': self.numPhases})
task = MiniTask(self.downloadTask)
task.downloadRam = 0
task.serverFilePath = serverFilePath
task.serverFileURL = self.addDownloadVersion(serverFilePath)
self.notify.info('Download request: %s' % task.serverFileURL.cStr())
task.callback = callback
task.callbackProgress = callbackProgress
task.lastUpdate = 0
self.resetBytesPerSecond()
task.localFilename = localFilename
self.httpChannel.beginGetDocument(DocumentSpec(task.serverFileURL))
self.httpChannel.downloadToFile(task.localFilename)
self.miniTaskMgr.add(task, 'launcher-download')
def downloadRam(self, serverFilePath, callback):
self.ramfile = Ramfile()
task = MiniTask(self.downloadTask)
task.downloadRam = 1
task.serverFilePath = serverFilePath
task.serverFileURL = self.addDownloadVersion(serverFilePath)
self.notify.info('Download request: %s' % task.serverFileURL.cStr())
task.callback = callback
task.callbackProgress = None
task.lastUpdate = 0
self.resetBytesPerSecond()
self.httpChannel.beginGetDocument(DocumentSpec(task.serverFileURL))
self.httpChannel.downloadToRam(self.ramfile)
self.miniTaskMgr.add(task, 'launcher-download')
return
def downloadTask(self, task):
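        """
        Mini-task that pumps the HTTP channel for a plain download: while
        data is arriving it updates bandwidth statistics and progress
        messages; on completion it fires the stored callback, and on
        failure it either retries the request or escalates to a fatal
        download error.
        """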
self.maybeStartGame()
if self.httpChannel.run():
now = self.getTime()
if now - task.lastUpdate >= self.UserUpdateDelay:
task.lastUpdate = now
self.testBandwidth()
if task.callbackProgress:
task.callbackProgress(task)
bytesWritten = self.httpChannel.getBytesDownloaded()
totalBytes = self.httpChannel.getFileSize()
if totalBytes:
pct = int(round(bytesWritten / float(totalBytes) * 100))
self.launcherMessage(self.Localizer.LauncherDownloadFilePercent % {'name': self.currentPhaseName,
'current': self.currentPhaseIndex,
'total': self.numPhases,
'percent': pct})
else:
self.launcherMessage(self.Localizer.LauncherDownloadFileBytes % {'name': self.currentPhaseName,
'current': self.currentPhaseIndex,
'total': self.numPhases,
'bytes': bytesWritten})
self.foregroundSleep()
return task.cont
statusCode = self.httpChannel.getStatusCode()
statusString = self.httpChannel.getStatusString()
self.notify.info('HTTP status %s: %s' % (statusCode, statusString))
if self.httpChannel.isValid() and self.httpChannel.isDownloadComplete():
bytesWritten = self.httpChannel.getBytesDownloaded()
totalBytes = self.httpChannel.getFileSize()
if totalBytes:
pct = int(round(bytesWritten / float(totalBytes) * 100))
self.launcherMessage(self.Localizer.LauncherDownloadFilePercent % {'name': self.currentPhaseName,
'current': self.currentPhaseIndex,
'total': self.numPhases,
'percent': pct})
else:
self.launcherMessage(self.Localizer.LauncherDownloadFileBytes % {'name': self.currentPhaseName,
'current': self.currentPhaseIndex,
'total': self.numPhases,
'bytes': bytesWritten})
self.notify.info('downloadTask: Download done: %s' % task.serverFileURL.cStr())
task.callback()
del task.callback
return task.done
else:
if statusCode == HTTPChannel.SCDownloadOpenError or statusCode == HTTPChannel.SCDownloadWriteError:
self.handleDownloadWriteError(task)
elif statusCode == HTTPChannel.SCLostConnection:
gotBytes = self.httpChannel.getBytesDownloaded()
self.notify.info('Connection lost while downloading; got %s bytes. Reconnecting.' % gotBytes)
if task.downloadRam:
self.downloadRam(task.serverFilePath, task.callback)
else:
self.download(task.serverFilePath, task.localFilename, task.callback, None)
else:
if self.httpChannel.isValid():
self.notify.info('Unexpected situation: no error status, but %s incompletely downloaded.' % task.serverFileURL.cStr())
self.handleDownloadFatalError(task)
if task.downloadRam:
self.downloadRam(task.serverFilePath, task.callback)
else:
self.download(task.serverFilePath, task.localFilename, task.callback, None)
return task.done
return
def downloadMultifile(self, serverFilename, localFilename, mfname, callback, totalSize, currentSize, callbackProgress):
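        """
        Download (or resume) a phase multifile.  If currentSize already
        equals totalSize the callback fires immediately; when currentSize
        is nonzero a byte-range request continues the partial file;
        otherwise the whole multifile is fetched from the beginning.
        """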
if currentSize != 0 and currentSize == totalSize:
callback()
return
self.launcherMessage(self.Localizer.LauncherDownloadFile % {'name': self.currentPhaseName,
'current': self.currentPhaseIndex,
'total': self.numPhases})
task = MiniTask(self.downloadMultifileTask)
mfURL = self.addDownloadVersion(serverFilename)
task.mfURL = mfURL
self.notify.info('downloadMultifile: %s ' % task.mfURL.cStr())
task.callback = callback
task.callbackProgress = callbackProgress
task.lastUpdate = 0
self.httpChannel.getHeader(DocumentSpec(task.mfURL))
if self.httpChannel.isFileSizeKnown():
task.totalSize = self.httpChannel.getFileSize()
else:
task.totalSize = totalSize
self.resetBytesPerSecond()
task.serverFilename = serverFilename
task.localFilename = localFilename
task.mfname = mfname
if currentSize != 0:
if task.totalSize == currentSize:
self.notify.info('already have full file! Skipping download.')
callback()
return
self.httpChannel.beginGetSubdocument(DocumentSpec(task.mfURL), currentSize, task.totalSize)
self.httpChannel.downloadToFile(task.localFilename, True)
else:
self.httpChannel.beginGetDocument(DocumentSpec(task.mfURL))
self.httpChannel.downloadToFile(task.localFilename)
self._addMiniTask(task, 'launcher-download-multifile')
def downloadPatchSimpleProgress(self, task):
startingByte = self.httpChannel.getFirstByteDelivered()
bytesDownloaded = self.httpChannel.getBytesDownloaded()
bytesWritten = startingByte + bytesDownloaded
totalBytes = self.httpChannel.getFileSize()
percentPatchComplete = int(round(bytesWritten / float(totalBytes) * self.downloadPercentage))
self.setPercentPhaseComplete(self.currentPhase, percentPatchComplete)
def getPercentPatchComplete(self, bytesWritten):
return int(round((self.patchDownloadSoFar + bytesWritten) / float(self.totalPatchDownload) * self.downloadPercentage))
def downloadPatchOverallProgress(self, task):
startingByte = self.httpChannel.getFirstByteDelivered()
bytesDownloaded = self.httpChannel.getBytesDownloaded()
bytesWritten = startingByte + bytesDownloaded
percentPatchComplete = self.getPercentPatchComplete(bytesWritten)
self.setPercentPhaseComplete(self.currentPhase, percentPatchComplete)
def downloadMultifileWriteToDisk(self, task):
self.maybeStartGame()
startingByte = self.httpChannel.getFirstByteDelivered()
bytesDownloaded = self.httpChannel.getBytesDownloaded()
bytesWritten = startingByte + bytesDownloaded
if self.dldb:
self.dldb.setClientMultifileSize(task.mfname, bytesWritten)
percentComplete = 0
if task.totalSize != 0:
percentComplete = int(round(bytesWritten / float(task.totalSize) * self.downloadPercentage))
self.setPercentPhaseComplete(self.currentPhase, percentComplete)
def downloadMultifileTask(self, task):
task.totalSize = self.httpChannel.getFileSize()
if self.httpChannel.run():
now = self.getTime()
if now - task.lastUpdate >= self.UserUpdateDelay:
task.lastUpdate = now
self.testBandwidth()
if task.callbackProgress:
task.callbackProgress(task)
startingByte = self.httpChannel.getFirstByteDelivered()
bytesDownloaded = self.httpChannel.getBytesDownloaded()
bytesWritten = startingByte + bytesDownloaded
percentComplete = 0
if task.totalSize != 0:
percentComplete = int(round(100.0 * bytesWritten / float(task.totalSize)))
self.launcherMessage(self.Localizer.LauncherDownloadFilePercent % {'name': self.currentPhaseName,
'current': self.currentPhaseIndex,
'total': self.numPhases,
'percent': percentComplete})
self.foregroundSleep()
return task.cont
statusCode = self.httpChannel.getStatusCode()
statusString = self.httpChannel.getStatusString()
self.notify.info('HTTP status %s: %s' % (statusCode, statusString))
if self.httpChannel.isValid() and self.httpChannel.isDownloadComplete():
if task.callbackProgress:
task.callbackProgress(task)
self.notify.info('done: %s' % task.mfname)
if self.dldb:
self.dldb.setClientMultifileComplete(task.mfname)
task.callback()
del task.callback
return task.done
else:
if statusCode == HTTPChannel.SCDownloadOpenError or statusCode == HTTPChannel.SCDownloadWriteError:
self.handleDownloadWriteError(task)
elif statusCode == HTTPChannel.SCLostConnection:
startingByte = self.httpChannel.getFirstByteDelivered()
bytesDownloaded = self.httpChannel.getBytesDownloaded()
bytesWritten = startingByte + bytesDownloaded
self.notify.info('Connection lost while downloading; got %s bytes. Reconnecting.' % bytesDownloaded)
self.downloadMultifile(task.serverFilename, task.localFilename, task.mfname, task.callback, task.totalSize, bytesWritten, task.callbackProgress)
elif (statusCode == 416 or statusCode == HTTPChannel.SCDownloadInvalidRange) and self.httpChannel.getFirstByteRequested() != 0:
self.notify.info('Invalid subrange; redownloading entire file.')
self.downloadMultifile(task.serverFilename, task.localFilename, task.mfname, task.callback, task.totalSize, 0, task.callbackProgress)
else:
if self.httpChannel.isValid():
self.notify.info('Unexpected situation: no error status, but %s incompletely downloaded.' % task.mfname)
self.handleDownloadFatalError(task)
self.downloadMultifile(task.serverFilename, task.localFilename, task.mfname, task.callback, task.totalSize, 0, task.callbackProgress)
return task.done
def decompressFile(self, localFilename, callback):
self.notify.info('decompress: request: ' + localFilename.cStr())
self.launcherMessage(self.Localizer.LauncherDecompressingFile % {'name': self.currentPhaseName,
'current': self.currentPhaseIndex,
'total': self.numPhases})
task = MiniTask(self.decompressFileTask)
task.localFilename = localFilename
task.callback = callback
task.lastUpdate = 0
task.decompressor = Decompressor()
errorCode = task.decompressor.initiate(task.localFilename)
if errorCode > 0:
self._addMiniTask(task, 'launcher-decompressFile')
else:
self.handleInitiateFatalError(errorCode)
def decompressFileTask(self, task):
errorCode = task.decompressor.run()
if errorCode == EUOk:
now = self.getTime()
if now - task.lastUpdate >= self.UserUpdateDelay:
task.lastUpdate = now
progress = task.decompressor.getProgress()
self.launcherMessage(self.Localizer.LauncherDecompressingPercent % {'name': self.currentPhaseName,
'current': self.currentPhaseIndex,
'total': self.numPhases,
'percent': int(round(progress * 100))})
self.foregroundSleep()
return task.cont
elif errorCode == EUSuccess:
self.launcherMessage(self.Localizer.LauncherDecompressingPercent % {'name': self.currentPhaseName,
'current': self.currentPhaseIndex,
'total': self.numPhases,
'percent': 100})
self.notify.info('decompressTask: Decompress done: ' + task.localFilename.cStr())
del task.decompressor
task.callback()
del task.callback
return task.done
elif errorCode == EUErrorAbort:
self.handleDecompressFatalError(task, errorCode)
return task.done
        elif errorCode in (EUErrorWriteOutOfFiles, EUErrorWriteDiskFull, EUErrorWriteDiskSectorNotFound, EUErrorWriteOutOfMemory, EUErrorWriteSharingViolation, EUErrorWriteDiskFault, EUErrorWriteDiskNotFound):
self.handleDecompressWriteError(task, errorCode)
return task.done
elif errorCode == EUErrorZlib:
self.handleDecompressZlibError(task, errorCode)
return task.done
elif errorCode > 0:
self.notify.warning('decompressMultifileTask: Unknown success return code: ' + errorToText(errorCode))
return task.cont
else:
self.notify.warning('decompressMultifileTask: Unknown return code: ' + errorToText(errorCode))
self.handleDecompressFatalError(task, errorCode)
return task.done
def decompressMultifile(self, mfname, localFilename, callback):
self.notify.info('decompressMultifile: request: ' + localFilename.cStr())
self.launcherMessage(self.Localizer.LauncherDecompressingFile % {'name': self.currentPhaseName,
'current': self.currentPhaseIndex,
'total': self.numPhases})
task = MiniTask(self.decompressMultifileTask)
task.mfname = mfname
task.localFilename = localFilename
task.callback = callback
task.lastUpdate = 0
task.decompressor = Decompressor()
errorCode = task.decompressor.initiate(task.localFilename)
if errorCode > 0:
self._addMiniTask(task, 'launcher-decompressMultifile')
else:
self.handleInitiateFatalError(errorCode)
def decompressMultifileTask(self, task):
errorCode = task.decompressor.run()
if errorCode == EUOk:
now = self.getTime()
if now - task.lastUpdate >= self.UserUpdateDelay:
task.lastUpdate = now
progress = task.decompressor.getProgress()
self.launcherMessage(self.Localizer.LauncherDecompressingPercent % {'name': self.currentPhaseName,
'current': self.currentPhaseIndex,
'total': self.numPhases,
'percent': int(round(progress * 100))})
percentProgress = int(round(progress * self.decompressPercentage))
totalPercent = self.downloadPercentage + percentProgress
self.setPercentPhaseComplete(self.currentPhase, totalPercent)
self.foregroundSleep()
return task.cont
elif errorCode == EUSuccess:
self.launcherMessage(self.Localizer.LauncherDecompressingPercent % {'name': self.currentPhaseName,
'current': self.currentPhaseIndex,
'total': self.numPhases,
'percent': 100})
totalPercent = self.downloadPercentage + self.decompressPercentage
self.setPercentPhaseComplete(self.currentPhase, totalPercent)
self.notify.info('decompressMultifileTask: Decompress multifile done: ' + task.localFilename.cStr())
self.dldb.setClientMultifileDecompressed(task.mfname)
del task.decompressor
task.callback()
del task.callback
return task.done
elif errorCode == EUErrorAbort:
self.handleDecompressFatalError(task, errorCode)
return task.done
        elif errorCode in (EUErrorWriteOutOfFiles, EUErrorWriteDiskFull, EUErrorWriteDiskSectorNotFound, EUErrorWriteOutOfMemory, EUErrorWriteSharingViolation, EUErrorWriteDiskFault, EUErrorWriteDiskNotFound):
self.handleDecompressWriteError(task, errorCode)
return task.done
elif errorCode == EUErrorZlib:
self.handleDecompressZlibError(task, errorCode)
return task.done
elif errorCode > 0:
self.notify.warning('decompressMultifileTask: Unknown success return code: ' + errorToText(errorCode))
return task.cont
else:
self.notify.warning('decompressMultifileTask: Unknown return code: ' + errorToText(errorCode))
self.handleDecompressFatalError(task, errorCode)
return task.done
def extract(self, mfname, localFilename, destDir, callback):
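        """
        Set up an Extractor for every subfile the download db lists for
        mfname and hand it to extractTask to step until the multifile is
        fully extracted into destDir.
        """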
self.notify.info('extract: request: ' + localFilename.cStr() + ' destDir: ' + destDir.cStr())
self.launcherMessage(self.Localizer.LauncherExtractingFile % {'name': self.currentPhaseName,
'current': self.currentPhaseIndex,
'total': self.numPhases})
task = MiniTask(self.extractTask)
task.mfname = mfname
task.localFilename = localFilename
task.destDir = destDir
task.callback = callback
task.lastUpdate = 0
task.extractor = Extractor()
task.extractor.setExtractDir(task.destDir)
if not task.extractor.setMultifile(task.localFilename):
self.setPandaErrorCode(6)
self.notify.info('extract: Unable to open multifile %s' % task.localFilename.cStr())
sys.exit()
numFiles = self.dldb.getServerNumFiles(mfname)
for i in xrange(numFiles):
subfile = self.dldb.getServerFileName(mfname, i)
if not task.extractor.requestSubfile(Filename(subfile)):
self.setPandaErrorCode(6)
self.notify.info('extract: Unable to find subfile %s in multifile %s' % (subfile, mfname))
sys.exit()
self.notify.info('Extracting %d subfiles from multifile %s.' % (numFiles, mfname))
self._addMiniTask(task, 'launcher-extract')
def extractTask(self, task):
errorCode = task.extractor.step()
if errorCode == EUOk:
now = self.getTime()
if now - task.lastUpdate >= self.UserUpdateDelay:
task.lastUpdate = now
progress = task.extractor.getProgress()
self.launcherMessage(self.Localizer.LauncherExtractingPercent % {'name': self.currentPhaseName,
'current': self.currentPhaseIndex,
'total': self.numPhases,
'percent': int(round(progress * 100.0))})
percentProgress = int(round(progress * self.extractPercentage))
totalPercent = self.downloadPercentage + self.decompressPercentage + percentProgress
self.setPercentPhaseComplete(self.currentPhase, totalPercent)
self.foregroundSleep()
return task.cont
elif errorCode == EUSuccess:
self.launcherMessage(self.Localizer.LauncherExtractingPercent % {'name': self.currentPhaseName,
'current': self.currentPhaseIndex,
'total': self.numPhases,
'percent': 100})
totalPercent = self.downloadPercentage + self.decompressPercentage + self.extractPercentage
self.setPercentPhaseComplete(self.currentPhase, totalPercent)
self.notify.info('extractTask: Extract multifile done: ' + task.localFilename.cStr())
self.dldb.setClientMultifileExtracted(task.mfname)
del task.extractor
task.callback()
del task.callback
return task.done
elif errorCode == EUErrorAbort:
self.handleExtractFatalError(task, errorCode)
return task.done
elif errorCode == EUErrorFileEmpty:
self.handleExtractFatalError(task, errorCode)
return task.done
        elif errorCode in (EUErrorWriteOutOfFiles, EUErrorWriteDiskFull, EUErrorWriteDiskSectorNotFound, EUErrorWriteOutOfMemory, EUErrorWriteSharingViolation, EUErrorWriteDiskFault, EUErrorWriteDiskNotFound):
self.handleExtractWriteError(task, errorCode)
return task.done
elif errorCode > 0:
self.notify.warning('extractTask: Unknown success return code: ' + errorToText(errorCode))
return task.cont
else:
self.notify.warning('extractTask: Unknown error return code: ' + errorToText(errorCode))
self.handleExtractFatalError(task, errorCode)
return task.done
def patch(self, patchFile, patcheeFile, callback):
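        """
        Initiate a Patcher that applies patchFile to patcheeFile; patchTask
        steps it and invokes callback when patching succeeds.
        """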
self.notify.info('patch: request: ' + patchFile.cStr() + ' patchee: ' + patcheeFile.cStr())
self.launcherMessage(self.Localizer.LauncherPatchingFile % {'name': self.currentPhaseName,
'current': self.currentPhaseIndex,
'total': self.numPhases})
task = MiniTask(self.patchTask)
task.patchFile = patchFile
task.patcheeFile = patcheeFile
task.callback = callback
task.lastUpdate = 0
task.patcher = Patcher()
errorCode = task.patcher.initiate(task.patchFile, task.patcheeFile)
if errorCode > 0:
self._addMiniTask(task, 'launcher-patch')
else:
self.handleInitiateFatalError(errorCode)
def patchTask(self, task):
errorCode = task.patcher.run()
if errorCode == EUOk:
now = self.getTime()
if now - task.lastUpdate >= self.UserUpdateDelay:
task.lastUpdate = now
progress = task.patcher.getProgress()
self.launcherMessage(self.Localizer.LauncherPatchingPercent % {'name': self.currentPhaseName,
'current': self.currentPhaseIndex,
'total': self.numPhases,
'percent': int(round(progress * 100.0))})
self.foregroundSleep()
return task.cont
elif errorCode == EUSuccess:
self.launcherMessage(self.Localizer.LauncherPatchingPercent % {'name': self.currentPhaseName,
'current': self.currentPhaseIndex,
'total': self.numPhases,
'percent': 100})
self.notify.info('patchTask: Patch done: ' + task.patcheeFile.cStr())
del task.patcher
task.callback()
del task.callback
return task.done
elif errorCode == EUErrorAbort:
self.handlePatchFatalError(task, errorCode)
return task.done
elif errorCode == EUErrorFileEmpty:
self.handlePatchFatalError(task, errorCode)
return task.done
        elif errorCode in (EUErrorWriteOutOfFiles, EUErrorWriteDiskFull, EUErrorWriteDiskSectorNotFound, EUErrorWriteOutOfMemory, EUErrorWriteSharingViolation, EUErrorWriteDiskFault, EUErrorWriteDiskNotFound):
self.handlePatchWriteError(task, errorCode)
return task.done
elif errorCode > 0:
self.notify.warning('patchTask: Unknown success return code: ' + errorToText(errorCode))
return task.cont
else:
self.notify.warning('patchTask: Unknown error return code: ' + errorToText(errorCode))
self.handlePatchFatalError(task, errorCode)
return task.done
def getProgressSum(self, phase):
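        """
        Sum the byte counts recorded in the progress file for every line
        whose name contains the given phase token.
        """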
sum = 0
for i in xrange(0, len(self.linesInProgress)):
if self.linesInProgress[i].find(phase) > -1:
nameSizeTuple = self.linesInProgress[i].split()
numSize = nameSizeTuple[1].split('L')
                sum += int(numSize[0])
return sum
def readProgressFile(self):
localFilename = Filename(self.dbDir, Filename(self.progressFilename))
if not localFilename.exists():
self.notify.warning('File does not exist: %s' % localFilename.cStr())
self.linesInProgress = []
else:
f = open(localFilename.toOsSpecific())
self.linesInProgress = f.readlines()
f.close()
localFilename.unlink()
self.progressSum = 0
token = 'phase_'
self.progressSum = self.getProgressSum(token)
self.progressSum -= self.getProgressSum(token + '2')
self.notify.info('total phases to be downloaded = ' + `(self.progressSum)`)
self.checkClientDbExists()
def prepareClient(self):
self.notify.info('prepareClient: Preparing client for install')
if not self.topDir.exists():
self.notify.info('prepareClient: Creating top directory: ' + self.topDir.cStr())
os.makedirs(self.topDir.toOsSpecific())
if not self.dbDir.exists():
self.notify.info('prepareClient: Creating db directory: ' + self.dbDir.cStr())
os.makedirs(self.dbDir.toOsSpecific())
if not self.patchDir.exists():
self.notify.info('prepareClient: Creating patch directory: ' + self.patchDir.cStr())
os.makedirs(self.patchDir.toOsSpecific())
if not self.mfDir.exists():
self.notify.info('prepareClient: Creating mf directory: ' + self.mfDir.cStr())
os.makedirs(self.mfDir.toOsSpecific())
def downloadLauncherFileDb(self):
self.notify.info('Downloading launcherFileDb')
self.downloadRam(self.launcherFileDbFilename, self.downloadLauncherFileDbDone)
def downloadLauncherFileDbDone(self):
self.launcherFileDbHash = HashVal()
self.launcherFileDbHash.hashRamfile(self.ramfile)
if self.VerifyFiles:
self.notify.info('Validating Launcher files')
for fileDesc in self.ramfile.readlines():
try:
filename, hashStr = fileDesc.split(' ', 1)
except:
self.notify.info('Invalid line: "%s"' % fileDesc)
self.failLauncherFileDb('No hash in launcherFileDb')
serverHash = HashVal()
if not self.hashIsValid(serverHash, hashStr):
self.notify.info('Not a valid hash string: "%s"' % hashStr)
self.failLauncherFileDb('Invalid hash in launcherFileDb')
localHash = HashVal()
localFilename = Filename(self.topDir, Filename(filename))
localHash.hashFile(localFilename)
if localHash != serverHash:
self.failLauncherFileDb('%s does not match expected version.' % filename)
self.downloadServerDbFile()
def failLauncherFileDb(self, string):
self.notify.info(string)
self.setPandaErrorCode(15)
sys.exit()
def downloadServerDbFile(self):
self.notify.info('Downloading server db file')
self.launcherMessage(self.Localizer.LauncherDownloadServerFileList)
self.downloadRam(self.serverDbFilePath, self.downloadServerDbFileDone)
def downloadServerDbFileDone(self):
self.serverDbFileHash = HashVal()
self.serverDbFileHash.hashRamfile(self.ramfile)
self.readProgressFile()
def checkClientDbExists(self):
clientFilename = Filename(self.dbDir, Filename(self.clientDbFilename))
if clientFilename.exists():
self.notify.info('Client Db exists')
self.createDownloadDb()
else:
self.notify.info('Client Db does not exist')
self.downloadClientDbStarterFile()
def downloadClientDbStarterFile(self):
self.notify.info('Downloading Client Db starter file')
localFilename = Filename(self.dbDir, Filename(self.compClientDbFilename))
self.download(self.clientStarterDbFilePath, localFilename, self.downloadClientDbStarterFileDone, None)
return
def downloadClientDbStarterFileDone(self):
localFilename = Filename(self.dbDir, Filename(self.compClientDbFilename))
decompressor = Decompressor()
decompressor.decompress(localFilename)
self.createDownloadDb()
def createDownloadDb(self):
self.notify.info('Creating downloadDb')
self.launcherMessage(self.Localizer.LauncherCreatingDownloadDb)
clientFilename = Filename(self.dbDir, Filename(self.clientDbFilename))
self.notify.info('Client file name: ' + clientFilename.cStr())
self.launcherMessage(self.Localizer.LauncherDownloadClientFileList)
serverFile = self.ramfile
decompressor = Decompressor()
decompressor.decompress(serverFile)
self.notify.info('Finished decompress')
self.dldb = DownloadDb(serverFile, clientFilename)
self.notify.info('created download db')
self.launcherMessage(self.Localizer.LauncherFinishedDownloadDb)
self.currentPhase = self.LauncherPhases[0]
self.currentPhaseIndex = 1
self.currentPhaseName = self.Localizer.LauncherPhaseNames[self.currentPhase]
self.updatePhase(self.currentPhase)
def maybeStartGame(self):
if not self.started and self.currentPhase >= self.showPhase:
self.started = True
self.notify.info('maybeStartGame: starting game')
self.launcherMessage(self.Localizer.LauncherStartingGame)
self.background()
__builtin__.launcher = self
self.startGame()
def _runTaskManager(self):
if not self.taskMgrStarted:
self.miniTaskMgr.run()
self.notify.info('Switching task managers.')
taskMgr.run()
def _stepMiniTaskManager(self, task):
self.miniTaskMgr.step()
if self.miniTaskMgr.taskList:
return task.cont
self.notify.info('Stopping mini task manager.')
self.miniTaskMgr = None
return task.done
def _addMiniTask(self, task, name):
if not self.miniTaskMgr:
self.notify.info('Restarting mini task manager.')
self.miniTaskMgr = MiniTaskManager()
from direct.task.TaskManagerGlobal import taskMgr
taskMgr.remove('miniTaskManager')
taskMgr.add(self._stepMiniTaskManager, 'miniTaskManager')
self.miniTaskMgr.add(task, name)
def newTaskManager(self):
self.taskMgrStarted = True
if self.miniTaskMgr.running:
self.miniTaskMgr.stop()
from direct.task.TaskManagerGlobal import taskMgr
taskMgr.remove('miniTaskManager')
taskMgr.add(self._stepMiniTaskManager, 'miniTaskManager')
def mainLoop(self):
try:
self._runTaskManager()
except SystemExit:
if hasattr(__builtin__, 'base'):
base.destroy()
self.notify.info('Normal exit.')
raise
except:
self.setPandaErrorCode(12)
self.notify.warning('Handling Python exception.')
if hasattr(__builtin__, 'base') and getattr(base, 'cr', None):
if base.cr.timeManager:
from otp.otpbase import OTPGlobals
base.cr.timeManager.setDisconnectReason(OTPGlobals.DisconnectPythonError)
base.cr.timeManager.setExceptionInfo()
base.cr.sendDisconnect()
if hasattr(__builtin__, 'base'):
base.destroy()
self.notify.info('Exception exit.\n')
import traceback
traceback.print_exc()
sys.exit()
return
def updatePhase(self, phase):
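        """
        Reset the phase progress, collect the server multifiles assigned to
        this phase, and begin updating them one at a time.
        """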
self.notify.info('Updating multifiles in phase: ' + `phase`)
self.setPercentPhaseComplete(self.currentPhase, 0)
self.phaseMultifileNames = []
numfiles = self.dldb.getServerNumMultifiles()
for i in xrange(self.dldb.getServerNumMultifiles()):
mfname = self.dldb.getServerMultifileName(i)
if self.dldb.getServerMultifilePhase(mfname) == phase:
self.phaseMultifileNames.append(mfname)
self.updateNextMultifile()
def updateNextMultifile(self):
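        """
        Pop the next multifile for the current phase and update it; when
        the phase is exhausted, mount the decompressed multifile, mark the
        phase 100% complete and advance to the next phase (or finish).
        """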
if len(self.phaseMultifileNames) > 0:
self.currentMfname = self.phaseMultifileNames.pop()
self.curMultifileRetry = 0
self.getMultifile(self.currentMfname)
else:
if self.currentMfname is None:
self.notify.warning('no multifile found! See below for debug info:')
for i in xrange(self.dldb.getServerNumMultifiles()):
mfname = self.dldb.getServerMultifileName(i)
phase = self.dldb.getServerMultifilePhase(mfname)
print i, mfname, phase
self.handleGenericMultifileError()
decompressedMfname = os.path.splitext(self.currentMfname)[0]
localFilename = Filename(self.mfDir, Filename(decompressedMfname))
nextIndex = self.LauncherPhases.index(self.currentPhase) + 1
if nextIndex < len(self.LauncherPhases):
self.MakeNTFSFilesGlobalWriteable(localFilename)
else:
self.MakeNTFSFilesGlobalWriteable()
vfs = VirtualFileSystem.getGlobalPtr()
vfs.mount(localFilename, '.', VirtualFileSystem.MFReadOnly)
self.setPercentPhaseComplete(self.currentPhase, 100)
self.notify.info('Done updating multifiles in phase: ' + `(self.currentPhase)`)
self.progressSoFar += int(round(self.phaseOverallMap[self.currentPhase] * 100))
self.notify.info('progress so far ' + `(self.progressSoFar)`)
messenger.send('phaseComplete-' + `(self.currentPhase)`)
if nextIndex < len(self.LauncherPhases):
self.currentPhase = self.LauncherPhases[nextIndex]
self.currentPhaseIndex = nextIndex + 1
self.currentPhaseName = self.Localizer.LauncherPhaseNames[self.currentPhase]
self.updatePhase(self.currentPhase)
else:
self.notify.info('ALL PHASES COMPLETE')
self.maybeStartGame()
messenger.send('launcherAllPhasesComplete')
self.cleanup()
return
def isDownloadComplete(self):
return self._downloadComplete
def updateMultifileDone(self):
self.updateNextMultifile()
def downloadMultifileDone(self):
self.getDecompressMultifile(self.currentMfname)
def getMultifile(self, mfname):
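        """
        Ensure mfname is present and complete on disk: reuse an existing
        decompressed copy when its hash still matches the download db,
        resume a partial download when the recorded hash matches the
        server's, or restart the download when the server version has
        changed.
        """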
self.notify.info('Downloading multifile: ' + mfname)
if not self.dldb.clientMultifileExists(mfname):
self.maybeStartGame()
            self.notify.info('Multifile does not exist in client db, creating new record: ' + mfname)
self.dldb.addClientMultifile(mfname)
if self.DecompressMultifiles:
curHash = self.dldb.getServerMultifileHash(mfname)
self.dldb.setClientMultifileHash(mfname, curHash)
localFilename = Filename(self.mfDir, Filename(mfname))
if localFilename.exists():
curSize = localFilename.getFileSize()
self.dldb.setClientMultifileSize(mfname, curSize)
if curSize == self.dldb.getServerMultifileSize(mfname):
self.dldb.setClientMultifileComplete(mfname)
decompressedMfname = os.path.splitext(mfname)[0]
decompressedFilename = Filename(self.mfDir, Filename(decompressedMfname))
if self.DecompressMultifiles:
if (not self.dldb.clientMultifileComplete(mfname) or not self.dldb.clientMultifileDecompressed(mfname)) and decompressedFilename.exists():
clientMd5 = HashVal()
clientMd5.hashFile(decompressedFilename)
clientVer = self.dldb.getVersion(Filename(decompressedMfname), clientMd5)
if clientVer != -1:
self.notify.info('Decompressed multifile is already on disk and correct: %s (version %s)' % (mfname, clientVer))
self.dldb.setClientMultifileComplete(mfname)
self.dldb.setClientMultifileDecompressed(mfname)
compressedFilename = Filename(self.mfDir, Filename(mfname))
compressedFilename.unlink()
extractedOk = True
numFiles = self.dldb.getServerNumFiles(mfname)
for i in xrange(numFiles):
subfile = self.dldb.getServerFileName(mfname, i)
fn = Filename(self.mfDir, Filename(subfile))
if fn.compareTimestamps(decompressedFilename) <= 0:
extractedOk = False
break
if extractedOk:
self.notify.info('Multifile appears to have been extracted already.')
self.dldb.setClientMultifileExtracted(mfname)
if not self.dldb.clientMultifileComplete(mfname) or not decompressedFilename.exists():
self.maybeStartGame()
currentSize = self.dldb.getClientMultifileSize(mfname)
totalSize = self.dldb.getServerMultifileSize(mfname)
localFilename = Filename(self.mfDir, Filename(mfname))
if not localFilename.exists():
currentSize = 0
if currentSize == 0:
self.notify.info('Multifile has not been started, ' + 'downloading new file: ' + mfname)
curHash = self.dldb.getServerMultifileHash(mfname)
self.dldb.setClientMultifileHash(mfname, curHash)
self.phaseNewDownload[self.currentPhase] = 1
self.downloadMultifile(self.contentDir + mfname, localFilename, mfname, self.downloadMultifileDone, totalSize, 0, self.downloadMultifileWriteToDisk)
else:
clientHash = self.dldb.getClientMultifileHash(mfname)
serverHash = self.dldb.getServerMultifileHash(mfname)
if clientHash.eq(serverHash):
self.notify.info('Multifile is not complete, finishing download for %s, size = %s / %s' % (mfname, currentSize, totalSize))
self.downloadMultifile(self.contentDir + mfname, localFilename, mfname, self.downloadMultifileDone, totalSize, currentSize, self.downloadMultifileWriteToDisk)
elif self.curMultifileRetry < self.multifileRetries:
self.notify.info('recover attempt: %s / %s' % (self.curMultifileRetry, self.multifileRetries))
self.curMultifileRetry += 1
self.notify.info('Multifile is not complete, and is out of date. ' + 'Restarting download with newest multifile')
self.dldb.setClientMultifileIncomplete(self.currentMfname)
self.dldb.setClientMultifileSize(self.currentMfname, 0)
if self.DecompressMultifiles:
self.dldb.setClientMultifileHash(self.currentMfname, serverHash)
self.getMultifile(self.currentMfname)
else:
self.setPandaErrorCode(6)
self.notify.info('getMultifile: Failed to download multifile')
sys.exit()
else:
self.notify.info('Multifile already complete: ' + mfname)
self.downloadMultifileDone()
def getDecompressMultifile(self, mfname):
if not self.DecompressMultifiles:
self.decompressMultifileDone()
elif not self.dldb.clientMultifileDecompressed(mfname):
self.maybeStartGame()
self.notify.info('decompressMultifile: Decompressing multifile: ' + mfname)
localFilename = Filename(self.mfDir, Filename(mfname))
self.decompressMultifile(mfname, localFilename, self.decompressMultifileDone)
else:
self.notify.info('decompressMultifile: Multifile already decompressed: ' + mfname)
self.decompressMultifileDone()
def decompressMultifileDone(self):
if self.phaseNewDownload[self.currentPhase]:
self.setPercentPhaseComplete(self.currentPhase, 95)
self.extractMultifile(self.currentMfname)
def extractMultifile(self, mfname):
if not self.dldb.clientMultifileExtracted(mfname):
self.maybeStartGame()
self.notify.info('extractMultifile: Extracting multifile: ' + mfname)
decompressedMfname = os.path.splitext(mfname)[0]
localFilename = Filename(self.mfDir, Filename(decompressedMfname))
destDir = Filename(self.topDir)
self.notify.info('extractMultifile: Extracting: ' + localFilename.cStr() + ' to: ' + destDir.cStr())
self.extract(mfname, localFilename, destDir, self.extractMultifileDone)
else:
self.notify.info('extractMultifile: Multifile already extracted: ' + mfname)
self.extractMultifileDone()
def extractMultifileDone(self):
if self.phaseNewDownload[self.currentPhase]:
self.setPercentPhaseComplete(self.currentPhase, 99)
self.notify.info('extractMultifileDone: Finished updating multifile: ' + self.currentMfname)
self.patchMultifile()
def getPatchFilename(self, fname, currentVersion):
return fname + '.v' + `currentVersion` + '.' + self.patchExtension
def downloadPatches(self):
if len(self.patchList) > 0:
self.currentPatch, self.currentPatchee, self.currentPatchVersion = self.patchList.pop()
self.notify.info(self.contentDir)
self.notify.info(self.currentPatch)
patchFile = self.currentPatch + '.pz'
serverPatchFilePath = self.contentDir + patchFile
self.notify.info(serverPatchFilePath)
localPatchFilename = Filename(self.patchDir, Filename(patchFile))
if self.currentPhase > 3:
self.download(serverPatchFilePath, localPatchFilename, self.downloadPatchDone, self.downloadPatchSimpleProgress)
else:
self.download(serverPatchFilePath, localPatchFilename, self.downloadPatchDone, self.downloadPatchOverallProgress)
else:
self.notify.info('applyNextPatch: Done patching multifile: ' + `(self.currentPhase)`)
self.patchDone()
def downloadPatchDone(self):
self.patchDownloadSoFar += self.httpChannel.getBytesDownloaded()
self.notify.info('downloadPatchDone: Decompressing patch file: ' + self.currentPatch + '.pz')
self.decompressFile(Filename(self.patchDir, Filename(self.currentPatch + '.pz')), self.decompressPatchDone)
def decompressPatchDone(self):
self.notify.info('decompressPatchDone: Patching file: ' + self.currentPatchee + ' from ver: ' + `(self.currentPatchVersion)`)
patchFile = Filename(self.patchDir, Filename(self.currentPatch))
patchFile.setBinary()
patchee = Filename(self.mfDir, Filename(self.currentPatchee))
patchee.setBinary()
self.patch(patchFile, patchee, self.downloadPatches)
def patchDone(self):
self.notify.info('patchDone: Patch successful')
del self.currentPatch
del self.currentPatchee
del self.currentPatchVersion
decompressedMfname = os.path.splitext(self.currentMfname)[0]
localFilename = Filename(self.mfDir, Filename(decompressedMfname))
destDir = Filename(self.topDir)
self.extract(self.currentMfname, localFilename, destDir, self.updateMultifileDone)
def startReextractingFiles(self):
self.notify.info('startReextractingFiles: Reextracting ' + `(len(self.reextractList))` + ' files for multifile: ' + self.currentMfname)
self.launcherMessage(self.Localizer.LauncherRecoverFiles)
self.currentMfile = Multifile()
decompressedMfname = os.path.splitext(self.currentMfname)[0]
self.currentMfile.openRead(Filename(self.mfDir, Filename(decompressedMfname)))
self.reextractNextFile()
def reextractNextFile(self):
failure = 0
while not failure and len(self.reextractList) > 0:
currentReextractFile = self.reextractList.pop()
subfileIndex = self.currentMfile.findSubfile(currentReextractFile)
if subfileIndex >= 0:
destFilename = Filename(self.topDir, Filename(currentReextractFile))
result = self.currentMfile.extractSubfile(subfileIndex, destFilename)
if not result:
self.notify.warning('reextractNextFile: Failure on reextract.')
failure = 1
else:
self.notify.warning('reextractNextFile: File not found in multifile: ' + `currentReextractFile`)
failure = 1
if failure:
sys.exit()
self.notify.info('reextractNextFile: Done reextracting files for multifile: ' + `(self.currentPhase)`)
del self.currentMfile
self.updateMultifileDone()
def patchMultifile(self):
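        """
        Check the local multifile's version against the download db:
        version 1 is current and goes straight to patchAndHash, -1 means
        the hash is unrecognized and the multifile is redownloaded, and
        higher versions queue the chain of patch files needed to bring it
        up to date.
        """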
self.launcherMessage(self.Localizer.LauncherCheckUpdates % {'name': self.currentPhaseName,
'current': self.currentPhaseIndex,
'total': self.numPhases})
self.notify.info('patchMultifile: Checking for patches on multifile: ' + self.currentMfname)
self.patchList = []
clientMd5 = HashVal()
decompressedMfname = os.path.splitext(self.currentMfname)[0]
localFilename = Filename(self.mfDir, Filename(decompressedMfname))
clientMd5.hashFile(localFilename)
clientVer = self.dldb.getVersion(Filename(decompressedMfname), clientMd5)
if clientVer == 1:
self.patchAndHash()
return
elif clientVer == -1:
self.notify.info('patchMultifile: Invalid hash for file: ' + self.currentMfname)
self.maybeStartGame()
if self.curMultifileRetry < self.multifileRetries:
self.notify.info('recover attempt: %s / %s' % (self.curMultifileRetry, self.multifileRetries))
self.curMultifileRetry += 1
self.notify.info('patchMultifile: Restarting download with newest multifile')
self.dldb.setClientMultifileIncomplete(self.currentMfname)
self.dldb.setClientMultifileSize(self.currentMfname, 0)
self.getMultifile(self.currentMfname)
else:
self.setPandaErrorCode(6)
self.notify.info('patchMultifile: Failed to download multifile')
sys.exit()
return
elif clientVer > 1:
self.notify.info('patchMultifile: Old version for multifile: ' + self.currentMfname + ' Client ver: ' + `clientVer`)
self.maybeStartGame()
self.totalPatchDownload = 0
self.patchDownloadSoFar = 0
for ver in xrange(1, clientVer):
patch = self.getPatchFilename(decompressedMfname, ver + 1)
patchee = decompressedMfname
patchVersion = ver + 1
self.patchList.append((patch, patchee, patchVersion))
if self.currentPhase == 3:
self.totalPatchDownload += self.getProgressSum(patch)
self.notify.info('total patch to be downloaded = ' + `(self.totalPatchDownload)`)
self.downloadPatches()
return
def patchAndHash(self):
self.reextractList = []
self.PAHClean = 1
self.PAHNumFiles = self.dldb.getServerNumFiles(self.currentMfname)
self.PAHFileCounter = 0
if self.PAHNumFiles > 0:
task = MiniTask(self.patchAndHashTask)
task.cleanCallback = self.updateMultifileDone
task.uncleanCallback = self.startReextractingFiles
self._addMiniTask(task, 'patchAndHash')
else:
self.updateMultifileDone()
def patchAndHashTask(self, task):
self.launcherMessage(self.Localizer.LauncherVerifyPhase)
if self.PAHFileCounter == self.PAHNumFiles:
if self.PAHClean:
task.cleanCallback()
else:
task.uncleanCallback()
return task.done
else:
i = self.PAHFileCounter
self.PAHFileCounter += 1
fname = self.dldb.getServerFileName(self.currentMfname, i)
fnameFilename = Filename(self.topDir, Filename(fname))
if not os.path.exists(fnameFilename.toOsSpecific()):
self.notify.info('patchAndHash: File not found: ' + fname)
self.reextractList.append(fname)
self.PAHClean = 0
return task.cont
if self.VerifyFiles and self.dldb.hasVersion(Filename(fname)):
clientMd5 = HashVal()
clientMd5.hashFile(fnameFilename)
clientVer = self.dldb.getVersion(Filename(fname), clientMd5)
if clientVer == 1:
return task.cont
else:
self.notify.info('patchAndHash: Invalid hash for file: ' + fname)
self.reextractList.append(fname)
self.PAHClean = 0
return task.cont
def launcherMessage(self, msg):
if msg != self.lastLauncherMsg:
self.lastLauncherMsg = msg
self.notify.info(msg)
def isTestServer(self):
return self.testServerFlag
def recordPeriodTimeRemaining(self, secondsRemaining):
self.setValue(self.PeriodTimeRemainingKey, int(secondsRemaining))
def recordPeriodName(self, periodName):
self.setValue(self.PeriodNameKey, periodName)
def recordSwid(self, swid):
self.setValue(self.SwidKey, swid)
def getGoUserName(self):
return self.goUserName
def setGoUserName(self, userName):
self.goUserName = userName
def getInstallDir(self):
return self.topDir.cStr()
def setPandaWindowOpen(self):
self.setValue(self.PandaWindowOpenKey, 1)
def setPandaErrorCode(self, code):
self.notify.info('setting panda error code to %s' % code)
self.pandaErrorCode = code
errorLog = open(self.errorfile, 'w')
errorLog.write(str(code) + '\n')
errorLog.flush()
errorLog.close()
def getPandaErrorCode(self):
return self.pandaErrorCode
def setDisconnectDetailsNormal(self):
self.notify.info('Setting Disconnect Details normal')
self.disconnectCode = 0
self.disconnectMsg = 'normal'
def setDisconnectDetails(self, newCode, newMsg):
self.notify.info('New Disconnect Details: %s - %s ' % (newCode, newMsg))
self.disconnectCode = newCode
self.disconnectMsg = newMsg
def setServerVersion(self, version):
self.ServerVersion = version
def getServerVersion(self):
return self.ServerVersion
def getIsNewInstallation(self):
result = self.getValue(self.NewInstallationKey, 1)
result = base.config.GetBool('new-installation', result)
return result
def setIsNotNewInstallation(self):
self.setValue(self.NewInstallationKey, 0)
def getLastLogin(self):
return self.getValue(self.LastLoginKey, '')
def setLastLogin(self, login):
self.setValue(self.LastLoginKey, login)
def setUserLoggedIn(self):
self.setValue(self.UserLoggedInKey, '1')
def setPaidUserLoggedIn(self):
self.setValue(self.PaidUserLoggedInKey, '1')
def getReferrerCode(self):
return self.getValue(self.ReferrerKey, None)
def getPhaseComplete(self, phase):
percentDone = self.phaseComplete[phase]
return percentDone == 100
def setPercentPhaseComplete(self, phase, percent):
self.notify.info('phase updating %s, %s' % (phase, percent))
oldPercent = self.phaseComplete[phase]
if oldPercent != percent:
self.phaseComplete[phase] = percent
messenger.send('launcherPercentPhaseComplete', [phase,
percent,
self.getBandwidth(),
self.byteRate])
percentPhaseCompleteKey = 'PERCENT_PHASE_COMPLETE_' + `phase`
self.setRegistry(percentPhaseCompleteKey, percent)
self.overallComplete = int(round(percent * self.phaseOverallMap[phase])) + self.progressSoFar
self.setRegistry('PERCENT_OVERALL_COMPLETE', self.overallComplete)
def getPercentPhaseComplete(self, phase):
return self.phaseComplete[phase]
def addPhasePostProcess(self, phase, func, taskChain = 'default'):
if self.getPhaseComplete(phase):
func()
return
self.acceptOnce('phaseComplete-%s' % phase, func)
def testBandwidth(self):
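        """
        Sample the measured byte rate and nudge the bandwidth index up or
        down according to INCREASE_THRESHOLD and DECREASE_THRESHOLD.
        """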
self.recordBytesPerSecond()
byteRate = self.getBytesPerSecond()
if byteRate < 0:
return
if byteRate >= self.getBandwidth() * self.INCREASE_THRESHOLD:
self.increaseBandwidth(byteRate)
elif byteRate < self.byteRateRequested * self.DECREASE_THRESHOLD:
self.decreaseBandwidth(byteRate)
def getBandwidth(self):
if self.backgrounded:
bandwidth = self.BANDWIDTH_ARRAY[self.bandwidthIndex] - self.TELEMETRY_BANDWIDTH
else:
bandwidth = self.BANDWIDTH_ARRAY[self.bandwidthIndex]
if self.MAX_BANDWIDTH > 0:
bandwidth = min(bandwidth, self.MAX_BANDWIDTH)
return bandwidth
def increaseBandwidth(self, targetBandwidth = None):
maxBandwidthIndex = len(self.BANDWIDTH_ARRAY) - 1
if self.bandwidthIndex == maxBandwidthIndex:
self.notify.debug('increaseBandwidth: Already at maximum bandwidth')
return 0
self.bandwidthIndex += 1
self.everIncreasedBandwidth = 1
self.setBandwidth()
return 1
def decreaseBandwidth(self, targetBandwidth = None):
if not self.DECREASE_BANDWIDTH:
return 0
if self.backgrounded and self.everIncreasedBandwidth:
return 0
if self.bandwidthIndex == 0:
return 0
else:
self.bandwidthIndex -= 1
if targetBandwidth:
while self.bandwidthIndex > 0 and self.BANDWIDTH_ARRAY[self.bandwidthIndex] > targetBandwidth:
self.bandwidthIndex -= 1
self.setBandwidth()
return 1
def setBandwidth(self):
self.resetBytesPerSecond()
self.httpChannel.setMaxBytesPerSecond(self.getBandwidth())
def resetBytesPerSecond(self):
self.bpsList = []
def recordBytesPerSecond(self):
bytesDownloaded = self.httpChannel.getBytesDownloaded()
bytesRequested = self.httpChannel.getBytesRequested()
t = self.getTime()
self.bpsList.append((t, bytesDownloaded, bytesRequested))
while 1:
if len(self.bpsList) == 0:
break
ft, fb, fr = self.bpsList[0]
if ft < t-self.BPS_WINDOW:
self.bpsList.pop(0)
else:
break
def getBytesPerSecond(self):
if len(self.bpsList) < 2:
return -1
startTime, startBytes, startRequested = self.bpsList[0]
finalTime, finalBytes, finalRequested = self.bpsList[-1]
dt = finalTime - startTime
db = finalBytes - startBytes
dr = finalRequested - startRequested
if dt <= 0.0:
return -1
self.byteRate = db / dt
self.byteRateRequested = dr / dt
return self.byteRate
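    # Illustrative sketch, not part of the original launcher: how the windowed byte
    # rate above drives the adaptive throttle in testBandwidth(), assuming the
    # hypothetical values BANDWIDTH_ARRAY = [30000, 60000, 120000] and
    # INCREASE_THRESHOLD = 0.75.
    #   byteRate = (finalBytes - startBytes) / (finalTime - startTime)  # over BPS_WINDOW
    #   byteRate >= getBandwidth() * INCREASE_THRESHOLD    -> increaseBandwidth()
    #   byteRate <  byteRateRequested * DECREASE_THRESHOLD -> decreaseBandwidth()
    # e.g. a measured 50000 B/s while capped at 60000 B/s gives 50000 / 60000 = 0.83
    # >= 0.75, so bandwidthIndex steps up to the 120000 B/s entry.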
def MakeNTFSFilesGlobalWriteable(self, pathToSet = None):
if not self.WIN32:
return
import win32api
if pathToSet == None:
pathToSet = self.getInstallDir()
else:
pathToSet = pathToSet.cStr() + '*'
DrivePath = pathToSet[0:3]
try:
volname, volsernum, maxfilenamlen, sysflags, filesystemtype = win32api.GetVolumeInformation(DrivePath)
except:
return
if self.win32con_FILE_PERSISTENT_ACLS & sysflags:
self.notify.info('NTFS detected, making files global writeable\n')
win32dir = win32api.GetWindowsDirectory()
cmdLine = win32dir + '\\system32\\cacls.exe "' + pathToSet + '" /T /E /C /G Everyone:F > nul'
os.system(cmdLine)
return
def cleanup(self):
self.notify.info('cleanup: cleaning up Launcher')
self.ignoreAll()
del self.clock
del self.dldb
del self.httpChannel
del self.http
def scanForHacks(self):
if not self.WIN32:
return
import _winreg
hacksInstalled = {}
hacksRunning = {}
hackName = ['!xSpeed.net', 'A Speeder', 'Speed Gear']
knownHacksRegistryKeys = {
hackName[0] : [
[_winreg.HKEY_LOCAL_MACHINE, 'Software\\Microsoft\\Windows\\CurrentVersion\\Run\\!xSpeed'],
[_winreg.HKEY_CURRENT_USER, 'Software\\!xSpeednethy'],
[_winreg.HKEY_CURRENT_USER, 'Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\MenuOrder\\Start Menu\\Programs\\!xSpeednet'],
[_winreg.HKEY_LOCAL_MACHINE, 'Software\\Gentee\\Paths\\!xSpeednet'],
[_winreg.HKEY_LOCAL_MACHINE, 'Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\\!xSpeed.net 2.0']],
hackName[1] : [
[_winreg.HKEY_CURRENT_USER, 'Software\\aspeeder'],
[_winreg.HKEY_LOCAL_MACHINE, 'Software\\aspeeder'],
[_winreg.HKEY_LOCAL_MACHINE, 'Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\\aspeeder']]
}
try:
for prog in knownHacksRegistryKeys.keys():
for key in knownHacksRegistryKeys[prog]:
try:
h = _winreg.OpenKey(key[0], key[1])
hacksInstalled[prog] = 1
_winreg.CloseKey(h)
break
except:
pass
except:
pass
knownHacksMUI = {'!xspeednet': hackName[0], 'aspeeder': hackName[1], 'speed gear': hackName[2]}
i = 0
try:
rh = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, 'Software\\Microsoft\\Windows\\ShellNoRoam\\MUICache')
while 1:
name, value, type = _winreg.EnumValue(rh, i)
i += 1
if type == 1:
val = value.lower()
for hackprog in knownHacksMUI:
if val.find(hackprog) != -1:
hacksInstalled[knownHacksMUI[hackprog]] = 1
break
_winreg.CloseKey(rh)
except:
pass
try:
            from otp.launcher import procapi  # bind the name used by getProcessList() below
except:
pass
else:
knownHacksExe = {'!xspeednet.exe': hackName[0], 'aspeeder.exe': hackName[1], 'speedgear.exe': hackName[2]}
try:
for p in procapi.getProcessList():
pname = p.name
if pname in knownHacksExe:
hacksRunning[knownHacksExe[pname]] = 1
except:
pass
if len(hacksInstalled) > 0:
self.notify.info("Third party programs installed:")
for hack in hacksInstalled.keys():
self.notify.info(hack)
if len(hacksRunning) > 0:
self.notify.info("Third party programs running:")
for hack in hacksRunning.keys():
self.notify.info(hack)
self.setPandaErrorCode(8)
sys.exit()
def getBlue(self):
return None
def getPlayToken(self):
return None
def getDISLToken(self):
DISLToken = self.getValue(self.DISLTokenKey)
self.setValue(self.DISLTokenKey, '')
if DISLToken == 'NO DISLTOKEN':
DISLToken = None
return DISLToken
|
Spiderlover/Toontown
|
otp/launcher/LauncherBase.py
|
Python
|
mit
| 85,462
|
from ctypes import *
import ctypes.util
import os, os.path
from common import *
import common
common.dll = dll = CDLL(os.path.abspath("libtest4.so"))
dll.test_pyd.restype = py_object
reg_fun('utf8_to_python', UTF8CONV(utf8_to_str))
a = dll.test_pyd(1)
assert a == u'Doctor!\0'
a = dll.test_pyd(2)
assert a == u'Doctor!\0 Doctor!\0'
a = dll.test_pyd(5)
assert a == u'Doctor!\0 Doctor!\0 \u3061!\0 Doctor!\0 Doctor!\0'
|
eugeneai/pyd
|
examples/misc/ctypes/test4.py
|
Python
|
mit
| 421
|
# pylint: disable=W0223
import warnings
import numpy as np
from pandas.compat import range, zip
import pandas.compat as compat
from pandas.core.dtypes.generic import ABCDataFrame, ABCPanel, ABCSeries
from pandas.core.dtypes.common import (
is_integer_dtype,
is_integer, is_float,
is_list_like,
is_sequence,
is_iterator,
is_scalar,
is_sparse,
_is_unorderable_exception,
_ensure_platform_int)
from pandas.core.dtypes.missing import isnull, _infer_fill_value
from pandas.core.index import Index, MultiIndex
import pandas.core.common as com
from pandas.core.common import (is_bool_indexer, _asarray_tuplesafe,
is_null_slice, is_full_slice,
_values_from_object)
# the supported indexers
def get_indexers_list():
return [
('ix', _IXIndexer),
('iloc', _iLocIndexer),
('loc', _LocIndexer),
('at', _AtIndexer),
('iat', _iAtIndexer),
]
# "null slice"
_NS = slice(None, None)
# the public IndexSlicerMaker
class _IndexSlice(object):
"""
Create an object to more easily perform multi-index slicing
Examples
--------
>>> midx = pd.MultiIndex.from_product([['A0','A1'], ['B0','B1','B2','B3']])
>>> columns = ['foo', 'bar']
>>> dfmi = pd.DataFrame(np.arange(16).reshape((len(midx), len(columns))),
index=midx, columns=columns)
Using the default slice command:
>>> dfmi.loc[(slice(None), slice('B0', 'B1')), :]
foo bar
A0 B0 0 1
B1 2 3
A1 B0 8 9
B1 10 11
Using the IndexSlice class for a more intuitive command:
>>> idx = pd.IndexSlice
>>> dfmi.loc[idx[:, 'B0':'B1'], :]
foo bar
A0 B0 0 1
B1 2 3
A1 B0 8 9
B1 10 11
"""
def __getitem__(self, arg):
return arg
IndexSlice = _IndexSlice()
class IndexingError(Exception):
pass
class _NDFrameIndexer(object):
_valid_types = None
_exception = KeyError
axis = None
def __init__(self, obj, name):
self.obj = obj
self.ndim = obj.ndim
self.name = name
def __call__(self, axis=None):
# we need to return a copy of ourselves
new_self = self.__class__(self.obj, self.name)
new_self.axis = axis
return new_self
def __iter__(self):
raise NotImplementedError('ix is not iterable')
def __getitem__(self, key):
if type(key) is tuple:
key = tuple(com._apply_if_callable(x, self.obj) for x in key)
try:
values = self.obj.get_value(*key)
if is_scalar(values):
return values
except Exception:
pass
return self._getitem_tuple(key)
else:
key = com._apply_if_callable(key, self.obj)
return self._getitem_axis(key, axis=0)
def _get_label(self, label, axis=0):
if self.ndim == 1:
# for perf reasons we want to try _xs first
# as its basically direct indexing
# but will fail when the index is not present
# see GH5667
try:
return self.obj._xs(label, axis=axis)
except:
return self.obj[label]
elif isinstance(label, tuple) and isinstance(label[axis], slice):
raise IndexingError('no slices here, handle elsewhere')
return self.obj._xs(label, axis=axis)
def _get_loc(self, key, axis=0):
return self.obj._ixs(key, axis=axis)
def _slice(self, obj, axis=0, kind=None):
return self.obj._slice(obj, axis=axis, kind=kind)
def _get_setitem_indexer(self, key):
if self.axis is not None:
return self._convert_tuple(key, is_setter=True)
axis = self.obj._get_axis(0)
if isinstance(axis, MultiIndex):
try:
return axis.get_loc(key)
except Exception:
pass
if isinstance(key, tuple):
try:
return self._convert_tuple(key, is_setter=True)
except IndexingError:
pass
if isinstance(key, range):
return self._convert_range(key, is_setter=True)
try:
return self._convert_to_indexer(key, is_setter=True)
except TypeError as e:
# invalid indexer type vs 'other' indexing errors
if 'cannot do' in str(e):
raise
raise IndexingError(key)
def __setitem__(self, key, value):
if isinstance(key, tuple):
key = tuple(com._apply_if_callable(x, self.obj) for x in key)
else:
key = com._apply_if_callable(key, self.obj)
indexer = self._get_setitem_indexer(key)
self._setitem_with_indexer(indexer, value)
def _has_valid_type(self, k, axis):
raise NotImplementedError()
def _has_valid_tuple(self, key):
""" check the key for valid keys across my indexer """
for i, k in enumerate(key):
if i >= self.obj.ndim:
raise IndexingError('Too many indexers')
if not self._has_valid_type(k, i):
raise ValueError("Location based indexing can only have [%s] "
"types" % self._valid_types)
def _should_validate_iterable(self, axis=0):
""" return a boolean whether this axes needs validation for a passed
iterable
"""
ax = self.obj._get_axis(axis)
if isinstance(ax, MultiIndex):
return False
elif ax.is_floating():
return False
return True
def _is_nested_tuple_indexer(self, tup):
if any([isinstance(ax, MultiIndex) for ax in self.obj.axes]):
return any([is_nested_tuple(tup, ax) for ax in self.obj.axes])
return False
def _convert_tuple(self, key, is_setter=False):
keyidx = []
if self.axis is not None:
axis = self.obj._get_axis_number(self.axis)
for i in range(self.ndim):
if i == axis:
keyidx.append(self._convert_to_indexer(
key, axis=axis, is_setter=is_setter))
else:
keyidx.append(slice(None))
else:
for i, k in enumerate(key):
if i >= self.obj.ndim:
raise IndexingError('Too many indexers')
idx = self._convert_to_indexer(k, axis=i, is_setter=is_setter)
keyidx.append(idx)
return tuple(keyidx)
def _convert_range(self, key, is_setter=False):
""" convert a range argument """
return list(key)
def _convert_scalar_indexer(self, key, axis):
# if we are accessing via lowered dim, use the last dim
ax = self.obj._get_axis(min(axis, self.ndim - 1))
# a scalar
return ax._convert_scalar_indexer(key, kind=self.name)
def _convert_slice_indexer(self, key, axis):
# if we are accessing via lowered dim, use the last dim
ax = self.obj._get_axis(min(axis, self.ndim - 1))
return ax._convert_slice_indexer(key, kind=self.name)
def _has_valid_setitem_indexer(self, indexer):
return True
def _has_valid_positional_setitem_indexer(self, indexer):
""" validate that an positional indexer cannot enlarge its target
will raise if needed, does not modify the indexer externally
"""
if isinstance(indexer, dict):
raise IndexError("{0} cannot enlarge its target object"
.format(self.name))
else:
if not isinstance(indexer, tuple):
indexer = self._tuplify(indexer)
for ax, i in zip(self.obj.axes, indexer):
if isinstance(i, slice):
# should check the stop slice?
pass
elif is_list_like_indexer(i):
# should check the elements?
pass
elif is_integer(i):
if i >= len(ax):
raise IndexError("{0} cannot enlarge its target object"
.format(self.name))
elif isinstance(i, dict):
raise IndexError("{0} cannot enlarge its target object"
.format(self.name))
return True
def _setitem_with_indexer(self, indexer, value):
self._has_valid_setitem_indexer(indexer)
# also has the side effect of consolidating in-place
# TODO: Panel, DataFrame are not imported, remove?
from pandas import Panel, DataFrame, Series # noqa
info_axis = self.obj._info_axis_number
# maybe partial set
take_split_path = self.obj._is_mixed_type
# if there is only one block/type, still have to take split path
# unless the block is one-dimensional or it can hold the value
if not take_split_path and self.obj._data.blocks:
blk, = self.obj._data.blocks
if 1 < blk.ndim: # in case of dict, keys are indices
val = list(value.values()) if isinstance(value,
dict) else value
take_split_path = not blk._can_hold_element(val)
if isinstance(indexer, tuple) and len(indexer) == len(self.obj.axes):
for i, ax in zip(indexer, self.obj.axes):
# if we have any multi-indexes that have non-trivial slices
# (not null slices) then we must take the split path, xref
# GH 10360
if (isinstance(ax, MultiIndex) and
not (is_integer(i) or is_null_slice(i))):
take_split_path = True
break
if isinstance(indexer, tuple):
nindexer = []
for i, idx in enumerate(indexer):
if isinstance(idx, dict):
# reindex the axis to the new value
# and set inplace
key, _ = convert_missing_indexer(idx)
# if this is the items axes, then take the main missing
# path first
# this correctly sets the dtype and avoids cache issues
# essentially this separates out the block that is needed
# to possibly be modified
if self.ndim > 1 and i == self.obj._info_axis_number:
# add the new item, and set the value
# must have all defined axes if we have a scalar
# or a list-like on the non-info axes if we have a
# list-like
len_non_info_axes = [
len(_ax) for _i, _ax in enumerate(self.obj.axes)
if _i != i
]
if any([not l for l in len_non_info_axes]):
if not is_list_like_indexer(value):
raise ValueError("cannot set a frame with no "
"defined index and a scalar")
self.obj[key] = value
return self.obj
# add a new item with the dtype setup
self.obj[key] = _infer_fill_value(value)
new_indexer = convert_from_missing_indexer_tuple(
indexer, self.obj.axes)
self._setitem_with_indexer(new_indexer, value)
return self.obj
# reindex the axis
# make sure to clear the cache because we are
# just replacing the block manager here
# so the object is the same
index = self.obj._get_axis(i)
labels = index.insert(len(index), key)
self.obj._data = self.obj.reindex_axis(labels, i)._data
self.obj._maybe_update_cacher(clear=True)
self.obj.is_copy = None
nindexer.append(labels.get_loc(key))
else:
nindexer.append(idx)
indexer = tuple(nindexer)
else:
indexer, missing = convert_missing_indexer(indexer)
if missing:
# reindex the axis to the new value
# and set inplace
if self.ndim == 1:
index = self.obj.index
new_index = index.insert(len(index), indexer)
# we have a coerced indexer, e.g. a float
# that matches in an Int64Index, so
# we will not create a duplicate index, rather
# index to that element
# e.g. 0.0 -> 0
# GH12246
if index.is_unique:
new_indexer = index.get_indexer([new_index[-1]])
if (new_indexer != -1).any():
return self._setitem_with_indexer(new_indexer,
value)
# this preserves dtype of the value
new_values = Series([value])._values
if len(self.obj._values):
try:
new_values = np.concatenate([self.obj._values,
new_values])
except TypeError:
new_values = np.concatenate([self.obj.asobject,
new_values])
self.obj._data = self.obj._constructor(
new_values, index=new_index, name=self.obj.name)._data
self.obj._maybe_update_cacher(clear=True)
return self.obj
elif self.ndim == 2:
# no columns and scalar
if not len(self.obj.columns):
raise ValueError("cannot set a frame with no defined "
"columns")
# append a Series
if isinstance(value, Series):
value = value.reindex(index=self.obj.columns,
copy=True)
value.name = indexer
# a list-list
else:
# must have conforming columns
if is_list_like_indexer(value):
if len(value) != len(self.obj.columns):
raise ValueError("cannot set a row with "
"mismatched columns")
value = Series(value, index=self.obj.columns,
name=indexer)
self.obj._data = self.obj.append(value)._data
self.obj._maybe_update_cacher(clear=True)
return self.obj
# set using setitem (Panel and > dims)
elif self.ndim >= 3:
return self.obj.__setitem__(indexer, value)
# set
item_labels = self.obj._get_axis(info_axis)
# align and set the values
if take_split_path:
if not isinstance(indexer, tuple):
indexer = self._tuplify(indexer)
if isinstance(value, ABCSeries):
value = self._align_series(indexer, value)
info_idx = indexer[info_axis]
if is_integer(info_idx):
info_idx = [info_idx]
labels = item_labels[info_idx]
# if we have a partial multiindex, then need to adjust the plane
# indexer here
if (len(labels) == 1 and
isinstance(self.obj[labels[0]].axes[0], MultiIndex)):
item = labels[0]
obj = self.obj[item]
index = obj.index
idx = indexer[:info_axis][0]
plane_indexer = tuple([idx]) + indexer[info_axis + 1:]
lplane_indexer = length_of_indexer(plane_indexer[0], index)
# require that we are setting the right number of values that
# we are indexing
if is_list_like_indexer(value) and np.iterable(
value) and lplane_indexer != len(value):
if len(obj[idx]) != len(value):
raise ValueError("cannot set using a multi-index "
"selection indexer with a different "
"length than the value")
# make sure we have an ndarray
value = getattr(value, 'values', value).ravel()
# we can directly set the series here
# as we select a slice indexer on the mi
idx = index._convert_slice_indexer(idx)
obj._consolidate_inplace()
obj = obj.copy()
obj._data = obj._data.setitem(indexer=tuple([idx]),
value=value)
self.obj[item] = obj
return
# non-mi
else:
plane_indexer = indexer[:info_axis] + indexer[info_axis + 1:]
if info_axis > 0:
plane_axis = self.obj.axes[:info_axis][0]
lplane_indexer = length_of_indexer(plane_indexer[0],
plane_axis)
else:
lplane_indexer = 0
def setter(item, v):
s = self.obj[item]
pi = plane_indexer[0] if lplane_indexer == 1 else plane_indexer
# perform the equivalent of a setitem on the info axis
# as we have a null slice or a slice with full bounds
# which means essentially reassign to the columns of a
# multi-dim object
# GH6149 (null slice), GH10408 (full bounds)
if (isinstance(pi, tuple) and
all(is_null_slice(idx) or
is_full_slice(idx, len(self.obj))
for idx in pi)):
s = v
else:
# set the item, possibly having a dtype change
s._consolidate_inplace()
s = s.copy()
s._data = s._data.setitem(indexer=pi, value=v)
s._maybe_update_cacher(clear=True)
# reset the sliced object if unique
self.obj[item] = s
def can_do_equal_len():
""" return True if we have an equal len settable """
if not len(labels) == 1 or not np.iterable(value):
return False
l = len(value)
item = labels[0]
index = self.obj[item].index
# equal len list/ndarray
if len(index) == l:
return True
elif lplane_indexer == l:
return True
return False
# we need an iterable, with a ndim of at least 1
# eg. don't pass through np.array(0)
if is_list_like_indexer(value) and getattr(value, 'ndim', 1) > 0:
# we have an equal len Frame
if isinstance(value, ABCDataFrame) and value.ndim > 1:
sub_indexer = list(indexer)
multiindex_indexer = isinstance(labels, MultiIndex)
for item in labels:
if item in value:
sub_indexer[info_axis] = item
v = self._align_series(
tuple(sub_indexer), value[item],
multiindex_indexer)
else:
v = np.nan
setter(item, v)
# we have an equal len ndarray/convertible to our labels
elif np.array(value).ndim == 2:
# note that this coerces the dtype if we are mixed
# GH 7551
value = np.array(value, dtype=object)
if len(labels) != value.shape[1]:
raise ValueError('Must have equal len keys and value '
'when setting with an ndarray')
for i, item in enumerate(labels):
# setting with a list, recoerces
setter(item, value[:, i].tolist())
# we have an equal len list/ndarray
elif can_do_equal_len():
setter(labels[0], value)
# per label values
else:
if len(labels) != len(value):
raise ValueError('Must have equal len keys and value '
'when setting with an iterable')
for item, v in zip(labels, value):
setter(item, v)
else:
# scalar
for item in labels:
setter(item, value)
else:
if isinstance(indexer, tuple):
indexer = maybe_convert_ix(*indexer)
# if we are setting on the info axis ONLY
# set using those methods to avoid block-splitting
# logic here
if (len(indexer) > info_axis and
is_integer(indexer[info_axis]) and
all(is_null_slice(idx) for i, idx in enumerate(indexer)
if i != info_axis) and item_labels.is_unique):
self.obj[item_labels[indexer[info_axis]]] = value
return
if isinstance(value, (ABCSeries, dict)):
value = self._align_series(indexer, Series(value))
elif isinstance(value, ABCDataFrame):
value = self._align_frame(indexer, value)
if isinstance(value, ABCPanel):
value = self._align_panel(indexer, value)
# check for chained assignment
self.obj._check_is_chained_assignment_possible()
# actually do the set
self.obj._consolidate_inplace()
self.obj._data = self.obj._data.setitem(indexer=indexer,
value=value)
self.obj._maybe_update_cacher(clear=True)
def _align_series(self, indexer, ser, multiindex_indexer=False):
"""
Parameters
----------
indexer : tuple, slice, scalar
The indexer used to get the locations that will be set to
`ser`
ser : pd.Series
The values to assign to the locations specified by `indexer`
multiindex_indexer : boolean, optional
Defaults to False. Should be set to True if `indexer` was from
a `pd.MultiIndex`, to avoid unnecessary broadcasting.
        Returns
        -------
`np.array` of `ser` broadcast to the appropriate shape for assignment
to the locations selected by `indexer`
"""
if isinstance(indexer, (slice, np.ndarray, list, Index)):
indexer = tuple([indexer])
if isinstance(indexer, tuple):
# flatten np.ndarray indexers
ravel = lambda i: i.ravel() if isinstance(i, np.ndarray) else i
indexer = tuple(map(ravel, indexer))
aligners = [not is_null_slice(idx) for idx in indexer]
sum_aligners = sum(aligners)
single_aligner = sum_aligners == 1
is_frame = self.obj.ndim == 2
is_panel = self.obj.ndim >= 3
obj = self.obj
# are we a single alignable value on a non-primary
# dim (e.g. panel: 1,2, or frame: 0) ?
# hence need to align to a single axis dimension
# rather that find all valid dims
# frame
if is_frame:
single_aligner = single_aligner and aligners[0]
# panel
elif is_panel:
single_aligner = (single_aligner and
(aligners[1] or aligners[2]))
# we have a frame, with multiple indexers on both axes; and a
# series, so need to broadcast (see GH5206)
if (sum_aligners == self.ndim and
all([is_sequence(_) for _ in indexer])):
ser = ser.reindex(obj.axes[0][indexer[0]], copy=True)._values
# single indexer
if len(indexer) > 1 and not multiindex_indexer:
l = len(indexer[1])
ser = np.tile(ser, l).reshape(l, -1).T
return ser
for i, idx in enumerate(indexer):
ax = obj.axes[i]
# multiple aligners (or null slices)
if is_sequence(idx) or isinstance(idx, slice):
if single_aligner and is_null_slice(idx):
continue
new_ix = ax[idx]
if not is_list_like_indexer(new_ix):
new_ix = Index([new_ix])
else:
new_ix = Index(new_ix)
if ser.index.equals(new_ix) or not len(new_ix):
return ser._values.copy()
return ser.reindex(new_ix)._values
# 2 dims
elif single_aligner and is_frame:
# reindex along index
ax = self.obj.axes[1]
if ser.index.equals(ax) or not len(ax):
return ser._values.copy()
return ser.reindex(ax)._values
# >2 dims
elif single_aligner:
broadcast = []
for n, labels in enumerate(self.obj._get_plane_axes(i)):
# reindex along the matching dimensions
if len(labels & ser.index):
ser = ser.reindex(labels)
else:
broadcast.append((n, len(labels)))
# broadcast along other dims
ser = ser._values.copy()
for (axis, l) in broadcast:
shape = [-1] * (len(broadcast) + 1)
shape[axis] = l
ser = np.tile(ser, l).reshape(shape)
if self.obj.ndim == 3:
ser = ser.T
return ser
elif is_scalar(indexer):
ax = self.obj._get_axis(1)
if ser.index.equals(ax):
return ser._values.copy()
return ser.reindex(ax)._values
raise ValueError('Incompatible indexer with Series')
def _align_frame(self, indexer, df):
is_frame = self.obj.ndim == 2
is_panel = self.obj.ndim >= 3
if isinstance(indexer, tuple):
aligners = [not is_null_slice(idx) for idx in indexer]
sum_aligners = sum(aligners)
# TODO: single_aligner is not used
single_aligner = sum_aligners == 1 # noqa
idx, cols = None, None
sindexers = []
for i, ix in enumerate(indexer):
ax = self.obj.axes[i]
if is_sequence(ix) or isinstance(ix, slice):
if idx is None:
idx = ax[ix].ravel()
elif cols is None:
cols = ax[ix].ravel()
else:
break
else:
sindexers.append(i)
# panel
if is_panel:
# need to conform to the convention
# as we are not selecting on the items axis
# and we have a single indexer
# GH 7763
if len(sindexers) == 1 and sindexers[0] != 0:
df = df.T
if idx is None:
idx = df.index
if cols is None:
cols = df.columns
if idx is not None and cols is not None:
if df.index.equals(idx) and df.columns.equals(cols):
val = df.copy()._values
else:
val = df.reindex(idx, columns=cols)._values
return val
elif ((isinstance(indexer, slice) or is_list_like_indexer(indexer)) and
is_frame):
ax = self.obj.index[indexer]
if df.index.equals(ax):
val = df.copy()._values
else:
# we have a multi-index and are trying to align
# with a particular, level GH3738
if (isinstance(ax, MultiIndex) and
isinstance(df.index, MultiIndex) and
ax.nlevels != df.index.nlevels):
raise TypeError("cannot align on a multi-index with out "
"specifying the join levels")
val = df.reindex(index=ax)._values
return val
elif is_scalar(indexer) and is_panel:
idx = self.obj.axes[1]
cols = self.obj.axes[2]
# by definition we are indexing on the 0th axis
# a passed in dataframe which is actually a transpose
# of what is needed
if idx.equals(df.index) and cols.equals(df.columns):
return df.copy()._values
return df.reindex(idx, columns=cols)._values
raise ValueError('Incompatible indexer with DataFrame')
def _align_panel(self, indexer, df):
# TODO: is_frame, is_panel are unused
is_frame = self.obj.ndim == 2 # noqa
is_panel = self.obj.ndim >= 3 # noqa
raise NotImplementedError("cannot set using an indexer with a Panel "
"yet!")
def _getitem_tuple(self, tup):
try:
return self._getitem_lowerdim(tup)
except IndexingError:
pass
# no multi-index, so validate all of the indexers
self._has_valid_tuple(tup)
# ugly hack for GH #836
if self._multi_take_opportunity(tup):
return self._multi_take(tup)
# no shortcut needed
retval = self.obj
for i, key in enumerate(tup):
if i >= self.obj.ndim:
raise IndexingError('Too many indexers')
if is_null_slice(key):
continue
retval = getattr(retval, self.name)._getitem_axis(key, axis=i)
return retval
def _multi_take_opportunity(self, tup):
from pandas.core.generic import NDFrame
# ugly hack for GH #836
if not isinstance(self.obj, NDFrame):
return False
if not all(is_list_like_indexer(x) for x in tup):
return False
# just too complicated
for indexer, ax in zip(tup, self.obj._data.axes):
if isinstance(ax, MultiIndex):
return False
elif is_bool_indexer(indexer):
return False
elif not ax.is_unique:
return False
return True
def _multi_take(self, tup):
""" create the reindex map for our objects, raise the _exception if we
can't create the indexer
"""
try:
o = self.obj
d = dict(
[(a, self._convert_for_reindex(t, axis=o._get_axis_number(a)))
for t, a in zip(tup, o._AXIS_ORDERS)])
return o.reindex(**d)
except(KeyError, IndexingError):
raise self._exception
def _convert_for_reindex(self, key, axis=0):
labels = self.obj._get_axis(axis)
if is_bool_indexer(key):
key = check_bool_indexer(labels, key)
return labels[key]
else:
if isinstance(key, Index):
keyarr = labels._convert_index_indexer(key)
else:
# asarray can be unsafe, NumPy strings are weird
keyarr = _asarray_tuplesafe(key)
if is_integer_dtype(keyarr):
# Cast the indexer to uint64 if possible so
# that the values returned from indexing are
# also uint64.
keyarr = labels._convert_arr_indexer(keyarr)
if not labels.is_integer():
keyarr = _ensure_platform_int(keyarr)
return labels.take(keyarr)
return keyarr
def _handle_lowerdim_multi_index_axis0(self, tup):
# we have an axis0 multi-index, handle or raise
try:
# fast path for series or for tup devoid of slices
return self._get_label(tup, axis=0)
except TypeError:
# slices are unhashable
pass
except Exception as e1:
if isinstance(tup[0], (slice, Index)):
raise IndexingError("Handle elsewhere")
# raise the error if we are not sorted
ax0 = self.obj._get_axis(0)
if not ax0.is_lexsorted_for_tuple(tup):
raise e1
return None
def _getitem_lowerdim(self, tup):
# we can directly get the axis result since the axis is specified
if self.axis is not None:
axis = self.obj._get_axis_number(self.axis)
return self._getitem_axis(tup, axis=axis)
# we may have a nested tuples indexer here
if self._is_nested_tuple_indexer(tup):
return self._getitem_nested_tuple(tup)
        # we may be using a tuple to represent multiple dimensions here
ax0 = self.obj._get_axis(0)
# ...but iloc should handle the tuple as simple integer-location
# instead of checking it as multiindex representation (GH 13797)
if isinstance(ax0, MultiIndex) and self.name != 'iloc':
result = self._handle_lowerdim_multi_index_axis0(tup)
if result is not None:
return result
if len(tup) > self.obj.ndim:
raise IndexingError("Too many indexers. handle elsewhere")
# to avoid wasted computation
# df.ix[d1:d2, 0] -> columns first (True)
        # df.ix[0, ['C', 'B', 'A']] -> rows first (False)
for i, key in enumerate(tup):
if is_label_like(key) or isinstance(key, tuple):
section = self._getitem_axis(key, axis=i)
# we have yielded a scalar ?
if not is_list_like_indexer(section):
return section
elif section.ndim == self.ndim:
# we're in the middle of slicing through a MultiIndex
# revise the key wrt to `section` by inserting an _NS
new_key = tup[:i] + (_NS,) + tup[i + 1:]
else:
new_key = tup[:i] + tup[i + 1:]
# unfortunately need an odious kludge here because of
# DataFrame transposing convention
if (isinstance(section, ABCDataFrame) and i > 0 and
len(new_key) == 2):
a, b = new_key
new_key = b, a
if len(new_key) == 1:
new_key, = new_key
# This is an elided recursive call to iloc/loc/etc'
return getattr(section, self.name)[new_key]
raise IndexingError('not applicable')
def _getitem_nested_tuple(self, tup):
# we have a nested tuple so have at least 1 multi-index level
        # we should be able to match up the dimensionality here
# we have too many indexers for our dim, but have at least 1
# multi-index dimension, try to see if we have something like
# a tuple passed to a series with a multi-index
if len(tup) > self.ndim:
result = self._handle_lowerdim_multi_index_axis0(tup)
if result is not None:
return result
# this is a series with a multi-index specified a tuple of
# selectors
return self._getitem_axis(tup, axis=0)
# handle the multi-axis by taking sections and reducing
# this is iterative
obj = self.obj
axis = 0
for i, key in enumerate(tup):
if is_null_slice(key):
axis += 1
continue
current_ndim = obj.ndim
obj = getattr(obj, self.name)._getitem_axis(key, axis=axis)
axis += 1
# if we have a scalar, we are done
if is_scalar(obj) or not hasattr(obj, 'ndim'):
break
# has the dim of the obj changed?
# GH 7199
if obj.ndim < current_ndim:
# GH 7516
# if had a 3 dim and are going to a 2d
# axes are reversed on a DataFrame
if i >= 1 and current_ndim == 3 and obj.ndim == 2:
obj = obj.T
axis -= 1
return obj
def _getitem_axis(self, key, axis=0):
if self._should_validate_iterable(axis):
self._has_valid_type(key, axis)
labels = self.obj._get_axis(axis)
if isinstance(key, slice):
return self._get_slice_axis(key, axis=axis)
elif (is_list_like_indexer(key) and
not (isinstance(key, tuple) and
isinstance(labels, MultiIndex))):
if hasattr(key, 'ndim') and key.ndim > 1:
raise ValueError('Cannot index with multidimensional key')
return self._getitem_iterable(key, axis=axis)
else:
# maybe coerce a float scalar to integer
key = labels._maybe_cast_indexer(key)
if is_integer(key):
if axis == 0 and isinstance(labels, MultiIndex):
try:
return self._get_label(key, axis=axis)
except (KeyError, TypeError):
if self.obj.index.levels[0].is_integer():
raise
# this is the fallback! (for a non-float, non-integer index)
if not labels.is_floating() and not labels.is_integer():
return self._get_loc(key, axis=axis)
return self._get_label(key, axis=axis)
def _getitem_iterable(self, key, axis=0):
if self._should_validate_iterable(axis):
self._has_valid_type(key, axis)
labels = self.obj._get_axis(axis)
if is_bool_indexer(key):
key = check_bool_indexer(labels, key)
inds, = key.nonzero()
return self.obj.take(inds, axis=axis, convert=False)
else:
# Have the index compute an indexer or return None
# if it cannot handle; we only act on all found values
indexer, keyarr = labels._convert_listlike_indexer(
key, kind=self.name)
if indexer is not None and (indexer != -1).all():
return self.obj.take(indexer, axis=axis)
# existing labels are unique and indexer are unique
if labels.is_unique and Index(keyarr).is_unique:
try:
return self.obj.reindex_axis(keyarr, axis=axis)
except AttributeError:
# Series
if axis != 0:
raise AssertionError('axis must be 0')
return self.obj.reindex(keyarr)
# existing labels are non-unique
else:
# reindex with the specified axis
if axis + 1 > self.obj.ndim:
raise AssertionError("invalid indexing error with "
"non-unique index")
new_target, indexer, new_indexer = labels._reindex_non_unique(
keyarr)
if new_indexer is not None:
result = self.obj.take(indexer[indexer != -1], axis=axis,
convert=False)
result = result._reindex_with_indexers(
{axis: [new_target, new_indexer]},
copy=True, allow_dups=True)
else:
result = self.obj.take(indexer, axis=axis, convert=False)
return result
def _convert_to_indexer(self, obj, axis=0, is_setter=False):
"""
Convert indexing key into something we can use to do actual fancy
indexing on an ndarray
Examples
ix[:5] -> slice(0, 5)
ix[[1,2,3]] -> [1,2,3]
ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)
Going by Zen of Python?
'In the face of ambiguity, refuse the temptation to guess.'
raise AmbiguousIndexError with integer labels?
- No, prefer label-based indexing
"""
labels = self.obj._get_axis(axis)
if isinstance(obj, slice):
return self._convert_slice_indexer(obj, axis)
# try to find out correct indexer, if not type correct raise
try:
obj = self._convert_scalar_indexer(obj, axis)
except TypeError:
# but we will allow setting
if is_setter:
pass
# see if we are positional in nature
is_int_index = labels.is_integer()
is_int_positional = is_integer(obj) and not is_int_index
# if we are a label return me
try:
return labels.get_loc(obj)
except LookupError:
if isinstance(obj, tuple) and isinstance(labels, MultiIndex):
if is_setter and len(obj) == labels.nlevels:
return {'key': obj}
raise
except TypeError:
pass
except (ValueError):
if not is_int_positional:
raise
# a positional
if is_int_positional:
# if we are setting and its not a valid location
# its an insert which fails by definition
if is_setter:
# always valid
if self.name == 'loc':
return {'key': obj}
# a positional
if (obj >= self.obj.shape[axis] and
not isinstance(labels, MultiIndex)):
raise ValueError("cannot set by positional indexing with "
"enlargement")
return obj
if is_nested_tuple(obj, labels):
return labels.get_locs(obj)
elif is_list_like_indexer(obj):
if is_bool_indexer(obj):
obj = check_bool_indexer(labels, obj)
inds, = obj.nonzero()
return inds
else:
# Have the index compute an indexer or return None
# if it cannot handle
indexer, objarr = labels._convert_listlike_indexer(
obj, kind=self.name)
if indexer is not None:
return indexer
# unique index
if labels.is_unique:
indexer = check = labels.get_indexer(objarr)
# non-unique (dups)
else:
(indexer,
missing) = labels.get_indexer_non_unique(objarr)
# 'indexer' has dupes, create 'check' using 'missing'
check = np.zeros_like(objarr)
check[missing] = -1
mask = check == -1
if mask.any():
raise KeyError('%s not in index' % objarr[mask])
return _values_from_object(indexer)
else:
try:
return labels.get_loc(obj)
except LookupError:
# allow a not found key only if we are a setter
if not is_list_like_indexer(obj) and is_setter:
return {'key': obj}
raise
def _tuplify(self, loc):
tup = [slice(None, None) for _ in range(self.ndim)]
tup[0] = loc
return tuple(tup)
def _get_slice_axis(self, slice_obj, axis=0):
obj = self.obj
if not need_slice(slice_obj):
return obj
indexer = self._convert_slice_indexer(slice_obj, axis)
if isinstance(indexer, slice):
return self._slice(indexer, axis=axis, kind='iloc')
else:
return self.obj.take(indexer, axis=axis, convert=False)
class _IXIndexer(_NDFrameIndexer):
"""A primarily label-location based indexer, with integer position
fallback.
``.ix[]`` supports mixed integer and label based access. It is
primarily label based, but will fall back to integer positional
access unless the corresponding axis is of integer type.
``.ix`` is the most general indexer and will support any of the
inputs in ``.loc`` and ``.iloc``. ``.ix`` also supports floating
point label schemes. ``.ix`` is exceptionally useful when dealing
    with mixed positional and label based hierarchical indexes.
However, when an axis is integer based, ONLY label based access
and not positional access is supported. Thus, in such cases, it's
usually better to be explicit and use ``.iloc`` or ``.loc``.
See more at :ref:`Advanced Indexing <advanced>`.
"""
def __init__(self, obj, name):
_ix_deprecation_warning = """
.ix is deprecated. Please use
.loc for label based indexing or
.iloc for positional indexing
See the documentation here:
http://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated""" # noqa
warnings.warn(_ix_deprecation_warning,
DeprecationWarning, stacklevel=3)
super(_IXIndexer, self).__init__(obj, name)
def _has_valid_type(self, key, axis):
if isinstance(key, slice):
return True
elif is_bool_indexer(key):
return True
elif is_list_like_indexer(key):
return True
else:
self._convert_scalar_indexer(key, axis)
return True
class _LocationIndexer(_NDFrameIndexer):
_exception = Exception
def __getitem__(self, key):
if type(key) is tuple:
key = tuple(com._apply_if_callable(x, self.obj) for x in key)
try:
if self._is_scalar_access(key):
return self._getitem_scalar(key)
except (KeyError, IndexError):
pass
return self._getitem_tuple(key)
else:
key = com._apply_if_callable(key, self.obj)
return self._getitem_axis(key, axis=0)
def _is_scalar_access(self, key):
raise NotImplementedError()
def _getitem_scalar(self, key):
raise NotImplementedError()
def _getitem_axis(self, key, axis=0):
raise NotImplementedError()
def _getbool_axis(self, key, axis=0):
labels = self.obj._get_axis(axis)
key = check_bool_indexer(labels, key)
inds, = key.nonzero()
try:
return self.obj.take(inds, axis=axis, convert=False)
except Exception as detail:
raise self._exception(detail)
def _get_slice_axis(self, slice_obj, axis=0):
""" this is pretty simple as we just have to deal with labels """
obj = self.obj
if not need_slice(slice_obj):
return obj
labels = obj._get_axis(axis)
indexer = labels.slice_indexer(slice_obj.start, slice_obj.stop,
slice_obj.step, kind=self.name)
if isinstance(indexer, slice):
return self._slice(indexer, axis=axis, kind='iloc')
else:
return self.obj.take(indexer, axis=axis, convert=False)
class _LocIndexer(_LocationIndexer):
"""Purely label-location based indexer for selection by label.
``.loc[]`` is primarily label based, but may also be used with a
boolean array.
Allowed inputs are:
- A single label, e.g. ``5`` or ``'a'``, (note that ``5`` is
interpreted as a *label* of the index, and **never** as an
integer position along the index).
- A list or array of labels, e.g. ``['a', 'b', 'c']``.
- A slice object with labels, e.g. ``'a':'f'`` (note that contrary
to usual python slices, **both** the start and the stop are included!).
- A boolean array.
- A ``callable`` function with one argument (the calling Series, DataFrame
or Panel) and that returns valid output for indexing (one of the above)
``.loc`` will raise a ``KeyError`` when the items are not found.
See more at :ref:`Selection by Label <indexing.label>`
"""
_valid_types = ("labels (MUST BE IN THE INDEX), slices of labels (BOTH "
"endpoints included! Can be slices of integers if the "
"index is integers), listlike of labels, boolean")
_exception = KeyError
def _has_valid_type(self, key, axis):
ax = self.obj._get_axis(axis)
# valid for a label where all labels are in the index
        # slice of labels (where start-end in labels)
        # slice of integers (only if in the labels)
# boolean
if isinstance(key, slice):
return True
elif is_bool_indexer(key):
return True
elif is_list_like_indexer(key):
# mi is just a passthru
if isinstance(key, tuple) and isinstance(ax, MultiIndex):
return True
# TODO: don't check the entire key unless necessary
if (not is_iterator(key) and len(key) and
np.all(ax.get_indexer_for(key) < 0)):
raise KeyError("None of [%s] are in the [%s]" %
(key, self.obj._get_axis_name(axis)))
return True
else:
def error():
if isnull(key):
raise TypeError("cannot use label indexing with a null "
"key")
raise KeyError("the label [%s] is not in the [%s]" %
(key, self.obj._get_axis_name(axis)))
try:
key = self._convert_scalar_indexer(key, axis)
if not ax.contains(key):
error()
except TypeError as e:
# python 3 type errors should be raised
if _is_unorderable_exception(e):
error()
raise
except:
error()
return True
def _is_scalar_access(self, key):
# this is a shortcut accessor to both .loc and .iloc
# that provide the equivalent access of .at and .iat
# a) avoid getting things via sections and (to minimize dtype changes)
# b) provide a performant path
if not hasattr(key, '__len__'):
return False
if len(key) != self.ndim:
return False
for i, k in enumerate(key):
if not is_scalar(k):
return False
ax = self.obj.axes[i]
if isinstance(ax, MultiIndex):
return False
if not ax.is_unique:
return False
return True
def _getitem_scalar(self, key):
# a fast-path to scalar access
# if not, raise
values = self.obj.get_value(*key)
return values
def _get_partial_string_timestamp_match_key(self, key, labels):
"""Translate any partial string timestamp matches in key, returning the
new key (GH 10331)"""
if isinstance(labels, MultiIndex):
if isinstance(key, compat.string_types) and \
labels.levels[0].is_all_dates:
# Convert key '2016-01-01' to
# ('2016-01-01'[, slice(None, None, None)]+)
key = tuple([key] + [slice(None)] * (len(labels.levels) - 1))
if isinstance(key, tuple):
# Convert (..., '2016-01-01', ...) in tuple to
# (..., slice('2016-01-01', '2016-01-01', None), ...)
new_key = []
for i, component in enumerate(key):
if isinstance(component, compat.string_types) and \
labels.levels[i].is_all_dates:
new_key.append(slice(component, component, None))
else:
new_key.append(component)
key = tuple(new_key)
return key
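    # Illustrative trace, not from the source: for a two-level MultiIndex whose first
    # level is all dates, a scalar key '2016-01-01' is first padded to
    # ('2016-01-01', slice(None, None, None)), then each date-like string component
    # is widened, so the method returns
    #   (slice('2016-01-01', '2016-01-01', None), slice(None, None, None))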
def _getitem_axis(self, key, axis=0):
labels = self.obj._get_axis(axis)
key = self._get_partial_string_timestamp_match_key(key, labels)
if isinstance(key, slice):
self._has_valid_type(key, axis)
return self._get_slice_axis(key, axis=axis)
elif is_bool_indexer(key):
return self._getbool_axis(key, axis=axis)
elif is_list_like_indexer(key):
# convert various list-like indexers
# to a list of keys
# we will use the *values* of the object
# and NOT the index if its a PandasObject
if isinstance(labels, MultiIndex):
if isinstance(key, (ABCSeries, np.ndarray)) and key.ndim <= 1:
# Series, or 0,1 ndim ndarray
# GH 14730
key = list(key)
elif isinstance(key, ABCDataFrame):
# GH 15438
raise NotImplementedError("Indexing a MultiIndex with a "
"DataFrame key is not "
"implemented")
elif hasattr(key, 'ndim') and key.ndim > 1:
raise NotImplementedError("Indexing a MultiIndex with a "
"multidimensional key is not "
"implemented")
if (not isinstance(key, tuple) and len(key) > 1 and
not isinstance(key[0], tuple)):
key = tuple([key])
# an iterable multi-selection
if not (isinstance(key, tuple) and isinstance(labels, MultiIndex)):
if hasattr(key, 'ndim') and key.ndim > 1:
raise ValueError('Cannot index with multidimensional key')
return self._getitem_iterable(key, axis=axis)
# nested tuple slicing
if is_nested_tuple(key, labels):
locs = labels.get_locs(key)
indexer = [slice(None)] * self.ndim
indexer[axis] = locs
return self.obj.iloc[tuple(indexer)]
# fall thru to straight lookup
self._has_valid_type(key, axis)
return self._get_label(key, axis=axis)
class _iLocIndexer(_LocationIndexer):
"""Purely integer-location based indexing for selection by position.
``.iloc[]`` is primarily integer position based (from ``0`` to
``length-1`` of the axis), but may also be used with a boolean
array.
Allowed inputs are:
- An integer, e.g. ``5``.
- A list or array of integers, e.g. ``[4, 3, 0]``.
- A slice object with ints, e.g. ``1:7``.
- A boolean array.
- A ``callable`` function with one argument (the calling Series, DataFrame
or Panel) and that returns valid output for indexing (one of the above)
``.iloc`` will raise ``IndexError`` if a requested indexer is
out-of-bounds, except *slice* indexers which allow out-of-bounds
indexing (this conforms with python/numpy *slice* semantics).
See more at :ref:`Selection by Position <indexing.integer>`
"""
_valid_types = ("integer, integer slice (START point is INCLUDED, END "
"point is EXCLUDED), listlike of integers, boolean array")
_exception = IndexError
def _has_valid_type(self, key, axis):
if is_bool_indexer(key):
if hasattr(key, 'index') and isinstance(key.index, Index):
if key.index.inferred_type == 'integer':
raise NotImplementedError("iLocation based boolean "
"indexing on an integer type "
"is not available")
raise ValueError("iLocation based boolean indexing cannot use "
"an indexable as a mask")
return True
if isinstance(key, slice):
return True
elif is_integer(key):
return self._is_valid_integer(key, axis)
elif is_list_like_indexer(key):
return self._is_valid_list_like(key, axis)
return False
def _has_valid_setitem_indexer(self, indexer):
self._has_valid_positional_setitem_indexer(indexer)
def _is_scalar_access(self, key):
# this is a shortcut accessor to both .loc and .iloc
# that provide the equivalent access of .at and .iat
# a) avoid getting things via sections and (to minimize dtype changes)
# b) provide a performant path
if not hasattr(key, '__len__'):
return False
if len(key) != self.ndim:
return False
for i, k in enumerate(key):
if not is_integer(k):
return False
ax = self.obj.axes[i]
if not ax.is_unique:
return False
return True
def _getitem_scalar(self, key):
# a fast-path to scalar access
# if not, raise
values = self.obj.get_value(*key, takeable=True)
return values
def _is_valid_integer(self, key, axis):
# return a boolean if we have a valid integer indexer
ax = self.obj._get_axis(axis)
l = len(ax)
if key >= l or key < -l:
raise IndexError("single positional indexer is out-of-bounds")
return True
def _is_valid_list_like(self, key, axis):
# return a boolean if we are a valid list-like (e.g. that we don't
# have out-of-bounds values)
# a tuple should already have been caught by this point
# so don't treat a tuple as a valid indexer
if isinstance(key, tuple):
raise IndexingError('Too many indexers')
# coerce the key to not exceed the maximum size of the index
arr = np.array(key)
ax = self.obj._get_axis(axis)
l = len(ax)
if (hasattr(arr, '__len__') and len(arr) and
(arr.max() >= l or arr.min() < -l)):
raise IndexError("positional indexers are out-of-bounds")
return True
def _getitem_tuple(self, tup):
self._has_valid_tuple(tup)
try:
return self._getitem_lowerdim(tup)
except:
pass
retval = self.obj
axis = 0
for i, key in enumerate(tup):
if i >= self.obj.ndim:
raise IndexingError('Too many indexers')
if is_null_slice(key):
axis += 1
continue
retval = getattr(retval, self.name)._getitem_axis(key, axis=axis)
# if the dim was reduced, then pass a lower-dim the next time
if retval.ndim < self.ndim:
axis -= 1
# try to get for the next axis
axis += 1
return retval
def _get_slice_axis(self, slice_obj, axis=0):
obj = self.obj
if not need_slice(slice_obj):
return obj
slice_obj = self._convert_slice_indexer(slice_obj, axis)
if isinstance(slice_obj, slice):
return self._slice(slice_obj, axis=axis, kind='iloc')
else:
return self.obj.take(slice_obj, axis=axis, convert=False)
def _get_list_axis(self, key, axis=0):
"""
Return Series values by list or array of integers
Parameters
----------
key : list-like positional indexer
axis : int (can only be zero)
Returns
-------
Series object
"""
try:
return self.obj.take(key, axis=axis, convert=False)
except IndexError:
# re-raise with different error message
raise IndexError("positional indexers are out-of-bounds")
def _getitem_axis(self, key, axis=0):
if isinstance(key, slice):
self._has_valid_type(key, axis)
return self._get_slice_axis(key, axis=axis)
if isinstance(key, list):
try:
key = np.asarray(key)
except TypeError: # pragma: no cover
pass
if is_bool_indexer(key):
self._has_valid_type(key, axis)
return self._getbool_axis(key, axis=axis)
# a list of integers
elif is_list_like_indexer(key):
return self._get_list_axis(key, axis=axis)
# a single integer
else:
key = self._convert_scalar_indexer(key, axis)
if not is_integer(key):
raise TypeError("Cannot index by location index with a "
"non-integer key")
# validate the location
self._is_valid_integer(key, axis)
return self._get_loc(key, axis=axis)
def _convert_to_indexer(self, obj, axis=0, is_setter=False):
""" much simpler as we only have to deal with our valid types """
        # may need to convert a float key
if isinstance(obj, slice):
return self._convert_slice_indexer(obj, axis)
elif is_float(obj):
return self._convert_scalar_indexer(obj, axis)
elif self._has_valid_type(obj, axis):
return obj
raise ValueError("Can only index by location with a [%s]" %
self._valid_types)
class _ScalarAccessIndexer(_NDFrameIndexer):
""" access scalars quickly """
def _convert_key(self, key, is_setter=False):
return list(key)
def __getitem__(self, key):
if not isinstance(key, tuple):
# we could have a convertible item here (e.g. Timestamp)
if not is_list_like_indexer(key):
key = tuple([key])
else:
raise ValueError('Invalid call for scalar access (getting)!')
key = self._convert_key(key)
return self.obj.get_value(*key, takeable=self._takeable)
def __setitem__(self, key, value):
if isinstance(key, tuple):
key = tuple(com._apply_if_callable(x, self.obj) for x in key)
else:
# scalar callable may return tuple
key = com._apply_if_callable(key, self.obj)
if not isinstance(key, tuple):
key = self._tuplify(key)
if len(key) != self.obj.ndim:
raise ValueError('Not enough indexers for scalar access '
'(setting)!')
key = list(self._convert_key(key, is_setter=True))
key.append(value)
self.obj.set_value(*key, takeable=self._takeable)
class _AtIndexer(_ScalarAccessIndexer):
"""Fast label-based scalar accessor
Similarly to ``loc``, ``at`` provides **label** based scalar lookups.
You can also set using these indexers.
"""
_takeable = False
def _convert_key(self, key, is_setter=False):
""" require they keys to be the same type as the index (so we don't
fallback)
"""
# allow arbitrary setting
if is_setter:
return list(key)
for ax, i in zip(self.obj.axes, key):
if ax.is_integer():
if not is_integer(i):
raise ValueError("At based indexing on an integer index "
"can only have integer indexers")
else:
if is_integer(i):
raise ValueError("At based indexing on an non-integer "
"index can only have non-integer "
"indexers")
return key
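# Hedged usage note, illustrative rather than part of pandas: ``.at`` mirrors ``.loc``
# for a single cell, e.g. ``df.at['b', 'x']`` returns the scalar at row label 'b' and
# column 'x' of the hypothetical frame above, while its positional counterpart defined
# below, ``.iat``, takes integer positions, e.g. ``df.iat[1, 0]``.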
class _iAtIndexer(_ScalarAccessIndexer):
"""Fast integer location scalar accessor.
Similarly to ``iloc``, ``iat`` provides **integer** based lookups.
You can also set using these indexers.
"""
_takeable = True
def _has_valid_setitem_indexer(self, indexer):
self._has_valid_positional_setitem_indexer(indexer)
def _convert_key(self, key, is_setter=False):
""" require integer args (and convert to label arguments) """
for a, i in zip(self.obj.axes, key):
if not is_integer(i):
raise ValueError("iAt based indexing can only have integer "
"indexers")
return key
# 32-bit floating point machine epsilon
_eps = 1.1920929e-07
def length_of_indexer(indexer, target=None):
"""return the length of a single non-tuple indexer which could be a slice
"""
if target is not None and isinstance(indexer, slice):
l = len(target)
start = indexer.start
stop = indexer.stop
step = indexer.step
if start is None:
start = 0
elif start < 0:
start += l
if stop is None or stop > l:
stop = l
elif stop < 0:
stop += l
if step is None:
step = 1
elif step < 0:
step = -step
return (stop - start + step - 1) // step
elif isinstance(indexer, (ABCSeries, Index, np.ndarray, list)):
return len(indexer)
elif not is_list_like_indexer(indexer):
return 1
raise AssertionError("cannot find the length of the indexer")
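# Worked example, illustrative rather than from the source: for a target of length
# l = 10 and the indexer slice(2, 9, 3), start = 2, stop = 9, step = 3, so
#   (stop - start + step - 1) // step = (9 - 2 + 3 - 1) // 3 = 3
# which matches len(range(2, 9, 3)) == 3.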
def convert_to_index_sliceable(obj, key):
"""if we are index sliceable, then return my slicer, otherwise return None
"""
idx = obj.index
if isinstance(key, slice):
return idx._convert_slice_indexer(key, kind='getitem')
elif isinstance(key, compat.string_types):
# we are an actual column
if obj._data.items.contains(key):
return None
# We might have a datetimelike string that we can translate to a
# slice here via partial string indexing
if idx.is_all_dates:
try:
return idx._get_string_slice(key)
except (KeyError, ValueError, NotImplementedError):
return None
return None
def is_index_slice(obj):
def _is_valid_index(x):
return (is_integer(x) or is_float(x) and
np.allclose(x, int(x), rtol=_eps, atol=0))
def _crit(v):
return v is None or _is_valid_index(v)
both_none = obj.start is None and obj.stop is None
return not both_none and (_crit(obj.start) and _crit(obj.stop))
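# Worked example, illustrative rather than from the source:
#   is_index_slice(slice(0, 5))       -> True   (both bounds are index-like integers)
#   is_index_slice(slice('a', 'f'))   -> False  (labels are not index-like numbers)
#   is_index_slice(slice(None, None)) -> False  (both bounds are missing)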
def check_bool_indexer(ax, key):
# boolean indexing, need to check that the data are aligned, otherwise
# disallowed
# this function assumes that is_bool_indexer(key) == True
result = key
if isinstance(key, ABCSeries) and not key.index.equals(ax):
result = result.reindex(ax)
mask = isnull(result._values)
if mask.any():
raise IndexingError('Unalignable boolean Series provided as '
'indexer (index of the boolean Series and of '
'the indexed object do not match')
result = result.astype(bool)._values
elif is_sparse(result):
result = result.to_dense()
result = np.asarray(result, dtype=bool)
else:
# is_bool_indexer has already checked for nulls in the case of an
# object array key, so no check needed here
result = np.asarray(result, dtype=bool)
return result
def convert_missing_indexer(indexer):
""" reverse convert a missing indexer, which is a dict
return the scalar indexer and a boolean indicating if we converted
"""
if isinstance(indexer, dict):
# a missing key (but not a tuple indexer)
indexer = indexer['key']
if isinstance(indexer, bool):
raise KeyError("cannot use a single bool to index into setitem")
return indexer, True
return indexer, False
def convert_from_missing_indexer_tuple(indexer, axes):
""" create a filtered indexer that doesn't have any missing indexers """
def get_indexer(_i, _idx):
return (axes[_i].get_loc(_idx['key']) if isinstance(_idx, dict) else
_idx)
return tuple([get_indexer(_i, _idx) for _i, _idx in enumerate(indexer)])
def maybe_convert_indices(indices, n):
""" if we have negative indicies, translate to postive here
if have indicies that are out-of-bounds, raise an IndexError
"""
if isinstance(indices, list):
indices = np.array(indices)
if len(indices) == 0:
# If list is empty, np.array will return float and cause indexing
# errors.
return np.empty(0, dtype=np.int_)
mask = indices < 0
if mask.any():
indices[mask] += n
mask = (indices >= n) | (indices < 0)
if mask.any():
raise IndexError("indices are out-of-bounds")
return indices
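# Worked example, illustrative rather than from the source, with n = 5:
#   maybe_convert_indices([-1, 2], 5) -> array([4, 2])   # -1 wraps to n - 1
#   maybe_convert_indices([5, 0], 5)  -> IndexError      # 5 is out of bounds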
def maybe_convert_ix(*args):
"""
We likely want to take the cross-product
"""
ixify = True
for arg in args:
if not isinstance(arg, (np.ndarray, list, ABCSeries, Index)):
ixify = False
if ixify:
return np.ix_(*args)
else:
return args
def is_nested_tuple(tup, labels):
    # check for a compatible nested tuple and multiindexes among the axes
if not isinstance(tup, tuple):
return False
# are we nested tuple of: tuple,list,slice
for i, k in enumerate(tup):
if isinstance(k, (tuple, list, slice)):
return isinstance(labels, MultiIndex)
return False
def is_list_like_indexer(key):
# allow a list_like, but exclude NamedTuples which can be indexers
return is_list_like(key) and not (isinstance(key, tuple) and
type(key) is not tuple)
def is_label_like(key):
# select a label or row
return not isinstance(key, slice) and not is_list_like_indexer(key)
def need_slice(obj):
return (obj.start is not None or obj.stop is not None or
(obj.step is not None and obj.step != 1))
def maybe_droplevels(index, key):
# drop levels
original_index = index
if isinstance(key, tuple):
for _ in key:
try:
index = index.droplevel(0)
except:
# we have dropped too much, so back out
return original_index
else:
try:
index = index.droplevel(0)
except:
pass
return index
def _non_reducing_slice(slice_):
"""
    Ensure that a slice doesn't reduce to a Series or Scalar.
    Any user-passed `subset` should have this called on it
to make sure we're always working with DataFrames.
"""
# default to column slice, like DataFrame
    # ['A', 'B'] -> IndexSlice[:, ['A', 'B']]
kinds = tuple(list(compat.string_types) + [ABCSeries, np.ndarray, Index,
list])
if isinstance(slice_, kinds):
slice_ = IndexSlice[:, slice_]
def pred(part):
# true when slice does *not* reduce
return isinstance(part, slice) or is_list_like(part)
if not is_list_like(slice_):
if not isinstance(slice_, slice):
# a 1-d slice, like df.loc[1]
slice_ = [[slice_]]
else:
# slice(a, b, c)
slice_ = [slice_] # to tuplize later
else:
slice_ = [part if pred(part) else [part] for part in slice_]
return tuple(slice_)
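# Illustrative only (not part of the original module): tracing _non_reducing_slice
# with a single column label.
#
#     _non_reducing_slice('A')
#     # -> (slice(None, None, None), ['A']), i.e. all rows and the column list
#     #    ['A'], so the selection stays 2-d instead of reducing to a Series.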
def _maybe_numeric_slice(df, slice_, include_bool=False):
"""
    Provide sensible numeric defaults for background_gradient that don't
    break with non-numeric data. If slice_ is passed, use it as given.
"""
if slice_ is None:
dtypes = [np.number]
if include_bool:
dtypes.append(bool)
slice_ = IndexSlice[:, df.select_dtypes(include=dtypes).columns]
return slice_
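# Illustrative only (not part of the original module): with slice_=None,
# _maybe_numeric_slice(df, None) returns
# IndexSlice[:, df.select_dtypes(include=[np.number]).columns], i.e. all rows of
# just the numeric columns; a non-None slice_ is returned unchanged.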
|
mbayon/TFG-MachineLearning
|
venv/lib/python3.6/site-packages/pandas/core/indexing.py
|
Python
|
mit
| 73,228
|
createCancelServerTicket = {'id': 1234, 'title': 'Server Cancellation Request'}
getObject = {
"accountId": 1234,
"assignedUserId": 12345,
"createDate": "2013-08-01T14:14:04-07:00",
"id": 100,
"lastEditDate": "2013-08-01T14:16:47-07:00",
"lastEditType": "AUTO",
"modifyDate": "2013-08-01T14:16:47-07:00",
"status": {
"id": 1002,
"name": "Closed"
},
"statusId": 1002,
"title": "Cloud Instance Cancellation - 08/01/13",
'updateCount': 3,
'updates': [
{'entry': 'a bot says something'},
{'entry': 'user says something',
'editor': {'firstName': 'John', 'lastName': 'Smith'}},
{'entry': 'employee says something',
'editor': {'displayName': 'emp1'}},
]
}
createStandardTicket = {
"assignedUserId": 12345,
"id": 100,
"contents": "body",
"subjectId": 1004,
"title": "Cloud Instance Cancellation - 08/01/13"
}
edit = True
addUpdate = {}
|
briancline/softlayer-python
|
SoftLayer/fixtures/SoftLayer_Ticket.py
|
Python
|
mit
| 963
|
import six
from ctypes import c_ulong, c_ushort, c_void_p, c_ulonglong, POINTER,\
Structure, c_wchar_p, WINFUNCTYPE, windll, byref, cast
class Status(object):
SEC_E_OK = 0
SEC_I_CONTINUE_NEEDED = 0x00090312
SEC_I_COMPLETE_AND_CONTINUE = 0x00090314
SEC_I_INCOMPLETE_CREDENTIALS = 0x00090320
SEC_E_INSUFFICIENT_MEMORY = 0x80090300 - 0x100000000
SEC_E_INVALID_HANDLE = 0x80090301 - 0x100000000
SEC_E_UNSUPPORTED_FUNCTION = 0x80090302 - 0x100000000
SEC_E_INTERNAL_ERROR = 0x80090304 - 0x100000000
SEC_E_SECPKG_NOT_FOUND = 0x80090305 - 0x100000000
SEC_E_NOT_OWNER = 0x80090306 - 0x100000000
SEC_E_INVALID_TOKEN = 0x80090308 - 0x100000000
SEC_E_NO_IMPERSONATION = 0x8009030B - 0x100000000
SEC_E_LOGON_DENIED = 0x8009030C - 0x100000000
SEC_E_UNKNOWN_CREDENTIALS = 0x8009030D - 0x100000000
SEC_E_NO_CREDENTIALS = 0x8009030E - 0x100000000
SEC_E_OUT_OF_SEQUENCE = 0x80090310 - 0x100000000
SEC_E_NO_AUTHENTICATING_AUTHORITY = 0x80090311 - 0x100000000
SEC_E_BUFFER_TOO_SMALL = 0x80090321 - 0x100000000
SEC_E_WRONG_PRINCIPAL = 0x80090322 - 0x100000000
SEC_E_ALGORITHM_MISMATCH = 0x80090331 - 0x100000000
@classmethod
def getname(cls, value):
for name in dir(cls):
if name.startswith('SEC_E_') and getattr(cls, name) == value:
return name
return 'unknown value {0:x}'.format(0x100000000 + value)
#define SECBUFFER_EMPTY 0 // Undefined, replaced by provider
#define SECBUFFER_DATA 1 // Packet data
SECBUFFER_TOKEN = 2
#define SECBUFFER_PKG_PARAMS 3 // Package specific parameters
#define SECBUFFER_MISSING 4 // Missing Data indicator
#define SECBUFFER_EXTRA 5 // Extra data
#define SECBUFFER_STREAM_TRAILER 6 // Security Trailer
#define SECBUFFER_STREAM_HEADER 7 // Security Header
#define SECBUFFER_NEGOTIATION_INFO 8 // Hints from the negotiation pkg
#define SECBUFFER_PADDING 9 // non-data padding
#define SECBUFFER_STREAM 10 // whole encrypted message
#define SECBUFFER_MECHLIST 11
#define SECBUFFER_MECHLIST_SIGNATURE 12
#define SECBUFFER_TARGET 13 // obsolete
#define SECBUFFER_CHANNEL_BINDINGS 14
#define SECBUFFER_CHANGE_PASS_RESPONSE 15
#define SECBUFFER_TARGET_HOST 16
#define SECBUFFER_ALERT 17
SECPKG_CRED_INBOUND = 0x00000001
SECPKG_CRED_OUTBOUND = 0x00000002
SECPKG_CRED_BOTH = 0x00000003
SECPKG_CRED_DEFAULT = 0x00000004
SECPKG_CRED_RESERVED = 0xF0000000
SECBUFFER_VERSION = 0
#define ISC_REQ_DELEGATE 0x00000001
#define ISC_REQ_MUTUAL_AUTH 0x00000002
ISC_REQ_REPLAY_DETECT = 4
#define ISC_REQ_SEQUENCE_DETECT 0x00000008
ISC_REQ_CONFIDENTIALITY = 0x10
ISC_REQ_USE_SESSION_KEY = 0x00000020
ISC_REQ_PROMPT_FOR_CREDS = 0x00000040
ISC_REQ_USE_SUPPLIED_CREDS = 0x00000080
ISC_REQ_ALLOCATE_MEMORY = 0x00000100
ISC_REQ_USE_DCE_STYLE = 0x00000200
ISC_REQ_DATAGRAM = 0x00000400
ISC_REQ_CONNECTION = 0x00000800
#define ISC_REQ_CALL_LEVEL 0x00001000
#define ISC_REQ_FRAGMENT_SUPPLIED 0x00002000
#define ISC_REQ_EXTENDED_ERROR 0x00004000
#define ISC_REQ_STREAM 0x00008000
#define ISC_REQ_INTEGRITY 0x00010000
#define ISC_REQ_IDENTIFY 0x00020000
#define ISC_REQ_NULL_SESSION 0x00040000
#define ISC_REQ_MANUAL_CRED_VALIDATION 0x00080000
#define ISC_REQ_RESERVED1 0x00100000
#define ISC_REQ_FRAGMENT_TO_FIT 0x00200000
#// This exists only in Windows Vista and greater
#define ISC_REQ_FORWARD_CREDENTIALS 0x00400000
#define ISC_REQ_NO_INTEGRITY 0x00800000 // honored only by SPNEGO
#define ISC_REQ_USE_HTTP_STYLE 0x01000000
#define ISC_REQ_UNVERIFIED_TARGET_NAME 0x20000000
#define ISC_REQ_CONFIDENTIALITY_ONLY 0x40000000 // honored by SPNEGO/Kerberos
SECURITY_NETWORK_DREP = 0
SECURITY_NATIVE_DREP = 0x10
SECPKG_CRED_ATTR_NAMES = 1
ULONG = c_ulong
USHORT = c_ushort
PULONG = POINTER(ULONG)
PVOID = c_void_p
TimeStamp = c_ulonglong
PTimeStamp = POINTER(c_ulonglong)
PLUID = POINTER(c_ulonglong)
class SecHandle(Structure):
_fields_ = [
('lower', c_void_p),
('upper', c_void_p),
]
PSecHandle = POINTER(SecHandle)
CredHandle = SecHandle
PCredHandle = PSecHandle
PCtxtHandle = PSecHandle
class SecBuffer(Structure):
_fields_ = [
('cbBuffer', ULONG),
('BufferType', ULONG),
('pvBuffer', PVOID),
]
PSecBuffer = POINTER(SecBuffer)
class SecBufferDesc(Structure):
_fields_ = [
('ulVersion', ULONG),
('cBuffers', ULONG),
('pBuffers', PSecBuffer),
]
PSecBufferDesc = POINTER(SecBufferDesc)
class SEC_WINNT_AUTH_IDENTITY(Structure):
_fields_ = [
('User', c_wchar_p),
('UserLength', c_ulong),
('Domain', c_wchar_p),
('DomainLength', c_ulong),
('Password', c_wchar_p),
('PasswordLength', c_ulong),
('Flags', c_ulong),
]
class SecPkgInfo(Structure):
_fields_ = [
('fCapabilities', ULONG),
('wVersion', USHORT),
('wRPCID', USHORT),
('cbMaxToken', ULONG),
('Name', c_wchar_p),
('Comment', c_wchar_p),
]
PSecPkgInfo = POINTER(SecPkgInfo)
class SecPkgCredentials_Names(Structure):
_fields_ = [('UserName', c_wchar_p)]
def ret_val(value):
if value < 0:
raise Exception('SSPI Error {0}'.format(Status.getname(value)))
return value
ENUMERATE_SECURITY_PACKAGES_FN = WINFUNCTYPE(
ret_val,
POINTER(c_ulong),
POINTER(POINTER(SecPkgInfo)))
ACQUIRE_CREDENTIALS_HANDLE_FN = WINFUNCTYPE(
ret_val,
c_wchar_p, # principal
c_wchar_p, # package
ULONG, # fCredentialUse
PLUID, # pvLogonID
PVOID, # pAuthData
PVOID, # pGetKeyFn
PVOID, # pvGetKeyArgument
PCredHandle, # phCredential
PTimeStamp # ptsExpiry
)
FREE_CREDENTIALS_HANDLE_FN = WINFUNCTYPE(ret_val, POINTER(SecHandle))
INITIALIZE_SECURITY_CONTEXT_FN = WINFUNCTYPE(
ret_val,
PCredHandle,
PCtxtHandle, # phContext,
c_wchar_p, # pszTargetName,
ULONG, # fContextReq,
ULONG, # Reserved1,
ULONG, # TargetDataRep,
PSecBufferDesc, # pInput,
ULONG, # Reserved2,
PCtxtHandle, # phNewContext,
PSecBufferDesc, # pOutput,
PULONG, # pfContextAttr,
PTimeStamp, # ptsExpiry
)
COMPLETE_AUTH_TOKEN_FN = WINFUNCTYPE(
ret_val,
PCtxtHandle, # phContext
PSecBufferDesc, # pToken
)
FREE_CONTEXT_BUFFER_FN = WINFUNCTYPE(ret_val, PVOID)
QUERY_CREDENTIAL_ATTRIBUTES_FN = WINFUNCTYPE(
ret_val,
PCredHandle, # cred
ULONG, # attribute
PVOID, # out buffer
)
ACCEPT_SECURITY_CONTEXT_FN = PVOID
DELETE_SECURITY_CONTEXT_FN = WINFUNCTYPE(ret_val, PCtxtHandle)
APPLY_CONTROL_TOKEN_FN = PVOID
QUERY_CONTEXT_ATTRIBUTES_FN = PVOID
IMPERSONATE_SECURITY_CONTEXT_FN = PVOID
REVERT_SECURITY_CONTEXT_FN = PVOID
MAKE_SIGNATURE_FN = PVOID
VERIFY_SIGNATURE_FN = PVOID
QUERY_SECURITY_PACKAGE_INFO_FN = WINFUNCTYPE(
ret_val,
c_wchar_p, # package name
POINTER(PSecPkgInfo),
)
EXPORT_SECURITY_CONTEXT_FN = PVOID
IMPORT_SECURITY_CONTEXT_FN = PVOID
ADD_CREDENTIALS_FN = PVOID
QUERY_SECURITY_CONTEXT_TOKEN_FN = PVOID
ENCRYPT_MESSAGE_FN = PVOID
DECRYPT_MESSAGE_FN = PVOID
SET_CONTEXT_ATTRIBUTES_FN = PVOID
class SECURITY_FUNCTION_TABLE(Structure):
_fields_ = [
('dwVersion', c_ulong),
('EnumerateSecurityPackages', ENUMERATE_SECURITY_PACKAGES_FN),
('QueryCredentialsAttributes', QUERY_CREDENTIAL_ATTRIBUTES_FN),
('AcquireCredentialsHandle', ACQUIRE_CREDENTIALS_HANDLE_FN),
('FreeCredentialsHandle', FREE_CREDENTIALS_HANDLE_FN),
('Reserved2', c_void_p),
('InitializeSecurityContext', INITIALIZE_SECURITY_CONTEXT_FN),
('AcceptSecurityContext', ACCEPT_SECURITY_CONTEXT_FN),
('CompleteAuthToken', COMPLETE_AUTH_TOKEN_FN),
('DeleteSecurityContext', DELETE_SECURITY_CONTEXT_FN),
('ApplyControlToken', APPLY_CONTROL_TOKEN_FN),
('QueryContextAttributes', QUERY_CONTEXT_ATTRIBUTES_FN),
('ImpersonateSecurityContext', IMPERSONATE_SECURITY_CONTEXT_FN),
('RevertSecurityContext', REVERT_SECURITY_CONTEXT_FN),
('MakeSignature', MAKE_SIGNATURE_FN),
('VerifySignature', VERIFY_SIGNATURE_FN),
('FreeContextBuffer', FREE_CONTEXT_BUFFER_FN),
('QuerySecurityPackageInfo', QUERY_SECURITY_PACKAGE_INFO_FN),
('Reserved3', c_void_p),
('Reserved4', c_void_p),
('ExportSecurityContext', EXPORT_SECURITY_CONTEXT_FN),
('ImportSecurityContext', IMPORT_SECURITY_CONTEXT_FN),
('AddCredentials', ADD_CREDENTIALS_FN),
('Reserved8', c_void_p),
('QuerySecurityContextToken', QUERY_SECURITY_CONTEXT_TOKEN_FN),
('EncryptMessage', ENCRYPT_MESSAGE_FN),
('DecryptMessage', DECRYPT_MESSAGE_FN),
('SetContextAttributes', SET_CONTEXT_ATTRIBUTES_FN),
]
_PInitSecurityInterface = WINFUNCTYPE(POINTER(SECURITY_FUNCTION_TABLE))
InitSecurityInterface = _PInitSecurityInterface(('InitSecurityInterfaceW', windll.secur32))
sec_fn = InitSecurityInterface()
if not sec_fn:
raise Exception('InitSecurityInterface failed')
sec_fn = sec_fn.contents
class _SecContext(object):
def close(self):
if self._handle.lower and self._handle.upper:
sec_fn.DeleteSecurityContext(self._handle)
self._handle.lower = self._handle.upper = 0
def __del__(self):
self.close()
def complete_auth_token(self, bufs):
sec_fn.CompleteAuthToken(
byref(self._handle),
byref(_make_buffers_desc(bufs)))
def next(self,
flags,
target_name=None,
byte_ordering='network',
input_buffers=None,
output_buffers=None):
input_buffers_desc = _make_buffers_desc(input_buffers) if input_buffers else None
output_buffers_desc = _make_buffers_desc(output_buffers) if output_buffers else None
status = sec_fn.InitializeSecurityContext(
byref(self._cred._handle),
byref(self._handle),
target_name,
flags,
0,
SECURITY_NETWORK_DREP if byte_ordering == 'network' else SECURITY_NATIVE_DREP,
byref(input_buffers_desc) if input_buffers_desc else None,
0,
byref(self._handle),
            byref(output_buffers_desc) if output_buffers_desc else None,
byref(self._attrs),
byref(self._ts))
result_buffers = []
for i, (type, buf) in enumerate(output_buffers):
buf = buf[:output_buffers_desc.pBuffers[i].cbBuffer]
result_buffers.append((type, buf))
return status, result_buffers
class SspiCredentials(object):
def __init__(self, package, use, identity=None):
self._handle = SecHandle()
self._ts = TimeStamp()
sec_fn.AcquireCredentialsHandle(
None, package, use,
None, byref(identity) if identity and identity.Domain else None,
None, None, byref(self._handle), byref(self._ts))
def close(self):
if self._handle.lower or self._handle.upper:
sec_fn.FreeCredentialsHandle(byref(self._handle))
self._handle.lower = 0
self._handle.upper = 0
def __del__(self):
self.close()
def query_user_name(self):
names = SecPkgCredentials_Names()
try:
sec_fn.QueryCredentialsAttributes(
byref(self._handle),
SECPKG_CRED_ATTR_NAMES,
byref(names))
user_name = six.text_type(names.UserName)
finally:
p = c_wchar_p.from_buffer(names, SecPkgCredentials_Names.UserName.offset)
sec_fn.FreeContextBuffer(p)
return user_name
def create_context(
self,
flags,
target_name=None,
byte_ordering='network',
input_buffers=None,
output_buffers=None):
ctx = _SecContext()
ctx._cred = self
ctx._handle = SecHandle()
ctx._ts = TimeStamp()
ctx._attrs = ULONG()
input_buffers_desc = _make_buffers_desc(input_buffers) if input_buffers else None
output_buffers_desc = _make_buffers_desc(output_buffers) if output_buffers else None
status = sec_fn.InitializeSecurityContext(
byref(self._handle),
None,
target_name,
flags,
0,
SECURITY_NETWORK_DREP if byte_ordering == 'network' else SECURITY_NATIVE_DREP,
byref(input_buffers_desc) if input_buffers_desc else None,
0,
byref(ctx._handle),
byref(output_buffers_desc) if output_buffers_desc else None,
byref(ctx._attrs),
byref(ctx._ts))
result_buffers = []
for i, (type, buf) in enumerate(output_buffers):
buf = buf[:output_buffers_desc.pBuffers[i].cbBuffer]
result_buffers.append((type, buf))
return ctx, status, result_buffers
def _make_buffers_desc(buffers):
desc = SecBufferDesc()
desc.ulVersion = SECBUFFER_VERSION
bufs_array = (SecBuffer * len(buffers))()
for i, (type, buf) in enumerate(buffers):
bufs_array[i].BufferType = type
bufs_array[i].cbBuffer = len(buf)
bufs_array[i].pvBuffer = cast(buf, PVOID)
desc.pBuffers = bufs_array
desc.cBuffers = len(buffers)
return desc
def make_winnt_identity(domain, user_name, password):
identity = SEC_WINNT_AUTH_IDENTITY()
identity.Flags = 2 # SEC_WINNT_AUTH_IDENTITY_UNICODE
identity.Password = password
identity.PasswordLength = len(password)
identity.Domain = domain
identity.DomainLength = len(domain)
identity.User = user_name
identity.UserLength = len(user_name)
return identity
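# Illustrative only (not part of the original module): a sketch of building an
# identity and acquiring an outbound credential handle with it (domain, user
# and password values are assumed).
#
#     identity = make_winnt_identity(u'EXAMPLE', u'user', u'secret')
#     cred = SspiCredentials(u'NTLM', SECPKG_CRED_OUTBOUND, identity)
#     print(cred.query_user_name())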
#class SspiSecBuffer(object):
# def __init__(self, type, buflen=4096):
# self._buf = create_string_buffer(int(buflen))
# self._desc = SecBuffer()
# self._desc.cbBuffer = buflen
# self._desc.BufferType = type
# self._desc.pvBuffer = cast(self._buf, PVOID)
#
#class SspiSecBuffers(object):
# def __init__(self):
# self._desc = SecBufferDesc()
# self._desc.ulVersion = SECBUFFER_VERSION
# self._descrs = (SecBuffer * 8)()
# self._desc.pBuffers = self._descrs
#
# def append(self, buf):
# if len(self._descrs) <= self._desc.cBuffers:
# newdescrs = (SecBuffer * (len(self._descrs) * 2))(*self._descrs)
# self._descrs = newdescrs
# self._desc.pBuffers = newdescrs
# self._descrs[self._desc.cBuffers] = buf._desc
# self._desc.cBuffers += 1
def enum_security_packages():
num = ULONG()
infos = POINTER(SecPkgInfo)()
status = sec_fn.EnumerateSecurityPackages(byref(num), byref(infos))
try:
return [{'caps': infos[i].fCapabilities,
'version': infos[i].wVersion,
'rpcid': infos[i].wRPCID,
'max_token': infos[i].cbMaxToken,
'name': infos[i].Name,
'comment': infos[i].Comment,
} for i in range(num.value)]
finally:
sec_fn.FreeContextBuffer(infos)
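# Illustrative only (not part of the original module): listing the installed
# security packages through the function table initialised above.
#
#     for pkg in enum_security_packages():
#         print(pkg['name'], pkg['max_token'])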
|
m32/pytds
|
src/pytds/sspi.py
|
Python
|
mit
| 15,485
|
# coding=utf-8
"""
Collects data from php-fpm if the pm.status_path is enabled
#### Usage
A sample php-fpm config for this collector to work is
```
pm.status_path = /fpm-status
```
#### Dependencies
* urllib2
 * json (or simplejson)
"""
try:
import json
json # workaround for pyflakes issue #13
except ImportError:
import simplejson as json
import urllib2
import diamond.collector
class PhpFpmCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(PhpFpmCollector, self).get_default_config_help()
config_help.update({
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(PhpFpmCollector, self).get_default_config()
config.update({
'host': 'localhost',
'port': 80,
'uri': 'fpm-status',
'byte_unit': ['byte'],
'path': 'phpfpm',
})
return config
def collect(self):
#
# if there is a / in front remove it
if self.config['uri'][0] == '/':
self.config['uri'] = self.config['uri'][1:]
try:
response = urllib2.urlopen("http://%s:%s/%s?json" % (
self.config['host'], int(self.config['port']),
self.config['uri']))
except Exception, e:
            self.log.error("Couldn't connect to php-fpm status page: %s", e)
return {}
try:
j = json.loads(response.read())
except Exception, e:
            self.log.error("Couldn't parse json: %s", e)
return {}
valid_metrics = [
'accepted_conn',
'listen_queue',
'max_listen_queue',
'listen_queue_len',
'idle_processes',
'active_processes',
'total_processes',
'max_active_processes',
'max_children_reached',
'slow_requests'
]
for k, v in j.items():
#
# php-fpm has spaces in the keys so lets replace all spaces with _
k = k.replace(" ", "_")
if k in valid_metrics:
self.publish(k, v)
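# Illustrative only (not part of the original collector): a minimal Diamond
# collector config sketch matching the defaults above (file name and location
# depend on your installation, e.g. PhpFpmCollector.conf):
#
#     enabled = True
#     host = localhost
#     port = 80
#     uri = fpm-status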
|
disqus/Diamond
|
src/collectors/phpfpm/phpfpm.py
|
Python
|
mit
| 2,259
|
from .test_models import *
from .test_gcm_push_payload import *
from .test_apns_push_payload import *
from .test_management_commands import *
from .test_apns_certfilecheck import *
from .test_wns import *
# conditionally test rest_framework api if the DRF package is installed
try:
import rest_framework
except ImportError:
pass
else:
    from .test_rest_framework import *
|
hylje/django-push-notifications
|
tests/__init__.py
|
Python
|
mit
| 373
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import sorl.thumbnail.fields
from django.conf import settings
import cambiaahora.utils
import ckeditor.fields
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Staff',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('titulo', models.CharField(max_length=250, verbose_name='Nombre')),
('slug', models.SlugField(editable=False)),
('foto', sorl.thumbnail.fields.ImageField(upload_to=cambiaahora.utils.get_file_path, null=True, verbose_name='Foto principal', blank=True)),
('fecha', models.DateField(verbose_name='fecha de nacimiento')),
('texto', ckeditor.fields.RichTextField(verbose_name='Texto')),
('profesion', models.CharField(max_length=250, verbose_name='Profesi\xf3n')),
('cargo', models.CharField(max_length=250, verbose_name='Cargo/Puesto')),
('aprobacion', models.IntegerField(default=b'1', verbose_name='Aprobaci\xf3n', choices=[(1, 'Borrador'), (2, 'Aprobado')])),
('idioma', models.IntegerField(default=b'1', verbose_name='Idioma', choices=[(1, 'Espa\xf1ol'), (2, 'English')])),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Staff',
'verbose_name_plural': 'Staff',
},
),
]
|
CARocha/plataforma_fadcanic
|
cambiaahora/staff/migrations/0001_initial.py
|
Python
|
mit
| 1,717
|
# -*- coding: utf-8 -*-
__version__ = '7.1.0'
|
sbuss/voteswap
|
lib/bootstrap3/__init__.py
|
Python
|
mit
| 47
|
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Tests for Layer1 of DynamoDB v2
"""
import time
from tests.unit import unittest
from boto.dynamodb2 import exceptions
from boto.dynamodb2.layer1 import DynamoDBConnection
class DynamoDBv2Layer1Test(unittest.TestCase):
dynamodb = True
def setUp(self):
self.dynamodb = DynamoDBConnection()
self.table_name = 'test-%d' % int(time.time())
self.hash_key_name = 'username'
self.hash_key_type = 'S'
self.range_key_name = 'date_joined'
self.range_key_type = 'N'
self.read_units = 5
self.write_units = 5
self.attributes = [
{
'AttributeName': self.hash_key_name,
'AttributeType': self.hash_key_type,
},
{
'AttributeName': self.range_key_name,
'AttributeType': self.range_key_type,
}
]
self.schema = [
{
'AttributeName': self.hash_key_name,
'KeyType': 'HASH',
},
{
'AttributeName': self.range_key_name,
'KeyType': 'RANGE',
},
]
self.provisioned_throughput = {
'ReadCapacityUnits': self.read_units,
'WriteCapacityUnits': self.write_units,
}
self.lsi = [
{
'IndexName': 'MostRecentIndex',
'KeySchema': [
{
'AttributeName': self.hash_key_name,
'KeyType': 'HASH',
},
{
'AttributeName': self.range_key_name,
'KeyType': 'RANGE',
},
],
'Projection': {
'ProjectionType': 'KEYS_ONLY',
}
}
]
def create_table(self, table_name, attributes, schema,
provisioned_throughput, lsi=None, wait=True):
        # Note: the underlying create_table API takes these arguments in a slightly different (less intuitive) order.
result = self.dynamodb.create_table(
attributes,
table_name,
schema,
provisioned_throughput,
local_secondary_indexes=lsi
)
self.addCleanup(self.dynamodb.delete_table, table_name)
if wait:
while True:
description = self.dynamodb.describe_table(table_name)
if description['Table']['TableStatus'].lower() == 'active':
return result
else:
time.sleep(5)
else:
return result
def test_integrated(self):
result = self.create_table(
self.table_name,
self.attributes,
self.schema,
self.provisioned_throughput,
self.lsi
)
self.assertEqual(
result['TableDescription']['TableName'],
self.table_name
)
description = self.dynamodb.describe_table(self.table_name)
self.assertEqual(description['Table']['ItemCount'], 0)
# Create some records.
record_1_data = {
'username': {'S': 'johndoe'},
'first_name': {'S': 'John'},
'last_name': {'S': 'Doe'},
'date_joined': {'N': '1366056668'},
'friend_count': {'N': '3'},
'friends': {'SS': ['alice', 'bob', 'jane']},
}
r1_result = self.dynamodb.put_item(self.table_name, record_1_data)
# Get the data.
record_1 = self.dynamodb.get_item(self.table_name, key={
'username': {'S': 'johndoe'},
'date_joined': {'N': '1366056668'},
}, consistent_read=True)
self.assertEqual(record_1['Item']['username']['S'], 'johndoe')
self.assertEqual(record_1['Item']['first_name']['S'], 'John')
self.assertEqual(record_1['Item']['friends']['SS'], [
'alice', 'bob', 'jane'
])
# Now in a batch.
self.dynamodb.batch_write_item({
self.table_name: [
{
'PutRequest': {
'Item': {
'username': {'S': 'jane'},
'first_name': {'S': 'Jane'},
'last_name': {'S': 'Doe'},
'date_joined': {'N': '1366056789'},
'friend_count': {'N': '1'},
'friends': {'SS': ['johndoe']},
},
},
},
]
})
# Now a query.
lsi_results = self.dynamodb.query(
self.table_name,
index_name='MostRecentIndex',
key_conditions={
'username': {
'AttributeValueList': [
{'S': 'johndoe'},
],
'ComparisonOperator': 'EQ',
},
},
consistent_read=True
)
self.assertEqual(lsi_results['Count'], 1)
results = self.dynamodb.query(self.table_name, key_conditions={
'username': {
'AttributeValueList': [
{'S': 'jane'},
],
'ComparisonOperator': 'EQ',
},
'date_joined': {
'AttributeValueList': [
{'N': '1366050000'}
],
'ComparisonOperator': 'GT',
}
}, consistent_read=True)
self.assertEqual(results['Count'], 1)
# Now a scan.
results = self.dynamodb.scan(self.table_name)
self.assertEqual(results['Count'], 2)
s_items = sorted([res['username']['S'] for res in results['Items']])
self.assertEqual(s_items, ['jane', 'johndoe'])
self.dynamodb.delete_item(self.table_name, key={
'username': {'S': 'johndoe'},
'date_joined': {'N': '1366056668'},
})
results = self.dynamodb.scan(self.table_name)
self.assertEqual(results['Count'], 1)
# Parallel scan (minus client-side threading).
self.dynamodb.batch_write_item({
self.table_name: [
{
'PutRequest': {
'Item': {
'username': {'S': 'johndoe'},
'first_name': {'S': 'Johann'},
'last_name': {'S': 'Does'},
'date_joined': {'N': '1366058000'},
'friend_count': {'N': '1'},
'friends': {'SS': ['jane']},
},
                    },
                },
                {
                    'PutRequest': {
'Item': {
'username': {'S': 'alice'},
'first_name': {'S': 'Alice'},
'last_name': {'S': 'Expert'},
'date_joined': {'N': '1366056800'},
'friend_count': {'N': '2'},
'friends': {'SS': ['johndoe', 'jane']},
},
},
},
]
})
time.sleep(20)
results = self.dynamodb.scan(self.table_name, segment=0, total_segments=2)
self.assertTrue(results['Count'] in [1, 2])
results = self.dynamodb.scan(self.table_name, segment=1, total_segments=2)
self.assertTrue(results['Count'] in [1, 2])
def test_without_range_key(self):
result = self.create_table(
self.table_name,
[
{
'AttributeName': self.hash_key_name,
'AttributeType': self.hash_key_type,
},
],
[
{
'AttributeName': self.hash_key_name,
'KeyType': 'HASH',
},
],
self.provisioned_throughput
)
self.assertEqual(
result['TableDescription']['TableName'],
self.table_name
)
description = self.dynamodb.describe_table(self.table_name)
self.assertEqual(description['Table']['ItemCount'], 0)
# Create some records.
record_1_data = {
'username': {'S': 'johndoe'},
'first_name': {'S': 'John'},
'last_name': {'S': 'Doe'},
'date_joined': {'N': '1366056668'},
'friend_count': {'N': '3'},
'friends': {'SS': ['alice', 'bob', 'jane']},
}
r1_result = self.dynamodb.put_item(self.table_name, record_1_data)
# Now try a range-less get.
johndoe = self.dynamodb.get_item(self.table_name, key={
'username': {'S': 'johndoe'},
}, consistent_read=True)
self.assertEqual(johndoe['Item']['username']['S'], 'johndoe')
self.assertEqual(johndoe['Item']['first_name']['S'], 'John')
self.assertEqual(johndoe['Item']['friends']['SS'], [
'alice', 'bob', 'jane'
])
|
mattcaldwell/boto
|
tests/integration/dynamodb2/test_layer1.py
|
Python
|
mit
| 10,268
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class InvoicePaged(Paged):
"""
    A paging container for iterating over a list of Invoice objects
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[Invoice]'}
}
def __init__(self, *args, **kwargs):
super(InvoicePaged, self).__init__(*args, **kwargs)
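# Illustrative only (not part of the generated module): like any msrest Paged
# container, an InvoicePaged instance can be iterated directly and further
# pages are fetched transparently.
#
#     for invoice in invoice_paged:   # invoice_paged: an InvoicePaged instance
#         print(invoice)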
|
SUSE/azure-sdk-for-python
|
azure-mgmt-billing/azure/mgmt/billing/models/invoice_paged.py
|
Python
|
mit
| 870
|
'''OpenGL extension PGI.vertex_hints
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_PGI_vertex_hints'
_DEPRECATED = False
GL_VERTEX_DATA_HINT_PGI = constant.Constant( 'GL_VERTEX_DATA_HINT_PGI', 0x1A22A )
GL_VERTEX_CONSISTENT_HINT_PGI = constant.Constant( 'GL_VERTEX_CONSISTENT_HINT_PGI', 0x1A22B )
GL_MATERIAL_SIDE_HINT_PGI = constant.Constant( 'GL_MATERIAL_SIDE_HINT_PGI', 0x1A22C )
GL_MAX_VERTEX_HINT_PGI = constant.Constant( 'GL_MAX_VERTEX_HINT_PGI', 0x1A22D )
GL_COLOR3_BIT_PGI = constant.Constant( 'GL_COLOR3_BIT_PGI', 0x10000 )
GL_COLOR4_BIT_PGI = constant.Constant( 'GL_COLOR4_BIT_PGI', 0x20000 )
GL_EDGEFLAG_BIT_PGI = constant.Constant( 'GL_EDGEFLAG_BIT_PGI', 0x40000 )
GL_INDEX_BIT_PGI = constant.Constant( 'GL_INDEX_BIT_PGI', 0x80000 )
GL_MAT_AMBIENT_BIT_PGI = constant.Constant( 'GL_MAT_AMBIENT_BIT_PGI', 0x100000 )
GL_MAT_AMBIENT_AND_DIFFUSE_BIT_PGI = constant.Constant( 'GL_MAT_AMBIENT_AND_DIFFUSE_BIT_PGI', 0x200000 )
GL_MAT_DIFFUSE_BIT_PGI = constant.Constant( 'GL_MAT_DIFFUSE_BIT_PGI', 0x400000 )
GL_MAT_EMISSION_BIT_PGI = constant.Constant( 'GL_MAT_EMISSION_BIT_PGI', 0x800000 )
GL_MAT_COLOR_INDEXES_BIT_PGI = constant.Constant( 'GL_MAT_COLOR_INDEXES_BIT_PGI', 0x1000000 )
GL_MAT_SHININESS_BIT_PGI = constant.Constant( 'GL_MAT_SHININESS_BIT_PGI', 0x2000000 )
GL_MAT_SPECULAR_BIT_PGI = constant.Constant( 'GL_MAT_SPECULAR_BIT_PGI', 0x4000000 )
GL_NORMAL_BIT_PGI = constant.Constant( 'GL_NORMAL_BIT_PGI', 0x8000000 )
GL_TEXCOORD1_BIT_PGI = constant.Constant( 'GL_TEXCOORD1_BIT_PGI', 0x10000000 )
GL_TEXCOORD2_BIT_PGI = constant.Constant( 'GL_TEXCOORD2_BIT_PGI', 0x20000000 )
GL_TEXCOORD3_BIT_PGI = constant.Constant( 'GL_TEXCOORD3_BIT_PGI', 0x40000000 )
GL_TEXCOORD4_BIT_PGI = constant.Constant( 'GL_TEXCOORD4_BIT_PGI', 0x80000000 )
GL_VERTEX23_BIT_PGI = constant.Constant( 'GL_VERTEX23_BIT_PGI', 0x4 )
GL_VERTEX4_BIT_PGI = constant.Constant( 'GL_VERTEX4_BIT_PGI', 0x8 )
def glInitVertexHintsPGI():
'''Return boolean indicating whether this extension is available'''
return extensions.hasGLExtension( EXTENSION_NAME )
|
D4wN/brickv
|
src/build_data/windows/OpenGL/raw/GL/PGI/vertex_hints.py
|
Python
|
gpl-2.0
| 2,221
|
from builtins import range
from idc import Byte, SegEnd
from idautils import Segments
from idaapi import is_mapped
from miasm.core.utils import int_to_byte
from miasm.core.bin_stream import bin_stream_str
class bin_stream_ida(bin_stream_str):
"""
bin_stream implementation for IDA
Don't generate xrange using address computation:
    It can raise an overflow error at 0x7FFFFFFF with 32-bit Python.
"""
def _getbytes(self, start, l=1):
out = []
for ad in range(l):
offset = ad + start + self.base_address
if not is_mapped(offset):
raise IOError("not enough bytes")
out.append(int_to_byte(Byte(offset)))
return b''.join(out)
def readbs(self, l=1):
if self.offset + l > self.l:
raise IOError("not enough bytes")
        content = self.getbytes(self.offset, l)
self.offset += l
return content
def __str__(self):
raise NotImplementedError('Not fully functional')
def setoffset(self, val):
self.offset = val
def getlen(self):
# Lazy version
if hasattr(self, "_getlen"):
return self._getlen
max_addr = SegEnd(list(Segments())[-1] - (self.offset - self.base_address))
self._getlen = max_addr
return max_addr
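# Illustrative only (not part of the original module): a sketch of feeding the
# stream to a miasm disassembler from an IDA-hosted script (architecture and
# start address are assumed).
#
#     from miasm.analysis.machine import Machine
#     bs = bin_stream_ida()
#     mdis = Machine("x86_32").dis_engine(bs)
#     asmcfg = mdis.dis_multiblock(0x401000)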
|
mrphrazer/miasm
|
miasm/core/bin_stream_ida.py
|
Python
|
gpl-2.0
| 1,317
|
import os
import sys
import getpass
import select
import subprocess as proc
try:
import json
except ImportError:
import simplejson as json
_input = None
# read stdin, if there's anything to read
_stdin_data = {}
while sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
line = sys.stdin.readline()
if line:
d = line.split(':', 1)
if len(d) == 2:
_stdin_data[d[0].strip()] = d[1].strip()
else:
break
def decode_utf8(s):
"""
Convert the given byte sequence to a
utf8 string.
"""
if s is None or isinstance(s, str):
return s
return s.decode('utf-8', 'ignore')
def host():
return os.uname()[1]
def get_input():
global _input
if _input is None:
_input = json.load(open('./script.input'))
return _input
def parameters():
return get_input()[0]
def param(name):
return parameters().get(name)
def output(step_idx):
if step_idx < len(get_input()):
return get_input()[step_idx]
return {}
def exit_fail(msg):
print(msg, file=sys.stderr)
sys.exit(1)
def exit_ok(data):
print(json.dumps(data))
sys.exit(0)
def is_true(s):
if s in (True, False):
return s
return isinstance(s, str) and s.lower() in ('yes', 'true', '1', 'on')
_debug_enabled = None
def debug_enabled():
global _debug_enabled
if _debug_enabled is None:
_debug_enabled = is_true(param('debug'))
return _debug_enabled
def info(msg):
"writes msg to log"
with open('./crm_script.debug', 'a') as dbglog:
dbglog.write('%s' % (msg))
def debug(msg):
"writes msg to log and syslog if debug is enabled"
if debug_enabled():
try:
with open('./crm_script.debug', 'a') as dbglog:
dbglog.write('%s\n' % (msg))
import syslog
syslog.openlog("crmsh", 0, syslog.LOG_USER)
            syslog.syslog(syslog.LOG_NOTICE, str(msg))
except:
pass
def call(cmd, shell=False):
debug("crm_script(call): %s" % (cmd))
p = proc.Popen(cmd, shell=shell, stdin=None, stdout=proc.PIPE, stderr=proc.PIPE)
out, err = p.communicate()
return p.returncode, decode_utf8(out).strip(), decode_utf8(err).strip()
def use_sudo():
return getpass.getuser() != 'root' and is_true(param('sudo')) and _stdin_data.get('sudo')
def sudo_call(cmd, shell=False):
if not use_sudo():
return call(cmd, shell=shell)
debug("crm_script(sudo_call): %s" % (cmd))
os.unsetenv('SSH_ASKPASS')
call(['sudo', '-k'], shell=False)
sudo_prompt = 'crm_script_sudo_prompt'
if isinstance(cmd, str):
cmd = "sudo -H -S -p '%s' %s" % (sudo_prompt, cmd)
else:
cmd = ['sudo', '-H', '-S', '-p', sudo_prompt] + cmd
p = proc.Popen(cmd, shell=shell, stdin=proc.PIPE, stdout=proc.PIPE, stderr=proc.PIPE)
sudo_pass = "%s\n" % (_stdin_data.get('sudo', 'linux'))
debug("CMD(SUDO): %s" % (str(cmd)))
out, err = p.communicate(input=sudo_pass)
return p.returncode, out.strip(), err.strip()
def service(name, action):
if action.startswith('is-'):
return call(['/usr/bin/systemctl', action, name + '.service'])
return sudo_call(['/usr/bin/systemctl', action, name + '.service'])
def package(name, state):
rc, out, err = sudo_call(['./crm_pkg.py', '-n', name, '-s', state])
if rc != 0:
raise IOError("%s / %s" % (out, err))
outp = json.loads(decode_utf8(out))
if isinstance(outp, dict) and 'rc' in outp:
rc = int(outp['rc'])
if rc != 0:
raise IOError("(rc=%s) %s%s" % (rc, outp.get('stdout', ''), outp.get('stderr', '')))
return outp
def check_package(name, state):
rc, out, err = call(['./crm_pkg.py', '--dry-run', '-n', name, '-s', state])
if rc != 0:
raise IOError(err)
outp = json.loads(out)
if isinstance(outp, dict) and 'rc' in outp:
rc = int(outp['rc'])
if rc != 0:
raise IOError("(rc=%s) %s%s" % (rc, outp.get('stdout', ''), outp.get('stderr', '')))
return outp
def rpmcheck(names):
rc, out, err = call(['./crm_rpmcheck.py'] + names)
if rc != 0:
raise IOError(err)
return json.loads(out)
def save_template(template, dest, **kwargs):
'''
1. Reads a template from <template>,
2. Replaces all template variables with those in <kwargs> and
3. writes the resulting file to <dest>
'''
import re
tmpl = open(template).read()
keys = re.findall(r'%\((\w+)\)s', tmpl, re.MULTILINE)
missing_keys = set(keys) - set(kwargs.keys())
if missing_keys:
raise ValueError("Missing template arguments: %s" % ', '.join(missing_keys))
tmpl = tmpl % kwargs
try:
with open(dest, 'w') as f:
f.write(tmpl)
except Exception as e:
raise IOError("Failed to write %s from template %s: %s" % (dest, template, e))
debug("crm_script(save_template): wrote %s" % (dest))
|
ClusterLabs/crmsh
|
utils/crm_script.py
|
Python
|
gpl-2.0
| 4,984
|
'''OpenGL extension NV.texture_shader3
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_NV_texture_shader3'
_DEPRECATED = False
GL_OFFSET_PROJECTIVE_TEXTURE_2D_NV = constant.Constant( 'GL_OFFSET_PROJECTIVE_TEXTURE_2D_NV', 0x8850 )
GL_OFFSET_PROJECTIVE_TEXTURE_2D_SCALE_NV = constant.Constant( 'GL_OFFSET_PROJECTIVE_TEXTURE_2D_SCALE_NV', 0x8851 )
GL_OFFSET_PROJECTIVE_TEXTURE_RECTANGLE_NV = constant.Constant( 'GL_OFFSET_PROJECTIVE_TEXTURE_RECTANGLE_NV', 0x8852 )
GL_OFFSET_PROJECTIVE_TEXTURE_RECTANGLE_SCALE_NV = constant.Constant( 'GL_OFFSET_PROJECTIVE_TEXTURE_RECTANGLE_SCALE_NV', 0x8853 )
GL_OFFSET_HILO_TEXTURE_2D_NV = constant.Constant( 'GL_OFFSET_HILO_TEXTURE_2D_NV', 0x8854 )
GL_OFFSET_HILO_TEXTURE_RECTANGLE_NV = constant.Constant( 'GL_OFFSET_HILO_TEXTURE_RECTANGLE_NV', 0x8855 )
GL_OFFSET_HILO_PROJECTIVE_TEXTURE_2D_NV = constant.Constant( 'GL_OFFSET_HILO_PROJECTIVE_TEXTURE_2D_NV', 0x8856 )
GL_OFFSET_HILO_PROJECTIVE_TEXTURE_RECTANGLE_NV = constant.Constant( 'GL_OFFSET_HILO_PROJECTIVE_TEXTURE_RECTANGLE_NV', 0x8857 )
GL_DEPENDENT_HILO_TEXTURE_2D_NV = constant.Constant( 'GL_DEPENDENT_HILO_TEXTURE_2D_NV', 0x8858 )
GL_DEPENDENT_RGB_TEXTURE_3D_NV = constant.Constant( 'GL_DEPENDENT_RGB_TEXTURE_3D_NV', 0x8859 )
GL_DEPENDENT_RGB_TEXTURE_CUBE_MAP_NV = constant.Constant( 'GL_DEPENDENT_RGB_TEXTURE_CUBE_MAP_NV', 0x885A )
GL_DOT_PRODUCT_PASS_THROUGH_NV = constant.Constant( 'GL_DOT_PRODUCT_PASS_THROUGH_NV', 0x885B )
GL_DOT_PRODUCT_TEXTURE_1D_NV = constant.Constant( 'GL_DOT_PRODUCT_TEXTURE_1D_NV', 0x885C )
GL_DOT_PRODUCT_AFFINE_DEPTH_REPLACE_NV = constant.Constant( 'GL_DOT_PRODUCT_AFFINE_DEPTH_REPLACE_NV', 0x885D )
GL_HILO8_NV = constant.Constant( 'GL_HILO8_NV', 0x885E )
GL_SIGNED_HILO8_NV = constant.Constant( 'GL_SIGNED_HILO8_NV', 0x885F )
GL_FORCE_BLUE_TO_ONE_NV = constant.Constant( 'GL_FORCE_BLUE_TO_ONE_NV', 0x8860 )
def glInitTextureShader3NV():
'''Return boolean indicating whether this extension is available'''
return extensions.hasGLExtension( EXTENSION_NAME )
|
D4wN/brickv
|
src/build_data/windows/OpenGL/raw/GL/NV/texture_shader3.py
|
Python
|
gpl-2.0
| 2,168
|
# -*- coding:utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from invenio.search_engine import get_record
from invenio.bibrecord import record_get_field_instances
from invenio.bibindex_tokenizers.BibIndexMultiFieldTokenizer import BibIndexMultiFieldTokenizer
class BibIndexFilteringTokenizer(BibIndexMultiFieldTokenizer):
"""
This tokenizer would tokenize phrases from tag
only if another tag was present in the record's metadata,
for example it would tokenize phrases from 100__a
only if 100__u was found in the record's metadata.
This tokenizer is abstract and it shouldn't be used
    for indexes. Instead of using this tokenizer one can
    create another tokenizer inheriting from this one.
To create new tokenizer based on BibIndexFilteringTokenizer
you need to specify rules of tokenizing in self.rules
property.
Examples:
1) Let's say we want to tokenize data only from 100__a if 100__u is present:
set: self.rules = (('100__a', 'u', ''),)
2) We want to tokenize data from '0247_a' if '0247_2' == 'DOI':
        set: self.rules = (('0247_a', '2', 'DOI'),)
3) We want to tokenize data from '0247_a' if '0247_2' == 'DOI' and all data
from '100__a' with no constraints:
        set: self.rules = (('0247_a', '2', 'DOI'), ('100__a', '', ''))
Definition of 'rules' tuple:
(tag_to_take_phrases_from, value_of_sub_tag or '', necessary_value_of_sub_tag or '')
Note: there is no get_tokenizing_function() to make this tokenizer abstract.
"""
def __init__(self, stemming_language=None, remove_stopwords=False, remove_html_markup=False, remove_latex_markup=False):
self.rules = ()
def tokenize(self, recID):
phrases = []
try:
rec = get_record(recID)
for rule in self.rules:
tag_to_index, necessary_tag, necessary_value = rule
core_tag = tag_to_index[0:3]
ind = tag_to_index[3:5]
sub_tag = tag_to_index[5]
fields = [dict(instance[0]) for instance in record_get_field_instances(rec, core_tag, ind[0], ind[1])]
for field in fields:
tag_condition = necessary_tag and field.has_key(necessary_tag) or necessary_tag == ''
value_condition = necessary_value and field.get(necessary_tag, '') == necessary_value or \
necessary_value == ''
if tag_condition and field.has_key(sub_tag) and value_condition:
phrases.append(field[sub_tag])
return phrases
except KeyError:
return []
return phrases
def tokenize_via_recjson(self, recID):
"""
TODO: implementation needs to be introduced
in order to work with non-marc standards.
"""
return []
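# Illustrative only (not part of the original module): a minimal sketch of a
# concrete tokenizer built on the filtering rules described in the docstring
# (class name and rule are assumptions, not taken from Invenio).
#
#     class BibIndexDOIFilteringTokenizer(BibIndexFilteringTokenizer):
#         def __init__(self, stemming_language=None, remove_stopwords=False,
#                      remove_html_markup=False, remove_latex_markup=False):
#             self.rules = (('0247_a', '2', 'DOI'),)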
|
CERNDocumentServer/invenio
|
modules/bibindex/lib/tokenizers/BibIndexFilteringTokenizer.py
|
Python
|
gpl-2.0
| 3,671
|
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2009 Douglas S. Blank <doug.blank@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Clears gramps data
"""
import os
os.environ["DJANGO_SETTINGS_MODULE"] = "settings"
import settings
from .grampsdb.models import models as dj
dj.clear_tables("primary", "secondary", "ref", "system")
|
pmghalvorsen/gramps_branch
|
gramps/webapp/init_gramps.py
|
Python
|
gpl-2.0
| 1,033
|
from PyQt4.QtCore import Qt, QPoint, QObject, pyqtSignal
from PyQt4.QtGui import QWidget, QVBoxLayout, QSizePolicy, QFrame, QColor, QLabel
class ErrorPopup(QWidget):
error_template = ("<html>"
"<table style='background-color: #ffdfdf;'width='100%%'>"
"<tr><td style='font-weight: bold; padding-left: 5px;'>Warning:</td></tr>"
"%s"
"</table>"
"</html>")
def __init__(self):
QWidget.__init__(self, None, Qt.ToolTip)
self.resize(300, 50)
self.setContentsMargins(0, 0, 0, 0)
layout = QVBoxLayout()
layout.setMargin(0)
self._error_widget = QLabel("")
self._error_widget.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Minimum)
self._error_widget.setFrameStyle(QFrame.Box)
self._error_widget.setWordWrap(True)
self._error_widget.setScaledContents(True)
# self.warning_widget.setAlignment(Qt.AlignHCenter)
self._error_widget.setTextFormat(Qt.RichText)
layout.addWidget(self._error_widget)
self.setLayout(layout)
def presentError(self, widget, error):
assert isinstance(widget, QWidget)
self._error_widget.setText(ErrorPopup.error_template % error)
self.show()
size_hint = self.sizeHint()
rect = widget.rect()
p = widget.mapToGlobal(QPoint(rect.left(), rect.top()))
self.setGeometry(p.x(), p.y() - size_hint.height() - 5, size_hint.width(), size_hint.height())
self.raise_()
class ValidationSupport(QObject):
STRONG_ERROR_COLOR = QColor(255, 215, 215)
ERROR_COLOR = QColor(255, 235, 235)
INVALID_COLOR = QColor(235, 235, 255)
WARNING = "warning"
EXCLAMATION = "ide/small/exclamation"
validationChanged = pyqtSignal(bool)
def __init__(self, validation_target):
""" @type validation_target: QWidget """
QObject.__init__(self)
self._validation_target = validation_target
self._validation_message = None
self._validation_type = None
self._error_popup = ErrorPopup()
self._originalEnterEvent = validation_target.enterEvent
self._originalLeaveEvent = validation_target.leaveEvent
self._originalHideEvent = validation_target.hideEvent
def enterEvent(event):
self._originalEnterEvent(event)
if not self.isValid():
self._error_popup.presentError(self._validation_target, self._validation_message)
validation_target.enterEvent = enterEvent
def leaveEvent(event):
self._originalLeaveEvent(event)
if self._error_popup is not None:
self._error_popup.hide()
validation_target.leaveEvent = leaveEvent
def hideEvent(hide_event):
self._error_popup.hide()
self._originalHideEvent(hide_event)
validation_target.hideEvent = hideEvent
def setValidationMessage(self, message, validation_type=WARNING):
"""Add a warning or information icon to the widget with a tooltip"""
message = message.strip()
if message == "":
self._validation_type = None
self._validation_message = None
self._error_popup.hide()
self.validationChanged.emit(True)
else:
self._validation_type = validation_type
self._validation_message = message
if self._validation_target.hasFocus() or self._validation_target.underMouse():
self._error_popup.presentError(self._validation_target, self._validation_message)
self.validationChanged.emit(False)
def isValid(self):
return self._validation_message is None
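# Illustrative only (not part of the original module): a sketch of attaching
# validation support to a widget (widget type and message are assumed).
#
#     from PyQt4.QtGui import QLineEdit
#     edit = QLineEdit()
#     support = ValidationSupport(edit)
#     support.setValidationMessage("Value must be a positive integer")
#     # hovering the widget now shows the popup; an empty message clears it
#     support.setValidationMessage("")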
|
arielalmendral/ert
|
python/python/ert_gui/ertwidgets/validationsupport.py
|
Python
|
gpl-3.0
| 3,782
|
# This file is part of pybootchartgui.
# pybootchartgui is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pybootchartgui is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pybootchartgui. If not, see <http://www.gnu.org/licenses/>.
import gobject
import gtk
import gtk.gdk
import gtk.keysyms
import draw
class PyBootchartWidget(gtk.DrawingArea):
__gsignals__ = {
'expose-event': 'override',
'clicked' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_STRING, gtk.gdk.Event)),
'position-changed' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT, gobject.TYPE_INT)),
'set-scroll-adjustments' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gtk.Adjustment, gtk.Adjustment))
}
def __init__(self, res, options):
gtk.DrawingArea.__init__(self)
self.res = res
self.options = options
self.set_flags(gtk.CAN_FOCUS)
self.add_events(gtk.gdk.BUTTON_PRESS_MASK | gtk.gdk.BUTTON_RELEASE_MASK)
self.connect("button-press-event", self.on_area_button_press)
self.connect("button-release-event", self.on_area_button_release)
self.add_events(gtk.gdk.POINTER_MOTION_MASK | gtk.gdk.POINTER_MOTION_HINT_MASK | gtk.gdk.BUTTON_RELEASE_MASK)
self.connect("motion-notify-event", self.on_area_motion_notify)
self.connect("scroll-event", self.on_area_scroll_event)
self.connect('key-press-event', self.on_key_press_event)
self.connect('set-scroll-adjustments', self.on_set_scroll_adjustments)
self.connect("size-allocate", self.on_allocation_size_changed)
self.connect("position-changed", self.on_position_changed)
self.zoom_ratio = 1.0
self.x, self.y = 0.0, 0.0
self.chart_width, self.chart_height = draw.extents(*res)
self.hadj = None
self.vadj = None
def do_expose_event(self, event):
cr = self.window.cairo_create()
# set a clip region for the expose event
cr.rectangle(
event.area.x, event.area.y,
event.area.width, event.area.height
)
cr.clip()
self.draw(cr, self.get_allocation())
return False
def draw(self, cr, rect):
cr.set_source_rgba(1.0, 1.0, 1.0, 1.0)
cr.paint()
cr.scale(self.zoom_ratio, self.zoom_ratio)
cr.translate(-self.x, -self.y)
draw.render(cr, self.options, *self.res)
def position_changed(self):
self.emit("position-changed", self.x, self.y)
ZOOM_INCREMENT = 1.25
def zoom_image(self, zoom_ratio):
self.zoom_ratio = zoom_ratio
self._set_scroll_adjustments(self.hadj, self.vadj)
self.queue_draw()
def zoom_to_rect(self, rect):
zoom_ratio = float(rect.width)/float(self.chart_width)
self.zoom_image(zoom_ratio)
self.x = 0
self.position_changed()
def on_zoom_in(self, action):
self.zoom_image(self.zoom_ratio * self.ZOOM_INCREMENT)
def on_zoom_out(self, action):
self.zoom_image(self.zoom_ratio / self.ZOOM_INCREMENT)
def on_zoom_fit(self, action):
self.zoom_to_rect(self.get_allocation())
def on_zoom_100(self, action):
self.zoom_image(1.0)
POS_INCREMENT = 100
def on_key_press_event(self, widget, event):
if event.keyval == gtk.keysyms.Left:
self.x -= self.POS_INCREMENT/self.zoom_ratio
elif event.keyval == gtk.keysyms.Right:
self.x += self.POS_INCREMENT/self.zoom_ratio
elif event.keyval == gtk.keysyms.Up:
self.y -= self.POS_INCREMENT/self.zoom_ratio
elif event.keyval == gtk.keysyms.Down:
self.y += self.POS_INCREMENT/self.zoom_ratio
elif event.keyval == gtk.keysyms.Page_Up:
self.zoom_image(self.zoom_ratio * self.ZOOM_INCREMENT)
elif event.keyval == gtk.keysyms.Page_Down:
self.zoom_image(self.zoom_ratio / self.ZOOM_INCREMENT)
else:
return False
self.queue_draw()
self.position_changed()
return True
def on_area_button_press(self, area, event):
if event.button == 2 or event.button == 1:
area.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.FLEUR))
self.prevmousex = event.x
self.prevmousey = event.y
if event.type not in (gtk.gdk.BUTTON_PRESS, gtk.gdk.BUTTON_RELEASE):
return False
return False
def on_area_button_release(self, area, event):
if event.button == 2 or event.button == 1:
area.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.ARROW))
self.prevmousex = None
self.prevmousey = None
return True
return False
def on_area_scroll_event(self, area, event):
if event.state & gtk.gdk.CONTROL_MASK:
if event.direction == gtk.gdk.SCROLL_UP:
self.zoom_image(self.zoom_ratio * self.ZOOM_INCREMENT)
return True
if event.direction == gtk.gdk.SCROLL_DOWN:
self.zoom_image(self.zoom_ratio / self.ZOOM_INCREMENT)
return True
return False
def on_area_motion_notify(self, area, event):
state = event.state
if state & gtk.gdk.BUTTON2_MASK or state & gtk.gdk.BUTTON1_MASK:
x, y = int(event.x), int(event.y)
# pan the image
self.x += (self.prevmousex - x)/self.zoom_ratio
self.y += (self.prevmousey - y)/self.zoom_ratio
self.queue_draw()
self.prevmousex = x
self.prevmousey = y
self.position_changed()
return True
def on_set_scroll_adjustments(self, area, hadj, vadj):
self._set_scroll_adjustments(hadj, vadj)
def on_allocation_size_changed(self, widget, allocation):
self.hadj.page_size = allocation.width
self.hadj.page_increment = allocation.width * 0.9
self.vadj.page_size = allocation.height
self.vadj.page_increment = allocation.height * 0.9
def _set_scroll_adjustments(self, hadj, vadj):
        if hadj is None:
            hadj = gtk.Adjustment(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
        if vadj is None:
            vadj = gtk.Adjustment(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
        if self.hadj is not None and hadj != self.hadj:
            self.hadj.disconnect(self.hadj_changed_signal_id)
        if self.vadj is not None and vadj != self.vadj:
            self.vadj.disconnect(self.vadj_changed_signal_id)
        if hadj is not None:
            self.hadj = hadj
            self._set_adj_upper(self.hadj, self.zoom_ratio * self.chart_width)
            self.hadj_changed_signal_id = self.hadj.connect('value-changed', self.on_adjustments_changed)
        if vadj is not None:
self.vadj = vadj
self._set_adj_upper(self.vadj, self.zoom_ratio * self.chart_height)
self.vadj_changed_signal_id = self.vadj.connect('value-changed', self.on_adjustments_changed)
def _set_adj_upper(self, adj, upper):
changed = False
value_changed = False
if adj.upper != upper:
adj.upper = upper
changed = True
max_value = max(0.0, upper - adj.page_size)
if adj.value > max_value:
adj.value = max_value
value_changed = True
if changed:
adj.changed()
if value_changed:
adj.value_changed()
def on_adjustments_changed(self, adj):
self.x = self.hadj.value / self.zoom_ratio
self.y = self.vadj.value / self.zoom_ratio
self.queue_draw()
def on_position_changed(self, widget, x, y):
self.hadj.value = x * self.zoom_ratio
self.vadj.value = y * self.zoom_ratio
PyBootchartWidget.set_set_scroll_adjustments_signal('set-scroll-adjustments')
class PyBootchartWindow(gtk.Window):
ui = '''
<ui>
<toolbar name="ToolBar">
<toolitem action="ZoomIn"/>
<toolitem action="ZoomOut"/>
<toolitem action="ZoomFit"/>
<toolitem action="Zoom100"/>
</toolbar>
</ui>
'''
def __init__(self, res, options):
gtk.Window.__init__(self)
window = self
window.set_title('Bootchart')
window.set_default_size(512, 512)
vbox = gtk.VBox()
window.add(vbox)
self.widget = PyBootchartWidget(res, options)
# Create a UIManager instance
uimanager = self.uimanager = gtk.UIManager()
# Add the accelerator group to the toplevel window
accelgroup = uimanager.get_accel_group()
window.add_accel_group(accelgroup)
# Create an ActionGroup
actiongroup = gtk.ActionGroup('Actions')
self.actiongroup = actiongroup
# Create actions
actiongroup.add_actions((
('ZoomIn', gtk.STOCK_ZOOM_IN, None, None, None, self.widget.on_zoom_in),
('ZoomOut', gtk.STOCK_ZOOM_OUT, None, None, None, self.widget.on_zoom_out),
('ZoomFit', gtk.STOCK_ZOOM_FIT, 'Fit Width', None, None, self.widget.on_zoom_fit),
('Zoom100', gtk.STOCK_ZOOM_100, None, None, None, self.widget.on_zoom_100),
))
# Add the actiongroup to the uimanager
uimanager.insert_action_group(actiongroup, 0)
# Add a UI description
uimanager.add_ui_from_string(self.ui)
# Scrolled window
scrolled = gtk.ScrolledWindow()
scrolled.add(self.widget)
# Create a Toolbar
toolbar = uimanager.get_widget('/ToolBar')
vbox.pack_start(toolbar, False)
vbox.pack_start(scrolled)
self.set_focus(self.widget)
self.show_all()
def show(res, options):
win = PyBootchartWindow(res, options)
win.connect('destroy', gtk.main_quit)
gtk.main()
|
Alwnikrotikz/pybootchartgui
|
pybootchartgui/gui.py
|
Python
|
gpl-3.0
| 10,060
|
from twisted.internet import defer
from twisted.trial import unittest
from twisted.trial import runner, reporter, util
from twisted.trial.test import detests
class TestSetUp(unittest.TestCase):
def _loadSuite(self, klass):
loader = runner.TestLoader()
r = reporter.TestResult()
s = loader.loadClass(klass)
return r, s
def test_success(self):
result, suite = self._loadSuite(detests.DeferredSetUpOK)
suite(result)
self.failUnless(result.wasSuccessful())
self.failUnlessEqual(result.testsRun, 1)
def test_fail(self):
self.failIf(detests.DeferredSetUpFail.testCalled)
result, suite = self._loadSuite(detests.DeferredSetUpFail)
suite(result)
self.failIf(result.wasSuccessful())
self.failUnlessEqual(result.testsRun, 1)
self.failUnlessEqual(len(result.failures), 0)
self.failUnlessEqual(len(result.errors), 1)
self.failIf(detests.DeferredSetUpFail.testCalled)
def test_callbackFail(self):
self.failIf(detests.DeferredSetUpCallbackFail.testCalled)
result, suite = self._loadSuite(detests.DeferredSetUpCallbackFail)
suite(result)
self.failIf(result.wasSuccessful())
self.failUnlessEqual(result.testsRun, 1)
self.failUnlessEqual(len(result.failures), 0)
self.failUnlessEqual(len(result.errors), 1)
self.failIf(detests.DeferredSetUpCallbackFail.testCalled)
def test_error(self):
self.failIf(detests.DeferredSetUpError.testCalled)
result, suite = self._loadSuite(detests.DeferredSetUpError)
suite(result)
self.failIf(result.wasSuccessful())
self.failUnlessEqual(result.testsRun, 1)
self.failUnlessEqual(len(result.failures), 0)
self.failUnlessEqual(len(result.errors), 1)
self.failIf(detests.DeferredSetUpError.testCalled)
def test_skip(self):
self.failIf(detests.DeferredSetUpSkip.testCalled)
result, suite = self._loadSuite(detests.DeferredSetUpSkip)
suite(result)
self.failUnless(result.wasSuccessful())
self.failUnlessEqual(result.testsRun, 1)
self.failUnlessEqual(len(result.failures), 0)
self.failUnlessEqual(len(result.errors), 0)
self.failUnlessEqual(len(result.skips), 1)
self.failIf(detests.DeferredSetUpSkip.testCalled)
class TestNeverFire(unittest.TestCase):
def setUp(self):
self._oldTimeout = util.DEFAULT_TIMEOUT_DURATION
util.DEFAULT_TIMEOUT_DURATION = 0.1
def tearDown(self):
util.DEFAULT_TIMEOUT_DURATION = self._oldTimeout
def _loadSuite(self, klass):
loader = runner.TestLoader()
r = reporter.TestResult()
s = loader.loadClass(klass)
return r, s
def test_setUp(self):
self.failIf(detests.DeferredSetUpNeverFire.testCalled)
result, suite = self._loadSuite(detests.DeferredSetUpNeverFire)
suite(result)
self.failIf(result.wasSuccessful())
self.failUnlessEqual(result.testsRun, 1)
self.failUnlessEqual(len(result.failures), 0)
self.failUnlessEqual(len(result.errors), 1)
self.failIf(detests.DeferredSetUpNeverFire.testCalled)
self.failUnless(result.errors[0][1].check(defer.TimeoutError))
class TestTester(unittest.TestCase):
def getTest(self, name):
raise NotImplementedError("must override me")
def runTest(self, name):
result = reporter.TestResult()
self.getTest(name).run(result)
return result
class TestDeferred(TestTester):
def getTest(self, name):
return detests.DeferredTests(name)
def test_pass(self):
result = self.runTest('test_pass')
self.failUnless(result.wasSuccessful())
self.failUnlessEqual(result.testsRun, 1)
def test_passGenerated(self):
result = self.runTest('test_passGenerated')
self.failUnless(result.wasSuccessful())
self.failUnlessEqual(result.testsRun, 1)
self.failUnless(detests.DeferredTests.touched)
def test_fail(self):
result = self.runTest('test_fail')
self.failIf(result.wasSuccessful())
self.failUnlessEqual(result.testsRun, 1)
self.failUnlessEqual(len(result.failures), 1)
def test_failureInCallback(self):
result = self.runTest('test_failureInCallback')
self.failIf(result.wasSuccessful())
self.failUnlessEqual(result.testsRun, 1)
self.failUnlessEqual(len(result.failures), 1)
def test_errorInCallback(self):
result = self.runTest('test_errorInCallback')
self.failIf(result.wasSuccessful())
self.failUnlessEqual(result.testsRun, 1)
self.failUnlessEqual(len(result.errors), 1)
def test_skip(self):
result = self.runTest('test_skip')
self.failUnless(result.wasSuccessful())
self.failUnlessEqual(result.testsRun, 1)
self.failUnlessEqual(len(result.skips), 1)
self.failIf(detests.DeferredTests.touched)
def test_todo(self):
result = self.runTest('test_expectedFailure')
self.failUnless(result.wasSuccessful())
self.failUnlessEqual(result.testsRun, 1)
self.failUnlessEqual(len(result.errors), 0)
self.failUnlessEqual(len(result.failures), 0)
self.failUnlessEqual(len(result.expectedFailures), 1)
def test_thread(self):
result = self.runTest('test_thread')
self.failUnlessEqual(result.testsRun, 1)
self.failUnless(result.wasSuccessful(), result.errors)
class TestTimeout(TestTester):
def getTest(self, name):
return detests.TimeoutTests(name)
def _wasTimeout(self, error):
self.failUnlessEqual(error.check(defer.TimeoutError),
defer.TimeoutError)
def test_pass(self):
result = self.runTest('test_pass')
self.failUnless(result.wasSuccessful())
self.failUnlessEqual(result.testsRun, 1)
def test_passDefault(self):
result = self.runTest('test_passDefault')
self.failUnless(result.wasSuccessful())
self.failUnlessEqual(result.testsRun, 1)
def test_timeout(self):
result = self.runTest('test_timeout')
self.failIf(result.wasSuccessful())
self.failUnlessEqual(result.testsRun, 1)
self.failUnlessEqual(len(result.errors), 1)
self._wasTimeout(result.errors[0][1])
def test_timeoutZero(self):
result = self.runTest('test_timeoutZero')
self.failIf(result.wasSuccessful())
self.failUnlessEqual(result.testsRun, 1)
self.failUnlessEqual(len(result.errors), 1)
self._wasTimeout(result.errors[0][1])
def test_skip(self):
result = self.runTest('test_skip')
self.failUnless(result.wasSuccessful())
self.failUnlessEqual(result.testsRun, 1)
self.failUnlessEqual(len(result.skips), 1)
def test_todo(self):
result = self.runTest('test_expectedFailure')
self.failUnless(result.wasSuccessful())
self.failUnlessEqual(result.testsRun, 1)
self.failUnlessEqual(len(result.expectedFailures), 1)
self._wasTimeout(result.expectedFailures[0][1])
def test_errorPropagation(self):
result = self.runTest('test_errorPropagation')
self.failIf(result.wasSuccessful())
self.failUnlessEqual(result.testsRun, 1)
self._wasTimeout(detests.TimeoutTests.timedOut)
def test_classTimeout(self):
loader = runner.TestLoader()
suite = loader.loadClass(detests.TestClassTimeoutAttribute)
result = reporter.TestResult()
suite.run(result)
self.failUnlessEqual(len(result.errors), 1)
self._wasTimeout(result.errors[0][1])
def test_callbackReturnsNonCallingDeferred(self):
#hacky timeout
# raises KeyboardInterrupt because Trial sucks
from twisted.internet import reactor
call = reactor.callLater(2, reactor.crash)
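        # The callLater above is a watchdog: if the test hangs on a Deferred
        # that never fires, the reactor is crashed after two seconds; when the
        # test finishes in time, the call is still active and gets cancelled
        # below, so the watchdog never triggers.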
result = self.runTest('test_calledButNeverCallback')
if call.active():
call.cancel()
self.failIf(result.wasSuccessful())
self.failUnlessEqual(len(result.errors), 1)
self._wasTimeout(result.errors[0][1])
|
kenorb-contrib/BitTorrent
|
twisted/trial/test/test_deferred.py
|
Python
|
gpl-3.0
| 8,291
|
# -*- coding: utf-8 -*-
from outwiker.gui.baseaction import BaseAction
class CloseTabAction (BaseAction):
"""
    Close the current tab
"""
stringId = u"CloseTab"
def __init__(self, application):
self._application = application
@property
def title(self):
return _(u"Close Tab")
@property
def description(self):
return _(u"Close current tab")
def run(self, params):
assert self._application.mainWindow is not None
index = self._application.mainWindow.tabsController.getSelection()
if index != -1:
self._application.mainWindow.tabsController.closeTab(index)
class AddTabAction (BaseAction):
"""
    Add a tab
"""
stringId = u"AddTab"
def __init__(self, application):
self._application = application
@property
def title(self):
return _(u"Add Tab")
@property
def description(self):
return _(u"Add tab")
def run(self, params):
assert self._application.mainWindow is not None
self._application.mainWindow.tabsController.cloneTab()
class NextTabAction (BaseAction):
"""
    Go to the next tab
"""
stringId = u"NextTab"
def __init__(self, application):
self._application = application
@property
def title(self):
return _(u"Next Tab")
@property
def description(self):
return _(u"Go to next tab")
def run(self, params):
assert self._application.mainWindow is not None
self._application.mainWindow.tabsController.nextTab()
class PreviousTabAction (BaseAction):
"""
    Go to the previous tab
"""
stringId = u"PreviousTab"
def __init__(self, application):
self._application = application
@property
def title(self):
return _(u"Previous Tab")
@property
def description(self):
return _(u"Go to previous tab")
def run(self, params):
assert self._application.mainWindow is not None
self._application.mainWindow.tabsController.previousTab()
|
unreal666/outwiker
|
src/outwiker/actions/tabs.py
|
Python
|
gpl-3.0
| 2,186
|
# This file is part of waymarkedtrails.org
# Copyright (C) 2012-2013 Espen Oldeman Lund
# Copyright (C) 2015 Sarah Hoffmann
#
# This is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from collections import OrderedDict
from math import ceil, fabs
from osgeo import gdal
import numpy
from scipy.ndimage import map_coordinates
import config.defaults
def compute_elevation(segments, bounds, outdict):
""" Takes a MultiPoint geometry and computes the elevation.
Returns an array of x, y, ele.
"""
# load the relevant elevation data
dem = Dem(config.defaults.DEM_FILE)
band_array, xmax, ymin, xmin, ymax = dem.raster_array(bounds)
del dem
ny, nx = band_array.shape
outdict['segments'] = []
ascent = 0
descent = 0
for xcoord, ycoord, pos in segments:
# Turn these into arrays of x & y coords
xi = numpy.array(xcoord, dtype=numpy.float)
yi = numpy.array(ycoord, dtype=numpy.float)
# Now, we'll set points outside the boundaries to lie along an edge
xi[xi > xmax] = xmax
xi[xi < xmin] = xmin
yi[yi > ymax] = ymax
yi[yi < ymin] = ymin
        # We need to convert these to (float) indices
# (xi should range from 0 to (nx - 1), etc)
xi = (nx - 1) * (xi - xmin) / (xmax - xmin)
yi = -(ny - 1) * (yi - ymax) / (ymax - ymin)
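        # Worked example (illustrative values, not from the data): with
        # xmin = 0, xmax = 100 and nx = 11, a point at x = 25 maps to column
        # index (11 - 1) * 25 / 100 = 2.5, i.e. halfway between columns 2 and 3.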
# Interpolate elevation values
# map_coordinates does cubic interpolation by default,
# use "order=1" to preform bilinear interpolation
mapped = map_coordinates(band_array, [yi, xi], order=1)
elev = smooth_list(map_coordinates(band_array, [yi, xi], order=1))
a, d = compute_ascent(elev)
ascent += a
descent += d
elepoints = []
for x, y, ele, p in zip(xcoord, ycoord, elev, pos):
info = OrderedDict()
info['x'] = x
info['y'] = y
info['ele'] = float(ele)
info['pos'] = p
elepoints.append(info)
outdict['segments'].append({'elevation' : elepoints})
outdict['ascent'] = ascent
outdict['descent'] = descent
def round_elevation(x, base=config.defaults.DEM_ROUNDING):
return int(base * round(float(x)/base))
def compute_ascent(elev):
""" Calculate accumulated ascent and descent.
Slightly complicated by the fact that we have to jump over voids.
"""
accuracy = config.defaults.DEM_ACCURACY
formerHeight = None
firstvalid = None
lastvalid = None
accumulatedAscent = 0
for x in range (1, len(elev)-1):
currentHeight = elev[x]
if not numpy.isnan(currentHeight):
lastvalid = currentHeight
if formerHeight is None:
formerHeight = currentHeight
firstvalid = currentHeight
else:
if (elev[x-1] < currentHeight > elev[x+1]) or \
(elev[x-1] > currentHeight < elev[x+1]):
diff = currentHeight-formerHeight
if fabs(diff) > accuracy:
if diff > accuracy:
accumulatedAscent += diff
formerHeight = currentHeight
if lastvalid is None:
# looks like the route is completely within a void
return 0, 0
# collect the final point
diff = lastvalid - formerHeight
if diff > accuracy:
accumulatedAscent += diff
# ascent, descent
return round_elevation(accumulatedAscent), round_elevation(accumulatedAscent - (lastvalid - firstvalid))
class Dem(object):
def __init__(self, src):
self.source = gdal.Open(src)
self.transform = self.source.GetGeoTransform()
def raster_array(self, bbox):
# Calculate pixel coordinates (rounding always toward the outside)
ulx, uly = (int(x) for x in self.geo_to_pixel(bbox[0], bbox[3]))
lrx, lry = (int(ceil(x)) for x in self.geo_to_pixel(bbox[2], bbox[1]))
        # Get the raster array for the bounding box
band_array = self.source.GetRasterBand(1).ReadAsArray(ulx, uly,
lrx - ulx + 1,
lry - uly + 1)
# compute true boundaries (after rounding) of raster array
        # upper-left pixel -> (xmin, ymax), lower-right pixel -> (xmax, ymin)
        xmin, ymax = self.pixel_to_geo(ulx, uly)
        xmax, ymin = self.pixel_to_geo(lrx, lry)
        return band_array, xmax, ymin, xmin, ymax
def geo_to_pixel(self, x, y):
g0, g1, g2, g3, g4, g5 = self.transform
if g2 == 0:
xPixel = (x - g0) / float(g1)
yPixel = (y - g3 - xPixel*g4) / float(g5)
else:
xPixel = (y*g2 - x*g5 + g0*g5 - g2*g3) / float(g2*g4 - g1*g5)
yPixel = (x - g0 - xPixel*g1) / float(g2)
return xPixel, yPixel
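        # For the common north-up GeoTIFF transform (x0, dx, 0, y0, 0, -dy)
        # the g2 == 0 branch above reduces to ((x - x0) / dx, (y - y0) / -dy);
        # pixel_to_geo below is its inverse for that case.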
def pixel_to_geo(self, x, y):
g0, g1, g2, g3, g4, g5 = self.transform
if g2 == 0:
xout = x*float(g1) + g0
yout = float(g5)*y + float(g4)*(x - g0)/g1 + g3
else:
xout = g2*y + x*g1 + float(g0)
yout = (x*(float(g2*g4)-float(g1*g5)+xout*g5-g0*g5+g2*g3))/float(g2)
return xout, yout
#
# Code from http://stackoverflow.com/questions/5515720/python-smooth-time-series-data
#
def smooth_list(x, window_len=7, window='hanning'):
if len(x) <= window_len:
return x
s = numpy.r_[2*x[0] - x[window_len-1::-1], x, 2*x[-1] - x[-1:-window_len:-1]]
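    # Note on the padding above: numpy.r_ reflects the signal about its end
    # points (2*x[0] - x[...] on the left, 2*x[-1] - x[...] on the right), so
    # the windowed convolution does not sag towards zero at the profile edges.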
if window == 'flat': #moving average
w = numpy.ones(window_len,'d')
else:
w = getattr(numpy, window)(window_len)
y = numpy.convolve(w/w.sum(), s, mode='same')
return y[window_len:-window_len+1]
|
jschleic/waymarked-trails-site
|
api/elevation.py
|
Python
|
gpl-3.0
| 6,310
|
# -*- coding: utf-8 -*-
from module.plugins.internal.XFSHoster import XFSHoster, create_getInfo
class SecureUploadEu(XFSHoster):
__name__ = "SecureUploadEu"
__type__ = "hoster"
__version__ = "0.05"
__pattern__ = r'https?://(?:www\.)?secureupload\.eu/\w{12}'
__description__ = """SecureUpload.eu hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("z00nx", "z00nx0@gmail.com")]
HOSTER_DOMAIN = "secureupload.eu"
INFO_PATTERN = r'<h3>Downloading (?P<N>[^<]+) \((?P<S>[^<]+)\)</h3>'
getInfo = create_getInfo(SecureUploadEu)
|
sebdelsol/pyload
|
module/plugins/hoster/SecureUploadEu.py
|
Python
|
gpl-3.0
| 584
|
# Copyright (C) 2014 LiuLang <gsushzhsosgsu@gmail.com>
# Use of this source code is governed by GPLv3 license that can be found
# in http://www.gnu.org/licenses/gpl-3.0.html
import os
import time
from gi.repository import Gtk
from bcloud import Config
_ = Config._
from bcloud import util
from bcloud.Widgets import LeftLabel
from bcloud.Widgets import SelectableLeftLabel
(PIXBUF_COL, NAME_COL, PATH_COL, TOOLTIP_COL, SIZE_COL, HUMAN_SIZE_COL,
ISDIR_COL, MTIME_COL, HUMAN_MTIME_COL, TYPE_COL, PCS_FILE_COL) = list(
range(11))
class PropertiesDialog(Gtk.Dialog):
def __init__(self, parent, app, pcs_file):
file_path, file_name = os.path.split(pcs_file['path'])
super().__init__(file_name + _(' Properties'), app.window,
Gtk.DialogFlags.MODAL,
(Gtk.STOCK_CLOSE, Gtk.ResponseType.CLOSE))
self.set_default_response(Gtk.ResponseType.CLOSE)
self.set_border_width(15)
#self.set_default_size(640, 480)
box = self.get_content_area()
grid = Gtk.Grid()
grid.props.row_spacing = 8
grid.props.margin_left = 15
grid.props.column_spacing = 15
box.pack_start(grid, True, True, 10)
name_label = LeftLabel(_('Name:'))
grid.attach(name_label, 0, 0, 1, 1)
name_label2 = SelectableLeftLabel(file_name)
grid.attach(name_label2, 1, 0, 1, 1)
location_label = LeftLabel(_('Location:'))
grid.attach(location_label, 0, 2, 1, 1)
location_label2 = SelectableLeftLabel(file_path)
grid.attach(location_label2, 1, 2, 1, 1)
if pcs_file['isdir']:
pass
else:
size_label = LeftLabel(_('Size'))
grid.attach(size_label, 0, 1, 1, 1)
size_human, size_comma = util.get_human_size(pcs_file['size'])
if size_human:
size_text = ''.join([str(size_human), ' (', size_comma,
_(' bytes'), ')'])
else:
size_text = size_comma + _(' bytes')
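            # Illustrative example only (the exact strings come from
            # util.get_human_size): a 4,642 byte file might render as
            # "4.5 kB (4,642 bytes)".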
size_label2 = SelectableLeftLabel(size_text)
grid.attach(size_label2, 1, 1, 1, 1)
md5_label = LeftLabel('MD5:')
grid.attach(md5_label, 0, 3, 1, 1)
md5_label2 = SelectableLeftLabel(pcs_file['md5'])
grid.attach(md5_label2, 1, 3, 1, 1)
id_label = LeftLabel('FS ID:')
grid.attach(id_label, 0, 4, 1, 1)
id_label2 = SelectableLeftLabel(pcs_file['fs_id'])
grid.attach(id_label2, 1, 4, 1, 1)
ctime_label = LeftLabel(_('Created:'))
grid.attach(ctime_label, 0, 5, 1, 1)
ctime_label2 = SelectableLeftLabel(time.ctime(pcs_file['server_ctime']))
grid.attach(ctime_label2, 1, 5, 1, 1)
mtime_label = LeftLabel(_('Modified:'))
grid.attach(mtime_label, 0, 6, 1, 1)
mtime_label2 = SelectableLeftLabel(time.ctime(pcs_file['server_mtime']))
grid.attach(mtime_label2, 1, 6, 1, 1)
box.show_all()
class FolderPropertyDialog(Gtk.Dialog):
def __init__(self, icon_window, app, path):
file_path, file_name = os.path.split(path)
# modify file_name if path is '/'
if not file_name:
file_name = '/'
super().__init__(file_name + _(' Properties'), app.window,
Gtk.DialogFlags.MODAL,
(Gtk.STOCK_CLOSE, Gtk.ResponseType.CLOSE))
self.set_border_width(15)
box = self.get_content_area()
grid = Gtk.Grid()
grid.props.row_spacing = 8
grid.props.margin_left = 15
grid.props.column_spacing = 15
box.pack_start(grid, True, True, 10)
name_label = LeftLabel(_('Name:'))
grid.attach(name_label, 0, 0, 1, 1)
name_label2 = SelectableLeftLabel(file_name)
grid.attach(name_label2, 1, 0, 1, 1)
location_label = LeftLabel(_('Location:'))
grid.attach(location_label, 0, 1, 1, 1)
location_label2 = SelectableLeftLabel(file_path)
grid.attach(location_label2, 1, 1, 1, 1)
file_count = 0
folder_count = 0
for row in icon_window.liststore:
if row[ISDIR_COL]:
folder_count = folder_count + 1
else:
file_count = file_count + 1
contents = _('{0} folders, {1} files').format(folder_count, file_count)
content_label = LeftLabel(_('Contents:'))
grid.attach(content_label, 0, 2, 1, 1)
content_label2 = SelectableLeftLabel(contents)
grid.attach(content_label2, 1, 2, 1, 1)
box.show_all()
|
ghbhaha/bcloud
|
bcloud/PropertiesDialog.py
|
Python
|
gpl-3.0
| 4,642
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.12 (https://github.com/warner/python-versioneer)
# these strings will be replaced by git during git-archive
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
# these strings are filled in when 'setup.py versioneer' creates _version.py
tag_prefix = ""
parentdir_prefix = "soapy-"
versionfile_source = "soapy/_version.py"
import os, sys, re, subprocess, errno
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % args[0])
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % args[0])
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs,"r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
if not keywords:
return {} # keyword-finding function failed to find keywords
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return { "version": r,
"full": keywords["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": keywords["full"].strip(),
"full": keywords["full"].strip() }
def git_versions_from_vcs(tag_prefix, root, verbose=False):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
return {}
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
keywords = { "refnames": git_refnames, "full": git_full }
ver = git_versions_from_keywords(keywords, tag_prefix, verbose)
if ver:
return ver
try:
root = os.path.abspath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in range(len(versionfile_source.split(os.sep))):
root = os.path.dirname(root)
except NameError:
return default
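    # Fallback order: expanded git-archive keywords (handled above), then
    # "git describe" in a live checkout, then the parent directory name,
    # and finally the supplied default.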
return (git_versions_from_vcs(tag_prefix, root, verbose)
or versions_from_parentdir(parentdir_prefix, root, verbose)
or default)
|
andrewpaulreeves/soapy
|
soapy/_version.py
|
Python
|
gpl-3.0
| 7,416
|
# Support for the Dublin Core metadata extensions
# Copyright 2010-2020 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from ..datetimes import _parse_date
from ..util import FeedParserDict
class Namespace(object):
supported_namespaces = {
'http://purl.org/dc/elements/1.1/': 'dc',
'http://purl.org/dc/terms/': 'dcterms',
}
def _end_dc_author(self):
self._end_author()
def _end_dc_creator(self):
self._end_author()
def _end_dc_date(self):
self._end_updated()
def _end_dc_description(self):
self._end_description()
def _end_dc_language(self):
self._end_language()
def _end_dc_publisher(self):
self._end_webmaster()
def _end_dc_rights(self):
self._end_rights()
def _end_dc_subject(self):
self._end_category()
def _end_dc_title(self):
self._end_title()
def _end_dcterms_created(self):
self._end_created()
def _end_dcterms_issued(self):
self._end_published()
def _end_dcterms_modified(self):
self._end_updated()
def _start_dc_author(self, attrs_d):
self._start_author(attrs_d)
def _start_dc_creator(self, attrs_d):
self._start_author(attrs_d)
def _start_dc_date(self, attrs_d):
self._start_updated(attrs_d)
def _start_dc_description(self, attrs_d):
self._start_description(attrs_d)
def _start_dc_language(self, attrs_d):
self._start_language(attrs_d)
def _start_dc_publisher(self, attrs_d):
self._start_webmaster(attrs_d)
def _start_dc_rights(self, attrs_d):
self._start_rights(attrs_d)
def _start_dc_subject(self, attrs_d):
self._start_category(attrs_d)
def _start_dc_title(self, attrs_d):
self._start_title(attrs_d)
def _start_dcterms_created(self, attrs_d):
self._start_created(attrs_d)
def _start_dcterms_issued(self, attrs_d):
self._start_published(attrs_d)
def _start_dcterms_modified(self, attrs_d):
self._start_updated(attrs_d)
def _start_dcterms_valid(self, attrs_d):
self.push('validity', 1)
def _end_dcterms_valid(self):
for validity_detail in self.pop('validity').split(';'):
if '=' in validity_detail:
key, value = validity_detail.split('=', 1)
if key == 'start':
self._save('validity_start', value, overwrite=True)
self._save('validity_start_parsed', _parse_date(value), overwrite=True)
elif key == 'end':
self._save('validity_end', value, overwrite=True)
self._save('validity_end_parsed', _parse_date(value), overwrite=True)
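        # A dcterms:valid value typically looks like
        # "start=2002-10-13T09:00+01:00;end=2002-10-17T17:00+01:00"
        # (example value, not taken from a real feed); the loop above splits
        # it into start/end parts and stores both the raw and parsed dates.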
def _start_dc_contributor(self, attrs_d):
self.incontributor = 1
context = self._get_context()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('name', 0)
def _end_dc_contributor(self):
self._end_name()
self.incontributor = 0
|
SickGear/SickGear
|
lib/feedparser_py3/namespaces/dc.py
|
Python
|
gpl-3.0
| 4,446
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Messages used in the Guide module."""
__author__ = [
'davyrisso@google.com (Davy Risso)',
]
MODULE_DESCRIPTION = """
Guide: A new learning experience module. An alternative to the default course
explorer and course experience.
"""
SITE_SETTINGS_GUIDE = """
If True, Guide will be accessible at /modules/guide.
"""
COURSE_SETTINGS_COLOR_DESCRIPTION = """
The color scheme for this course\'s guide. Must be expressed as a web color name
or hex triplet beginning with a "#".
If blank, Material Cyan 500 (#00bcd4) will be used.
"""
COURSE_SETTINGS_ENABLED_DESCRIPTION = """
If checked, this course will be included in the guides experience accessible at
/modules/guide. Course must not be Private.
"""
COURSE_SETTINGS_LESSON_DURATION_DESCRIPTION = """
Specify the average length of each lesson in the course in minutes and it will
be used to estimate the duration of each guide.
If blank or set to 0, duration will not be shown.
"""
|
GirlsCodePy/girlscode-coursebuilder
|
modules/guide/messages.py
|
Python
|
gpl-3.0
| 1,542
|
"""
Django admin dashboard configuration for LMS XBlock infrastructure.
"""
from config_models.admin import ConfigurationModelAdmin
from django.contrib import admin
from cms.djangoapps.xblock_config.models import StudioConfig
admin.site.register(StudioConfig, ConfigurationModelAdmin)
|
eduNEXT/edx-platform
|
cms/djangoapps/xblock_config/admin.py
|
Python
|
agpl-3.0
| 290
|
# -*- coding: utf-8 -*-
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Workout Manager. If not, see <http://www.gnu.org/licenses/>.
from wger import get_version
VERSION = get_version()
default_app_config = 'wger.email.apps.Config'
|
petervanderdoes/wger
|
wger/email/__init__.py
|
Python
|
agpl-3.0
| 839
|
sticker_list = {
'team2': u'BQADBAADGQADjGt_DbsN-MC0jtwKAg',
'team1': u'BQADBAADHQADjGt_DWh6bmUsF_GeAg',
'team3': u'BQADBAADGwADjGt_DcVn_7MprUVwAg',
'team0': u'BQADBAADHwADjGt_DWwRwTlwYq71Ag',
'pokestop': u'BQADBAADFwADjGt_DaQ-N7VAroZ7Ag',
'1': u'BQADBAADJwADA6ZnAU_NNRcf64d1Ag',
'2': u'BQADBAADKQADA6ZnAd09yoh66pIfAg',
'3': u'BQADBAADKwADA6ZnAdELYlYwzKbIAg',
'4': u'BQADBAADLQADA6ZnAYnKc2Dr3jUmAg',
'5': u'BQADBAADLwADA6ZnAch60dZ72R0AAQI',
'6': u'BQADBAADMQADA6ZnAYFZ_5qjNAytAg',
'7': u'BQADBAADMwADA6ZnAY9noVGNdiDVAg',
'8': u'BQADBAADNQADA6ZnAZpcNOcyNj1PAg',
'9': u'BQADBAADNwADA6ZnAUhWLdtsQ7q9Ag',
'10': u'BQADBAADOQADA6ZnARvXmxliihkVAg',
'11': u'BQADBAADOwADA6ZnASym7G_LJ1tkAg',
'12': u'BQADBAADPQADA6ZnAa2iOXvsY3aCAg',
'13': u'BQADBAADPwADA6ZnAb_csYpSRfRTAg',
'14': u'BQADBAADQQADA6ZnAfTUFQcCDmUNAg',
'15': u'BQADBAADQwADA6ZnAT_h8QyLW6DVAg',
'16': u'BQADBAADRQADA6ZnAVLPw-jNWf5_Ag',
'17': u'BQADBAADRwADA6ZnAdE1sU4M-3SQAg',
'18': u'BQADBAADSQADA6ZnARgu9mEkMLquAg',
'19': u'BQADBAADSwADA6ZnAV9fPY_XhW5_Ag',
'20': u'BQADBAADTQADA6ZnAZ3-Y-eQocbyAg',
'21': u'BQADBAADTwADA6ZnAfk9lxoXOwZ_Ag',
'22': u'BQADBAADUQADA6ZnAShVKm8X2wgSAg',
'23': u'BQADBAADUwADA6ZnARauEgMgbEB9Ag',
'24': u'BQADBAADVQADA6ZnARtDICyQZuaiAg',
'25': u'BQADBAADVwADA6ZnAczvFE7dfBYLAg',
'26': u'BQADBAADWQADA6ZnAc_Lh0IvZdxqAg',
'27': u'BQADBAADWwADA6ZnAXwre3efyX0kAg',
'28': u'BQADBAADXQADA6ZnAZQssQGzd6_qAg',
'29': u'BQADBAADXwADA6ZnAdb_etFcQm7QAg',
'30': u'BQADBAADYQADA6ZnAdrCKGPIRCLTAg',
'31': u'BQADBAADYwADA6ZnAQm3s1oUdXs_Ag',
'32': u'BQADBAADZQADA6ZnAfZdxDqZduP0Ag',
'33': u'BQADBAADZwADA6ZnAWtHR2JimNBEAg',
'34': u'BQADBAADaQADA6ZnAUhBuBB0KHolAg',
'35': u'BQADBAADawADA6ZnAe4Ky5ABuoufAg',
'36': u'BQADBAADbQADA6ZnAZxHQ-pwTVpeAg',
'37': u'BQADBAADbwADA6ZnAQaiwVEnAsQAAQI',
'38': u'BQADBAADcQADA6ZnATXHBNQUYTd4Ag',
'39': u'BQADBAADcwADA6ZnAWOU_YY2d7V0Ag',
'40': u'BQADBAADdQADA6ZnAdGnBQjf7dvLAg',
'41': u'BQADBAADdwADA6ZnAYIOutGsHR2iAg',
'42': u'BQADBAADeQADA6ZnAYlGQQR6zqK8Ag',
'43': u'BQADBAADewADA6ZnAYXXJSAdu6WwAg',
'44': u'BQADBAADfQADA6ZnAZltrfQww-etAg',
'45': u'BQADBAADfwADA6ZnAYtRA7ak5aE_Ag',
'46': u'BQADBAADgwADA6ZnAaDpym5nwE-ZAg',
'47': u'BQADBAADhQADA6ZnAczzQQaLLLeVAg',
'48': u'BQADBAADhwADA6ZnASegVAKAdmKQAg',
'49': u'BQADBAADiQADA6ZnASx88dHmKk9MAg',
'50': u'BQADBAADiwADA6ZnAdYkifTqrBHhAg',
'51': u'BQADBAADjQADA6ZnAb5iLRuZ-MT7Ag',
'52': u'BQADBAADjwADA6ZnAUXZy7haaa38Ag',
'53': u'BQADBAADkQADA6ZnAfYI5xOxllwNAg',
'54': u'BQADBAADkwADA6ZnAR4w9K2ezyuhAg',
'55': u'BQADBAADlQADA6ZnAWIuwUz5NlbVAg',
'56': u'BQADBAADlwADA6ZnAQ6i5D8LOvWuAg',
'57': u'BQADBAADmQADA6ZnAR_MKmfUQLE2Ag',
'58': u'BQADBAADmwADA6ZnAfLEuQot6KjjAg',
'59': u'BQADBAADnQADA6ZnAUDhsV5f7YZGAg',
'60': u'BQADBAADnwADA6ZnARTW2z_2HCD1Ag',
'61': u'BQADBAADoQADA6ZnAVARDDhpUG3yAg',
'62': u'BQADBAADowADA6ZnARH-XrV88RQ8Ag',
'63': u'BQADBAADpQADA6ZnAZCJIefeAAEmfgI',
'64': u'BQADBAADpwADA6ZnAZ0Uk9tIQ6NiAg',
'65': u'BQADBAADqQADA6ZnAbfKD0i88yCRAg',
'66': u'BQADBAADqwADA6ZnAbQ1d0JRxcUNAg',
'67': u'BQADBAADrQADA6ZnAR5fcBQMcOxzAg',
'68': u'BQADBAADrwADA6ZnATmnBCXu9W1iAg',
'69': u'BQADBAADsQADA6ZnAZVmTiH9CziuAg',
'70': u'BQADBAADswADA6ZnAeLr9NrxPNyeAg',
'71': u'BQADBAADtQADA6ZnAS3peyUC2wGZAg',
'72': u'BQADBAADtwADA6ZnAV1BjJAGRZVyAg',
'73': u'BQADBAADuQADA6ZnAVd-oIO7g8KtAg',
'74': u'BQADBAADuwADA6ZnAXgaugJm0yQlAg',
'75': u'BQADBAADvQADA6ZnAQiU9hGdGHuZAg',
'76': u'BQADBAADvwADA6ZnAdfGBGghX3STAg',
'77': u'BQADBAADwQADA6ZnAQNiLcVCAZL4Ag',
'78': u'BQADBAADwwADA6ZnAR3Rw1pdnw1xAg',
'79': u'BQADBAADxQADA6ZnAYpnLOW8sg5IAg',
'80': u'BQADBAADxwADA6ZnAdglk9tiIFj5Ag',
'81': u'BQADBAADyQADA6ZnAc6qChQtQtWkAg',
'82': u'BQADBAADywADA6ZnAcT80CaV62YoAg',
'83': u'BQADBAADzQADA6ZnAZDUR-BgCwEvAg',
'84': u'BQADBAADzwADA6ZnATD0sBYypMYJAg',
'85': u'BQADBAAD0QADA6ZnAY3-Cyah9zVlAg',
'86': u'BQADBAAD0wADA6ZnAYbqNqa5KKO7Ag',
'87': u'BQADBAAD1QADA6ZnAbZfqdPcBtliAg',
'88': u'BQADBAAD1wADA6ZnAfC9p8aaxoIDAg',
'89': u'BQADBAAD2QADA6ZnAeTuuVW1rAPoAg',
'90': u'BQADBAAD2wADA6ZnAWik5KbprDonAg',
'91': u'BQADBAAD3QADA6ZnASfqqi93beFFAg',
'92': u'BQADBAAD3wADA6ZnAW5e6QABKyvL2gI',
'93': u'BQADBAAD4QADA6ZnAQO3sab51TBQAg',
'94': u'BQADBAAD4wADA6ZnAcbzlaR74bh1Ag',
'95': u'BQADBAAD5QADA6ZnAeaHKtrdS1ieAg',
'96': u'BQADBAAD5wADA6ZnAZOxZx2CQbenAg',
'97': u'BQADBAAD6QADA6ZnAcPZQIXfUCofAg',
'98': u'BQADBAAD6wADA6ZnAW7_9VABjZC7Ag',
'99': u'BQADBAAD7QADA6ZnARahOsYP9y_iAg',
'100': u'BQADBAAD7wADA6ZnAf5IfBcs43EnAg',
'101': u'BQADBAAD8QADA6ZnAYJzl8obPhdiAg',
'102': u'BQADBAAD8wADA6ZnAW4EaA1_40PUAg',
'103': u'BQADBAAD9QADA6ZnASfbL8QOivbLAg',
'104': u'BQADBAAD9wADA6ZnAVFRHQIyyyCTAg',
'105': u'BQADBAAD-QADA6ZnAe18tSbN3mg9Ag',
'106': u'BQADBAAD-wADA6ZnAYprm88bDRBPAg',
'107': u'BQADBAAD_QADA6ZnAVhB3fE3vG3pAg',
'108': u'BQADBAAD_wADA6ZnAVoEJINLedE1Ag',
'109': u'BQADBAADAQEAAgOmZwE-d2b4Nxg6LwI',
'110': u'BQADBAADAwEAAgOmZwGH2IdZFV8JXQI',
'111': u'BQADBAADBQEAAgOmZwH1F1U5livuSQI',
'112': u'BQADBAADBwEAAgOmZwEu1poyBbPzXAI',
'113': u'BQADBAADCQEAAgOmZwHRVMUpYv-MnQI',
'114': u'BQADBAADCwEAAgOmZwFZ6eS2xms8GgI',
'115': u'BQADBAADDQEAAgOmZwFbl03M95cnBQI',
'116': u'BQADBAADDwEAAgOmZwH5q7bPIgfzIwI',
'117': u'BQADBAADEQEAAgOmZwF9_sqDyPjSFAI',
'118': u'BQADBAADEwEAAgOmZwHguDaRDVS_GwI',
'119': u'BQADBAADFQEAAgOmZwFMvdJ3CJDpBgI',
'120': u'BQADBAADFwEAAgOmZwESwagGI_1AdAI',
'121': u'BQADBAADGQEAAgOmZwHNZ6AtDtCUBQI',
'122': u'BQADBAADGwEAAgOmZwHLLG4dJeRTngI',
'123': u'BQADBAADHQEAAgOmZwGxIrhZrs-EGwI',
'124': u'BQADBAADHwEAAgOmZwFwt6UBRQ7ZVwI',
'125': u'BQADBAADIQEAAgOmZwHgcxbQ2tXDjQI',
'126': u'BQADBAADIwEAAgOmZwEmpL-0DK3MTwI',
'127': u'BQADBAADJQEAAgOmZwHo2KUZO3dSfQI',
'128': u'BQADBAADJwEAAgOmZwGRDu7XxZiP9AI',
'129': u'BQADBAADKQEAAgOmZwE0WAxT_sGt9wI',
'130': u'BQADBAADKwEAAgOmZwFf0MZx4hcpFQI',
'131': u'BQADBAADLQEAAgOmZwGCkMb-tGKOygI',
'132': u'BQADBAADLwEAAgOmZwH5uFg9NRih5AI',
'133': u'BQADBAADMQEAAgOmZwHiKjbIOGRl8wI',
'134': u'BQADBAADMwEAAgOmZwFY73Kan3u1UwI',
'135': u'BQADBAADNQEAAgOmZwFEAAHEr16bCfcC',
'136': u'BQADBAADNwEAAgOmZwGU8_r_UJoJegI',
'137': u'BQADBAADOQEAAgOmZwGzsiSgsj48PwI',
'138': u'BQADBAADOwEAAgOmZwEI4s3KAAGLuyMC',
'139': u'BQADBAADPQEAAgOmZwHI0KoayDq32QI',
'140': u'BQADBAADPwEAAgOmZwGXHmZFN05qzgI',
'141': u'BQADBAADQQEAAgOmZwHNL5S5J1Z51wI',
'142': u'BQADBAADQwEAAgOmZwEBK637B36WMwI',
'143': u'BQADBAADRQEAAgOmZwEEErgqOkQetQI',
'144': u'BQADBAADRwEAAgOmZwEbDH46CQYmXgI',
'145': u'BQADBAADSQEAAgOmZwFO7ib_KRXZ5QI',
'146': u'BQADBAADSwEAAgOmZwFMJwbowpBd9wI',
'147': u'BQADBAADTQEAAgOmZwEajM0L9YOSUwI',
'148': u'BQADBAADTwEAAgOmZwF3J738dLZ8iQI',
'149': u'BQADBAADUQEAAgOmZwEUPARb8W-uvAI',
'150': u'BQADBAADUwEAAgOmZwF8IVKsxLqftgI',
'151': u'BQADBAADVQEAAgOmZwH-tIRiBimCmwI',
'152': u'CAADBAADWgIAAhbdEgMf593pc4UmBwI',
'153': u'CAADBAADXAIAAhbdEgP4i4HwVUv31QI',
'154': u'CAADBAADXgIAAhbdEgMLmMzxK9_5PwI',
'155': u'CAADBAADYAIAAhbdEgPtirXKvfPtUwI',
'156': u'CAADBAADYgIAAhbdEgMVkPOcrqtkyAI',
'157': u'CAADBAADZAIAAhbdEgNyakbWKdiFRwI',
'158': u'CAADBAADZgIAAhbdEgNOiNqjw1aX4QI',
'159': u'CAADBAADaAIAAhbdEgOS_FsmF-cO5AI',
'160': u'CAADBAADagIAAhbdEgN2pgbUc0FfqAI',
'161': u'CAADBAADbAIAAhbdEgMslcsxBE33igI',
'162': u'CAADBAADbgIAAhbdEgOA15GCWGdKyQI',
'163': u'CAADBAADcAIAAhbdEgO4gqkOc_JkeQI',
'164': u'CAADBAADcgIAAhbdEgPThvF5dte_CwI',
'165': u'CAADBAADdAIAAhbdEgMww4lRxqx9twI',
'166': u'CAADBAADdgIAAhbdEgM_NFeWSM51IwI',
'167': u'CAADBAADeAIAAhbdEgNj4yEkiOncTwI',
'168': u'CAADBAADegIAAhbdEgNy4l8sv-gdZgI',
'169': u'CAADBAADfAIAAhbdEgMD1IGfuKU8cwI',
'170': u'CAADBAADfgIAAhbdEgMgx08g-F9RaAI',
'171': u'CAADBAADgAIAAhbdEgNtCDc2c29-EgI',
'172': u'CAADBAADggIAAhbdEgM46jcuUeAPnwI',
'173': u'CAADBAADhAIAAhbdEgNacuxKYf10WAI',
'174': u'CAADBAADhgIAAhbdEgPncTssuLm98gI',
'175': u'CAADBAADiAIAAhbdEgOO288-HHFmGgI',
'176': u'CAADBAADigIAAhbdEgO0ITM5Xrba-QI',
'177': u'CAADBAADjAIAAhbdEgNNKiQzgVL5kAI',
'178': u'CAADBAADjgIAAhbdEgN9bKWuPat0nQI',
'179': u'CAADBAADkAIAAhbdEgNU9jvN8-hwcgI',
'180': u'CAADBAADkgIAAhbdEgO-HM_qoDgCkQI',
'181': u'CAADBAADlAIAAhbdEgPEjyw0hgjz-QI',
'182': u'CAADBAADmAIAAhbdEgOvzgr5IKa7MAI',
'183': u'CAADBAADmgIAAhbdEgPgcoaJOSWvPQI',
'184': u'CAADBAADnAIAAhbdEgMu7J_0p6bUBwI',
'185': u'CAADBAADngIAAhbdEgOb2tWqlQO3IgI',
'186': u'CAADBAADoAIAAhbdEgMtg1U3WLck6QI',
'187': u'CAADBAADogIAAhbdEgPPgGWn0wFubgI',
'188': u'CAADBAADpAIAAhbdEgORfyN_Q4GrZgI',
'189': u'CAADBAADpgIAAhbdEgPH_4i_2d5nqAI',
'190': u'CAADBAADqAIAAhbdEgMpaK20pnFPfQI',
'191': u'CAADBAADqgIAAhbdEgND3bnO3V5SygI',
'192': u'CAADBAADrAIAAhbdEgNQ9aYkBsxEugI',
'193': u'CAADBAADrgIAAhbdEgPC9ShjtQnr6QI',
'194': u'CAADBAADsAIAAhbdEgNmMLLRfv8wUwI',
'195': u'CAADBAADsgIAAhbdEgOcYW0MXmuDxQI',
'196': u'CAADBAADtAIAAhbdEgOr3eX3q0zizwI',
'197': u'CAADBAADtgIAAhbdEgO6u-NPel8SeQI',
'198': u'CAADBAADuAIAAhbdEgMj2oRbvu_N9gI',
'199': u'CAADBAADugIAAhbdEgM4lBs5yXS7RQI',
'200': u'CAADBAADvAIAAhbdEgMWL8-VJo3vhAI',
'201': u'CAADBAADvgIAAhbdEgNp9iz1a2Yx5QI',
'202': u'CAADBAADwAIAAhbdEgNOQyXZ8rCbdgI',
'203': u'CAADBAADwgIAAhbdEgNuLG8U4juQ0QI',
'204': u'CAADBAADxAIAAhbdEgM41b3e-U1enwI',
'205': u'CAADBAADxgIAAhbdEgMyDxZU61FZbwI',
'206': u'CAADBAADyAIAAhbdEgPIsW1lmj0xgwI',
'207': u'CAADBAADygIAAhbdEgP1OGuh9jrXZgI',
'208': u'CAADBAADzAIAAhbdEgOW99g4o0Ln4QI',
'209': u'CAADBAAD0AIAAhbdEgMAAccllaBpoucC',
'210': u'CAADBAAD0gIAAhbdEgMXBuxGie69qQI',
'211': u'CAADBAAD1AIAAhbdEgPRZhqA_MEbBQI',
'212': u'CAADBAAD1gIAAhbdEgPnI0Q-ISAC7wI',
'213': u'CAADBAAD2gIAAhbdEgP7ZD5eMcW5VwI',
'214': u'CAADBAAD3AIAAhbdEgMiFSDjlF0crwI',
'215': u'CAADBAAD4AIAAhbdEgN4P-MKvhu4KwI',
'216': u'CAADBAAD4gIAAhbdEgPIcSN5r0X5EwI',
'217': u'CAADBAAD5AIAAhbdEgM9Ij8je6GFBgI',
'218': u'CAADBAAD5gIAAhbdEgP0BydRyeQZVQI',
'219': u'CAADBAAD6AIAAhbdEgNcV7uiU4XWpAI',
'220': u'CAADBAAD6gIAAhbdEgPjZ27Yh8IkCgI',
'221': u'CAADBAAD7AIAAhbdEgMjWL5LbexpYgI',
'222': u'CAADBAAD7gIAAhbdEgNVsGHcM8KHRQI',
'223': u'CAADBAAD8AIAAhbdEgNIj9qwSYaKSgI',
'224': u'CAADBAAD8gIAAhbdEgP3g7AevgEX4QI',
'225': u'CAADBAAD9AIAAhbdEgMInnZKBz0VyAI',
'226': u'CAADBAAD9gIAAhbdEgPwyu5KBOMCHgI',
'227': u'CAADBAAD-AIAAhbdEgPhWWMOKqfc5QI',
'228': u'CAADBAAD-gIAAhbdEgNTsmpym2OqGQI',
'229': u'CAADBAAD_AIAAhbdEgMD4ms1g84dmAI',
'230': u'CAADBAAEAwACFt0SA-Dw8Y1izCeWAg',
'231': u'CAADBAADAgMAAhbdEgMte28mw8voKAI',
'232': u'CAADBAADBAMAAhbdEgMc_kBcVUUB1AI',
'233': u'CAADBAADBgMAAhbdEgNlVd8DjXG6WgI',
'234': u'CAADBAADCAMAAhbdEgNXRVkaxVrHqgI',
'235': u'CAADBAADCgMAAhbdEgNprRg5xauRPgI',
'236': u'CAADBAADDAMAAhbdEgO8xXd4TvV7YgI',
'237': u'CAADBAADDgMAAhbdEgMrVgvNPt-bIAI',
'238': u'CAADBAADEAMAAhbdEgM5FSrqsTJbRwI',
'239': u'CAADBAADEgMAAhbdEgNNoAwrWTOL-wI',
'240': u'CAADBAADFAMAAhbdEgORT_IVXrqKPwI',
'241': u'CAADBAADFgMAAhbdEgMccfarSdKdwgI',
'242': u'CAADBAADGAMAAhbdEgMoehnIZxxZmQI',
'243': u'CAADBAADGgMAAhbdEgNDW4kkZDk7hwI',
'244': u'CAADBAADHAMAAhbdEgOq_pMtZS0nvAI',
'245': u'CAADBAADHgMAAhbdEgPoNhBxJ8NCSQI',
'246': u'CAADBAADIAMAAhbdEgMSZfgoLPtlWQI',
'247': u'CAADBAADIgMAAhbdEgMqa2EpPhN5pwI',
'248': u'CAADBAADJAMAAhbdEgNkb6U3nM_UaQI',
'249': u'CAADBAADKAMAAhbdEgMyLM_v2mhSggI',
'250': u'CAADBAADKgMAAhbdEgMQfs7GjVk5pwI',
'251': u'CAADBAADLAMAAhbdEgMcJTlcK_QQOgI',
}
|
xc0ut/PokeAlarm
|
PokeAlarm/Telegram/Stickers.py
|
Python
|
agpl-3.0
| 11,849
|
# This script is used to import boundary polygons and other information
# from the ONS's CD-ROM of Super Output Areas for England and Wales.
# Information about the CD-ROM here: http://bit.ly/63bX97
# Run as: ./manage.py mapit_UK_import_soa shapefile.shp
from django.core.management.base import LabelCommand
from django.contrib.gis.gdal import DataSource
from mapit.models import Area, Generation, Country, Type, NameType, CodeType
class Command(LabelCommand):
help = 'Creates Super Output Area boundaries from ONS shapefiles'
args = '<ONS SOA shapefile>'
def handle_label(self, filename, **options):
print(filename)
generation = Generation.objects.current()
short_filename = filename.split("/")[-1]
filename_prefix = short_filename[:4]
filename_suffix = short_filename.split(".")[0][-3:]
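        # e.g. a hypothetical "LSOA_2004_BGC.shp" gives prefix "LSOA" and
        # suffix "BGC", which the branch below maps to area type 'OLG'.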
# check shapefile type - we handle both LSOA and MSOA
if filename_prefix == "LSOA":
feat_name = 'LSOA04NM'
feat_code = 'LSOA04CD'
if filename_suffix == 'BGC':
area_type = 'OLG'
else:
area_type = 'OLF'
elif filename_prefix == "MSOA":
feat_name = 'MSOA04NM'
feat_code = 'MSOA04CD'
if filename_suffix == 'BGC':
area_type = 'OMG'
else:
area_type = 'OMF'
else:
raise Exception("Sorry, this script only handles LSOA/MSOA shapefiles!")
ds = DataSource(filename)
layer = ds[0]
for feat in layer:
# retrieve name and code, and set country
name = feat[feat_name].value
lsoa_code = feat[feat_code].value
country = lsoa_code[0]
# skip if the SOA already exists in db (SOAs don't change)
if Area.objects.filter(type__code=area_type, codes__code=lsoa_code).count():
continue
print("Adding %s (%s) %s" % (name, lsoa_code, feat.geom.geom_name))
m = Area(
type=Type.objects.get(code=area_type),
country=Country.objects.get(code=country),
generation_low=generation,
generation_high=generation,
)
m.save()
m.names.update_or_create(type=NameType.objects.get(code='S'), defaults={'name': name})
m.codes.update_or_create(type=CodeType.objects.get(code='ons'), defaults={'code': lsoa_code})
p = feat.geom
if p.geom_name == 'POLYGON':
shapes = [p]
else:
shapes = p
for g in shapes:
m.polygons.create(polygon=g.wkt)
|
chris48s/mapit
|
mapit_gb/management/commands/mapit_UK_import_soa.py
|
Python
|
agpl-3.0
| 2,689
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Caffe(CMakePackage):
"""Caffe is a deep learning framework made with expression, speed, and
modularity in mind. It is developed by the Berkeley Vision and Learning
Center (BVLC) and by community contributors."""
homepage = "http://caffe.berkeleyvision.org"
url = "https://github.com/BVLC/caffe/archive/1.0.tar.gz"
version('1.0', sha256='71d3c9eb8a183150f965a465824d01fe82826c22505f7aa314f700ace03fa77f')
version('rc5', sha256='06592aa8f5254335df3e244dafacc15765e2c60479b4bf2e7c887e8e023802fb')
version('rc4', sha256='018792411d75ee34b6107216550cca2a1d668d45cb366033ba3c647e6a3018df')
version('rc3', sha256='0884207bfba0fbc8b263b87d30f9304f7094eec3a48f975177d142f8c72b6e3b')
version('rc2', sha256='55c9c20870b30ce398e19e4f1a62ade1eff08fce51e28fa5604035b711978eec')
variant('cuda', default=False,
description='Builds with support for GPUs via CUDA and cuDNN')
variant('opencv', default=True,
description='Build with OpenCV support')
variant('leveldb', default=True,
description="Build with levelDB")
variant('lmdb', default=True,
description="Build with lmdb")
variant('python', default=False,
description='Build python wrapper and caffe python layer')
variant('matlab', default=False,
description='Build Matlab wrapper')
depends_on('boost')
depends_on('boost +python', when='+python')
depends_on('cuda', when='+cuda')
depends_on('blas')
depends_on('protobuf')
depends_on('glog')
depends_on('gflags')
depends_on('hdf5 +hl +cxx')
# Optional dependencies
depends_on('opencv@3.2.0+core+highgui+imgproc', when='+opencv')
depends_on('leveldb', when='+leveldb')
depends_on('lmdb', when='+lmdb')
depends_on('python@2.7:', when='+python')
depends_on('py-numpy@1.7:', when='+python', type=('build', 'run'))
depends_on('matlab', when='+matlab')
extends('python', when='+python')
def cmake_args(self):
spec = self.spec
args = ['-DBLAS={0}'.format('open' if spec['blas'].name == 'openblas'
else spec['blas'].name),
'-DCPU_ONLY=%s' % ('~cuda' in spec),
'-DUSE_CUDNN=%s' % ('+cuda' in spec),
'-DBUILD_python=%s' % ('+python' in spec),
'-DBUILD_python_layer=%s' % ('+python' in spec),
'-DBUILD_matlab=%s' % ('+matlab' in spec),
'-DUSE_OPENCV=%s' % ('+opencv' in spec),
'-DUSE_LEVELDB=%s' % ('+leveldb' in spec),
'-DUSE_LMDB=%s' % ('+lmdb' in spec),
'-DGFLAGS_ROOT_DIR=%s' % spec['gflags'].prefix,
'-DGLOG_ROOT_DIR=%s' % spec['glog'].prefix,
]
if spec.satisfies('^openblas'):
env['OpenBLAS_HOME'] = spec['openblas'].prefix
if spec.satisfies('+lmdb'):
env['LMDB_DIR'] = spec['lmdb'].prefix
if spec.satisfies('+leveldb'):
env['LEVELDB_ROOT'] = spec['leveldb'].prefix
if spec.satisfies('+python'):
version = spec['python'].version.up_to(1)
args.append('-Dpython_version=%s' % version)
if spec['hdf5'].satisfies('+mpi'):
args.extend([
'-DCMAKE_C_COMPILER={0}'.format(self.spec['mpi'].mpicc),
'-DCMAKE_CXX_COMPILER={0}'.format(self.spec['mpi'].mpicxx)
])
return args
|
iulian787/spack
|
var/spack/repos/builtin/packages/caffe/package.py
|
Python
|
lgpl-2.1
| 3,670
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyAstunparse(PythonPackage):
"""An AST unparser for Python.
This is a factored out version of unparse found in the Python source
distribution; under Demo/parser in Python 2 and under Tools/parser in
Python 3."""
homepage = "https://pypi.org/project/astunparse/"
url = "https://pypi.io/packages/source/a/astunparse/astunparse-1.6.2.tar.gz"
version('1.6.3', sha256='5ad93a8456f0d084c3456d059fd9a92cce667963232cbf763eac3bc5b7940872')
version('1.6.2', sha256='dab3e426715373fd76cd08bb1abe64b550f5aa494cf1e32384f26fd60961eb67')
depends_on('py-setuptools', type='build')
depends_on('py-wheel@0.23.0:0.99.99', type=('build', 'run'))
depends_on('py-six@1.6.1:1.99.99', type=('build', 'run'))
|
iulian787/spack
|
var/spack/repos/builtin/packages/py-astunparse/package.py
|
Python
|
lgpl-2.1
| 976
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Revision.author'
db.alter_column(u'codespeed_revision', 'author', self.gf('django.db.models.fields.CharField')(max_length=100))
# Changing field 'Environment.kernel'
db.alter_column(u'codespeed_environment', 'kernel', self.gf('django.db.models.fields.CharField')(max_length=100))
# Changing field 'Environment.name'
db.alter_column(u'codespeed_environment', 'name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=100))
# Changing field 'Environment.memory'
db.alter_column(u'codespeed_environment', 'memory', self.gf('django.db.models.fields.CharField')(max_length=100))
# Changing field 'Environment.os'
db.alter_column(u'codespeed_environment', 'os', self.gf('django.db.models.fields.CharField')(max_length=100))
# Changing field 'Environment.cpu'
db.alter_column(u'codespeed_environment', 'cpu', self.gf('django.db.models.fields.CharField')(max_length=100))
# Changing field 'Benchmark.name'
db.alter_column(u'codespeed_benchmark', 'name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=100))
def backwards(self, orm):
# Changing field 'Revision.author'
db.alter_column(u'codespeed_revision', 'author', self.gf('django.db.models.fields.CharField')(max_length=30))
# Changing field 'Environment.kernel'
db.alter_column(u'codespeed_environment', 'kernel', self.gf('django.db.models.fields.CharField')(max_length=30))
# Changing field 'Environment.name'
db.alter_column(u'codespeed_environment', 'name', self.gf('django.db.models.fields.CharField')(max_length=30, unique=True))
# Changing field 'Environment.memory'
db.alter_column(u'codespeed_environment', 'memory', self.gf('django.db.models.fields.CharField')(max_length=30))
# Changing field 'Environment.os'
db.alter_column(u'codespeed_environment', 'os', self.gf('django.db.models.fields.CharField')(max_length=30))
# Changing field 'Environment.cpu'
db.alter_column(u'codespeed_environment', 'cpu', self.gf('django.db.models.fields.CharField')(max_length=30))
# Changing field 'Benchmark.name'
db.alter_column(u'codespeed_benchmark', 'name', self.gf('django.db.models.fields.CharField')(max_length=30, unique=True))
models = {
u'codespeed.benchmark': {
'Meta': {'object_name': 'Benchmark'},
'benchmark_type': ('django.db.models.fields.CharField', [], {'default': "'C'", 'max_length': '1'}),
'default_on_comparison': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lessisbetter': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['codespeed.Benchmark']", 'null': 'True', 'blank': 'True'}),
'units': ('django.db.models.fields.CharField', [], {'default': "'seconds'", 'max_length': '20'}),
'units_title': ('django.db.models.fields.CharField', [], {'default': "'Time'", 'max_length': '30'})
},
u'codespeed.branch': {
'Meta': {'unique_together': "(('name', 'project'),)", 'object_name': 'Branch'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'branches'", 'to': u"orm['codespeed.Project']"})
},
u'codespeed.environment': {
'Meta': {'object_name': 'Environment'},
'cpu': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kernel': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'memory': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'os': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'codespeed.executable': {
'Meta': {'unique_together': "(('name', 'project'),)", 'object_name': 'Executable'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'executables'", 'to': u"orm['codespeed.Project']"})
},
u'codespeed.project': {
'Meta': {'object_name': 'Project'},
'commit_browsing_url': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'repo_pass': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'repo_path': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'repo_type': ('django.db.models.fields.CharField', [], {'default': "'N'", 'max_length': '1'}),
'repo_user': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'track': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'codespeed.report': {
'Meta': {'unique_together': "(('revision', 'executable', 'environment'),)", 'object_name': 'Report'},
'_tablecache': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'colorcode': ('django.db.models.fields.CharField', [], {'default': "'none'", 'max_length': '10'}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reports'", 'to': u"orm['codespeed.Environment']"}),
'executable': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reports'", 'to': u"orm['codespeed.Executable']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'revision': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reports'", 'to': u"orm['codespeed.Revision']"}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'})
},
u'codespeed.result': {
'Meta': {'unique_together': "(('revision', 'executable', 'benchmark', 'environment'),)", 'object_name': 'Result'},
'benchmark': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'results'", 'to': u"orm['codespeed.Benchmark']"}),
'date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'results'", 'to': u"orm['codespeed.Environment']"}),
'executable': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'results'", 'to': u"orm['codespeed.Executable']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'revision': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'results'", 'to': u"orm['codespeed.Revision']"}),
'std_dev': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'val_max': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'val_min': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {})
},
u'codespeed.revision': {
'Meta': {'unique_together': "(('commitid', 'branch'),)", 'object_name': 'Revision'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'branch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': u"orm['codespeed.Branch']"}),
'commitid': ('django.db.models.fields.CharField', [], {'max_length': '42'}),
'date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisions'", 'null': 'True', 'to': u"orm['codespeed.Project']"}),
'tag': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'})
}
}
complete_apps = ['codespeed']
|
nomeata/codespeed
|
codespeed/migrations/0013_auto__chg_field_revision_author__chg_field_environment_kernel__chg_fie.py
|
Python
|
lgpl-2.1
| 9,704
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from textwrap import dedent
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.targets.scala_jar_dependency import ScalaJarDependency
from pants.backend.jvm.targets.scala_library import ScalaLibrary
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.java.jar.jar_dependency import JarDependency
from pants_test.base_test import BaseTest
class ScalaLibraryTest(BaseTest):
@property
def alias_groups(self):
return BuildFileAliases(
targets={
'scala_library': ScalaLibrary,
'java_library': JavaLibrary,
'jar_library': JarLibrary,
},
objects={
'jar': JarDependency,
'scala_jar': ScalaJarDependency,
}
)
def setUp(self):
super(ScalaLibraryTest, self).setUp()
self.context(options={
'scala-platform': {
'version': '2.11'
}
})
self.add_to_build_file('3rdparty', dedent("""
jar_library(
name='hub-and-spoke',
jars=[
jar('org.jalopy', 'hub-and-spoke', '0.0.1')
]
)
"""))
self.add_to_build_file('scala', dedent("""
scala_library(
name='lib',
sources=[],
java_sources=[
'java:explicit_scala_dep',
'java:no_scala_dep',
]
)
"""))
self.add_to_build_file('java', dedent("""
java_library(
name='explicit_scala_dep',
sources=[],
dependencies=[
'scala:lib',
'3rdparty:hub-and-spoke',
]
)
java_library(
name='no_scala_dep',
sources=[],
dependencies=[]
)
"""))
self.lib_hub_and_spoke = self.target('3rdparty:hub-and-spoke')
self.scala_library = self.target('scala:lib')
self.java_library_explicit_dep = self.target('java:explicit_scala_dep')
self.java_library_no_dep = self.target('java:no_scala_dep')
def test_mixed_linkage(self):
# Expect the scala_lib to include the jar_library lib_hub_and_spoke's jar as
# well as one other jar.
self.assertEqual(len(self.lib_hub_and_spoke.jar_dependencies), 1)
self.assertEqual(len(self.scala_library.jar_dependencies), 2)
self.assertTrue(
self.lib_hub_and_spoke.jar_dependencies[0] in self.scala_library.jar_dependencies,
'Expect the scala_lib to include the jar_library lib_hub_and_spoke\'s jar as well '
'as one other jar.')
self.assertEqual(set(self.scala_library.jar_dependencies),
set(self.java_library_explicit_dep.jar_dependencies),
'The java end of a mixed language logical lib with an explicit dep should be '
'unaffected by linking.')
|
UnrememberMe/pants
|
tests/python/pants_test/targets/test_scala_library.py
|
Python
|
apache-2.0
| 3,132
|
"""The tests for the filesize sensor."""
import os
from unittest.mock import patch
import pytest
from homeassistant import config as hass_config
from homeassistant.components.filesize import DOMAIN
from homeassistant.components.filesize.sensor import CONF_FILE_PATHS
from homeassistant.const import SERVICE_RELOAD
from homeassistant.setup import async_setup_component
from tests.common import get_fixture_path
TEST_DIR = os.path.join(os.path.dirname(__file__))
TEST_FILE = os.path.join(TEST_DIR, "mock_file_test_filesize.txt")
def create_file(path):
"""Create a test file."""
with open(path, "w") as test_file:
test_file.write("test")
@pytest.fixture(autouse=True)
def remove_file():
"""Remove test file."""
yield
if os.path.isfile(TEST_FILE):
os.remove(TEST_FILE)
async def test_invalid_path(hass):
"""Test that an invalid path is caught."""
config = {"sensor": {"platform": "filesize", CONF_FILE_PATHS: ["invalid_path"]}}
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids("sensor")) == 0
async def test_valid_path(hass):
"""Test for a valid path."""
create_file(TEST_FILE)
config = {"sensor": {"platform": "filesize", CONF_FILE_PATHS: [TEST_FILE]}}
hass.config.allowlist_external_dirs = {TEST_DIR}
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids("sensor")) == 1
state = hass.states.get("sensor.mock_file_test_filesize_txt")
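    # A 4-byte file rounds to "0.0" because the sensor reports its state in MB; the
    # exact size is exposed via the "bytes" attribute checked below.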
assert state.state == "0.0"
assert state.attributes.get("bytes") == 4
async def test_reload(hass, tmpdir):
"""Verify we can reload filesize sensors."""
testfile = f"{tmpdir}/file"
await hass.async_add_executor_job(create_file, testfile)
with patch.object(hass.config, "is_allowed_path", return_value=True):
await async_setup_component(
hass,
"sensor",
{
"sensor": {
"platform": "filesize",
"file_paths": [testfile],
}
},
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 1
assert hass.states.get("sensor.file")
yaml_path = get_fixture_path("configuration.yaml", "filesize")
with patch.object(hass_config, "YAML_CONFIG_FILE", yaml_path), patch.object(
hass.config, "is_allowed_path", return_value=True
):
await hass.services.async_call(
DOMAIN,
SERVICE_RELOAD,
{},
blocking=True,
)
await hass.async_block_till_done()
assert hass.states.get("sensor.file") is None
|
jawilson/home-assistant
|
tests/components/filesize/test_sensor.py
|
Python
|
apache-2.0
| 2,762
|
# Copyright (c) 2014 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from copy import deepcopy
from itertools import ifilter
from logging import getLogger
from random import shuffle, choice, randint, randrange
from tests.comparison.common import TableExprList, ValExpr, ValExprList
from tests.comparison.funcs import (
AGG_FUNCS,
AggFunc,
ANALYTIC_FUNCS,
AnalyticFunc,
And,
Coalesce,
Equals,
Func,
FUNCS,
PartitionByClause,
Trim,
WindowBoundary,
WindowClause)
from tests.comparison.types import (
Char,
Boolean,
Int,
JOINABLE_TYPES,
String,
TYPES,
VarChar)
from tests.comparison.query import (
FromClause,
GroupByClause,
HavingClause,
InlineView,
JoinClause,
LimitClause,
OrderByClause,
Query,
SelectClause,
SelectItem,
Subquery,
UnionClause,
WhereClause,
WithClause,
WithClauseInlineView)
UNBOUNDED_PRECEDING = WindowBoundary.UNBOUNDED_PRECEDING
PRECEDING = WindowBoundary.PRECEDING
CURRENT_ROW = WindowBoundary.CURRENT_ROW
FOLLOWING = WindowBoundary.FOLLOWING
UNBOUNDED_FOLLOWING = WindowBoundary.UNBOUNDED_FOLLOWING
LOG = getLogger(__name__)
class QueryGenerator(object):
def __init__(self, query_profile):
self.profile = query_profile
self.queries_under_construction = list()
self.max_nested_query_count = None
@property
def current_query(self):
if self.queries_under_construction:
return self.queries_under_construction[-1]
@property
def root_query(self):
if self.queries_under_construction:
return self.queries_under_construction[0]
def create_query(self,
table_exprs,
allow_with_clause=True,
allow_union_clause=True,
select_item_data_types=None,
required_select_item_type=None,
required_table_expr_col_type=None,
create_aggregate=None,
table_alias_prefix='t'):
'''Create a random query using various language features.
The initial call to this method should only use tables in the table_exprs
parameter, and not inline views or WITH definitions. The other types of
table exprs may be added as part of the query generation.
Due to implementation limitations, nested queries are not distributed evenly by
default even if the query profile assigns an equal likelihood of use to each
possible nested query decision. This is because the implementation chooses nested
queries in a fixed order (WITH -> FROM -> WHERE). For example if only one nested
query were allowed and each clause had a 50% chance of using a subquery, the
resulting set of generated queries would contain a subquery in the WITH clause
50% of the time, in the FROM 25% of the time, and WHERE 12.5% of the time. If the
max nested queries were two, it's most likely that the generated query would
contain a WITH clause which has the second nested query within it....
If select_item_data_types is specified it must be a sequence or iterable of
DataType. The generated query.select_clause.select will have data types suitable
for use in a UNION.
required_select_item_type may be set to a DataType to force at least one of the
SELECT items to be of the given type. This can be used to ensure that inline
views will have at least one joinable column.
required_table_expr_col_type may be set to ensure that at least one of the
TableExprs used in the FROM clause will have a column of the given type. This
    can be used to ensure that a correlation condition can be made for a Subquery.
create_aggregate can be set to True or False to force or disable the creation
of an aggregate query. This is used during Subquery creation where the context
may require an aggregate or non-aggregate.
'''
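    # The clauses below are constructed in a fixed order:
    # WITH -> FROM -> SELECT -> WHERE -> GROUP BY -> DISTINCT -> HAVING -> UNION.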
query = Query()
query.parent = self.current_query
self.queries_under_construction.append(query)
self.profile.query = query
# Make a copy so tables can be added if a WITH clause is used
table_exprs = TableExprList(table_exprs)
if self.max_nested_query_count is None:
self.max_nested_query_count = self.profile.get_max_nested_query_count()
elif self.max_nested_query_count == 0:
raise Exception('Maximum nested query count exceeded')
else:
self.max_nested_query_count -= 1
with_clause = None
if allow_with_clause \
and self.allow_more_nested_queries \
and self.profile.use_with_clause():
with_clause = self._create_with_clause(table_exprs)
table_exprs.extend(with_clause.table_exprs)
query.with_clause = with_clause
from_clause = self._create_from_clause(
table_exprs, table_alias_prefix, required_table_expr_col_type)
query.from_clause = from_clause
select_clause = self._create_select_clause(
from_clause.visible_table_exprs,
select_item_data_types,
required_select_item_type,
create_aggregate)
query.select_clause = select_clause
if self.profile.use_where_clause():
query.where_clause = self._create_where_clause(
from_clause.visible_table_exprs, table_exprs, table_alias_prefix)
# If agg and non-agg SELECT items are present then a GROUP BY is required otherwise
# it's optional and is effectively a "SELECT DISTINCT".
if (select_clause.agg_items and select_clause.basic_items) \
or (create_aggregate is None \
and select_clause.basic_items \
and self.profile.use_group_by_clause()):
group_by_items = [item for item in select_clause.basic_items
if not item.val_expr.is_constant]
if group_by_items:
query.group_by_clause = GroupByClause(group_by_items)
# Impala doesn't support DISTINCT with analytics or "SELECT DISTINCT" when
# GROUP BY is used.
if not select_clause.analytic_items \
and not (query.group_by_clause and not select_clause.agg_items) \
and self.profile.use_distinct():
if select_clause.agg_items:
self._enable_distinct_on_random_agg_items(select_clause.agg_items)
else:
select_clause.distinct = True
if self.profile.use_having_clause() \
and (query.group_by_clause or select_clause.agg_items):
basic_select_item_exprs = \
ValExprList(item.val_expr for item in select_clause.basic_items)
query.having_clause = self._create_having_clause(
from_clause.visible_table_exprs, basic_select_item_exprs)
if allow_union_clause \
and self.allow_more_nested_queries \
and self.profile.use_union_clause():
data_type_candidates_by_base_type = defaultdict(list)
for data_type in TYPES:
data_type_candidates_by_base_type[data_type.get_base_type()].append(data_type)
select_item_data_types = list()
for select_item in select_clause.items:
select_item_data_types.append(
choice(data_type_candidates_by_base_type[select_item.val_expr.base_type]))
query.union_clause = UnionClause(self.create_query(
table_exprs,
allow_with_clause=False,
select_item_data_types=select_item_data_types))
query.union_clause.all = self.profile.use_union_all()
self.queries_under_construction.pop()
if self.queries_under_construction:
self.profile.query = self.queries_under_construction[-1]
else:
self.profile.query = None
self.max_nested_query_count = None
return query
@property
def allow_more_nested_queries(self):
return self.max_nested_query_count > 0
def _create_with_clause(self, table_exprs):
# Make a copy so newly created tables can be added and made available for use in
# future table definitions.
table_exprs = TableExprList(table_exprs)
with_clause_inline_views = TableExprList()
for with_clause_inline_view_idx \
in xrange(self.profile.get_with_clause_table_ref_count()):
query = self.create_query(table_exprs)
with_clause_alias_count = getattr(self.root_query, 'with_clause_alias_count', 0) + 1
self.root_query.with_clause_alias_count = with_clause_alias_count
with_clause_inline_view = \
WithClauseInlineView(query, 'with_%s' % with_clause_alias_count)
table_exprs.append(with_clause_inline_view)
with_clause_inline_views.append(with_clause_inline_view)
if not self.allow_more_nested_queries:
break
return WithClause(with_clause_inline_views)
def _create_select_clause(self,
table_exprs,
select_item_data_types,
required_select_item_type,
create_aggregate):
# XXX: Prevent GROUP BY float_col, the groupings will differ across databases.
if select_item_data_types:
select_item_data_types = tuple(select_item_data_types)
if select_item_data_types \
and required_select_item_type \
and not issubclass(required_select_item_type, select_item_data_types):
raise Exception('Required select item type is not in allowed types')
if select_item_data_types:
desired_item_count = len(select_item_data_types)
else:
desired_item_count = self.profile.get_select_item_count()
basic_count, agg_count, analytic_count = \
self.profile.split_select_item_count(desired_item_count, create_aggregate)
select_items = list()
for item_idx in xrange(desired_item_count):
if select_item_data_types:
col_type = select_item_data_types[item_idx]
else:
if desired_item_count - 1 == item_idx and required_select_item_type:
col_types = [required_select_item_type]
else:
col_types = table_exprs.col_types
col_type = self.profile.choose_type(col_types)
if required_select_item_type and issubclass(col_type, required_select_item_type):
required_select_item_type = None
category_idx = randint(0, basic_count + agg_count + analytic_count - 1)
if category_idx < basic_count:
select_items.append(self._create_basic_select_item(table_exprs, col_type))
basic_count -= 1
elif category_idx < basic_count + agg_count:
select_items.append(('AGG', col_type))
agg_count -= 1
else:
select_items.append(('ANALYTIC', col_type))
analytic_count -= 1
select_item_exprs = \
ValExprList(item.val_expr for item in select_items if type(item) == SelectItem)
for idx, item in enumerate(select_items):
if type(item) == tuple and item[0] == 'AGG':
select_items[idx] = \
self._create_agg_select_item(table_exprs, select_item_exprs, item[1])
for item in select_items:
if type(item) == tuple:
continue
if item.is_agg:
select_item_exprs.append(item.val_expr)
for idx, item in enumerate(select_items):
if type(item) == tuple:
select_items[idx] = self._create_analytic_select_item(
table_exprs,
select_item_exprs,
len(select_item_exprs) == 0 and analytic_count == 1,
item[1])
# So far all the SELECT items are defined and set but none of them have aliases. If
# an item is a simple column reference, then it will only get an alias if there is a
# conflict with another simple column ref. All other item types, such as functions
# or constants, will always have an alias.
item_name_counts = defaultdict(int)
for item in select_items:
if item.alias or not item.val_expr.is_col:
continue
if item.val_expr.name in item_name_counts:
item.alias = '*CONFLICT*'
else:
item_name_counts[item.val_expr.name] += 1
for item in select_items:
if item.alias == '*CONFLICT*' or (not item.val_expr.is_col and not item.alias):
# Use names close to the Impala functional test database so that bugs in
# resolution will be more likely to surface.
alias = '%s_col' % item.type.__name__.lower()
if alias in item_name_counts:
item.alias = alias + '_%s' % (item_name_counts[alias] + 1)
else:
item.alias = alias
item_name_counts[alias] += 1
return SelectClause(select_items)
def _create_basic_select_item(self, table_exprs, return_type):
max_children = self.profile.choose_nested_expr_count()
if max_children:
value = self._create_func_tree(return_type)
value = self._populate_func_with_vals(value, table_exprs)
elif return_type in table_exprs.col_types:
value = self.profile.choose_val_expr(table_exprs.cols_by_type[return_type])
else:
value = self.profile.choose_constant(return_type)
return SelectItem(value)
def _create_func_tree(self, return_type, allow_subquery=False):
    '''Returns an instance of a basic function that has all of its arguments either set
    to None or another instance of a function that has its arguments set likewise. The
caller should replace the None values with column references or constants as
desired. The depth of the tree is determined by the query profile (self.profile).
'''
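    # Illustrative (hypothetical) shape of a returned tree for return_type=Boolean:
    #   And(Equals(Int(None), Int(None)), Boolean(None))
    # where each None-valued leaf is later replaced by a column ref or a constant.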
signatures = self._funcs_to_allowed_signatures(FUNCS)
root_signatures = self._find_matching_signatures(
signatures, return_type=return_type, allow_subquery=allow_subquery)
root_signature = self.profile.choose_func_signature(root_signatures)
func = root_signature.func(root_signature) # An instance of a function
max_children = self.profile.choose_nested_expr_count()
if max_children:
# Impala does not allow functions that contain subqueries to have arguments
# that contain subqueries. Ex: ... WHERE (int_col IN (SELECT 1)) IN (SELECT TRUE)
subquery_allowed_null_args = list()
subquery_not_allowed_null_args = list()
if func.contains_subquery:
null_args = subquery_not_allowed_null_args
else:
null_args = subquery_allowed_null_args
      null_args.extend((func, idx) for idx, arg in enumerate(func.args)
                       if type(arg) != list and arg.val is None)
while max_children \
and (subquery_allowed_null_args or subquery_not_allowed_null_args):
idx = randrange(
len(subquery_allowed_null_args) + len(subquery_not_allowed_null_args))
if idx < len(subquery_allowed_null_args):
null_args = subquery_allowed_null_args
else:
null_args = subquery_not_allowed_null_args
shuffle(null_args)
parent_func, parent_arg_idx = null_args.pop()
child_signatures = self._find_matching_signatures(
signatures,
return_type=parent_func.args[parent_arg_idx].type,
allow_subquery=(allow_subquery and null_args == subquery_allowed_null_args))
child_signature = self.profile.choose_func_signature(child_signatures)
child_func = child_signature.func(child_signature)
parent_func.args[parent_arg_idx] = child_func
if child_func.contains_subquery:
null_args = subquery_not_allowed_null_args
else:
null_args = subquery_allowed_null_args
null_args.extend((child_func, idx) for idx, arg in enumerate(child_func.args)
if type(arg) != list and arg.val is None)
max_children -= 1
return func
def _funcs_to_allowed_signatures(self, funcs):
'''Return a list of the signatures contained in "funcs" that are eligible for use
based on the query profile.
'''
return [signature for func in funcs for signature in func.signatures()
if self.profile.allow_func_signature(signature)]
def _find_matching_signatures(self,
signatures,
return_type=None,
accepts=None,
allow_subquery=False):
matching_signatures = list()
for signature in signatures:
if return_type and not issubclass(signature.return_type, return_type):
continue
if accepts and not any(not arg.is_subquery and issubclass(arg.type, accepts)
for arg in signature.args):
continue
if not allow_subquery and any(arg.is_subquery for arg in signature.args):
continue
matching_signatures.append(signature)
return matching_signatures
def _populate_func_with_vals(self,
func,
table_exprs=TableExprList(),
val_exprs=ValExprList(),
table_alias_prefix='',
allow_subquery=False,
_allow_table_exprs=None):
if not _allow_table_exprs and func.is_agg:
_allow_table_exprs = True
elif _allow_table_exprs is None and func.contains_agg:
_allow_table_exprs = False
elif _allow_table_exprs is None:
_allow_table_exprs = True
# If a function's return type depends on some of its args then at least one of those
# args must not be the NULL literal. Example: IF(false, NULL, NULL) is considered
# invalid because the return type cannot be determined.
has_non_null_literal_arg = False
for idx, arg in enumerate(func.args):
signature_arg = func.signature.args[idx]
if signature_arg.is_subquery \
or (allow_subquery \
and self.allow_more_nested_queries \
and self.profile.use_scalar_subquery()):
usage = self.profile.choose_subquery_predicate_category(
func.name(),
self.current_query.from_clause.table_exprs.joinable_cols_by_type)
if usage is not None \
and self.allow_more_nested_queries \
and (usage[1] == 'UNCORRELATED'
or self.current_query.from_clause.table_exprs.joinable_cols_by_type):
use_scalar_subquery = (usage[0] == 'Scalar')
use_agg_subquery = (usage[1] == 'AGG')
use_correlated_subquery = (usage[2] == 'CORRELATED')
if use_correlated_subquery:
join_expr_type = self.profile.choose_type(list(
self.current_query.from_clause.table_exprs.joinable_cols_by_type))
else:
join_expr_type = None
select_item_data_types = \
[signature_arg.type] if use_scalar_subquery else signature_arg.type
query = self.create_query(
table_exprs,
select_item_data_types=select_item_data_types,
required_table_expr_col_type=join_expr_type,
create_aggregate=use_agg_subquery,
# Don't use UNION + LIMIT; https://issues.cloudera.org/browse/IMPALA-1379
allow_union_clause=(not signature_arg.is_subquery),
table_alias_prefix=(table_alias_prefix +
('t' if use_correlated_subquery else '')))
if use_scalar_subquery and not use_agg_subquery:
# Impala will assume the query will return more than one row unless a LIMIT 1
# is added. An ORDER BY will also be added under the assumption that we want
# deterministic results.
query.order_by_clause = OrderByClause([Int(1)])
query.limit_clause = LimitClause(Int(1))
if use_correlated_subquery:
outer_table_expr = choice(
self.current_query.from_clause.table_exprs.by_col_type[join_expr_type])
inner_table_expr = choice(
query.from_clause.table_exprs.by_col_type[join_expr_type])
correlation_condition = self._create_relational_join_condition(
outer_table_expr, inner_table_expr)
if query.where_clause:
query.where_clause.boolean_expr = And.create_from_args(
query.where_clause.boolean_expr, correlation_condition)
else:
query.where_clause = WhereClause(correlation_condition)
func.args[idx] = Subquery(query)
else:
replacement_func = self._create_func_tree(func.type)
return self._populate_func_with_vals(
replacement_func,
table_exprs=table_exprs,
val_exprs=val_exprs,
table_alias_prefix=table_alias_prefix,
allow_subquery=allow_subquery,
_allow_table_exprs=_allow_table_exprs)
else:
if arg.is_constant and arg.val is None:
candidate_val_exprs = ValExprList()
if val_exprs:
candidate_val_exprs.extend(val_exprs.by_type[arg.type])
if _allow_table_exprs:
candidate_val_exprs.extend(table_exprs.cols_by_type[arg.type])
if candidate_val_exprs:
val = self.profile.choose_val_expr(candidate_val_exprs)
else:
val = self.profile.choose_constant(
return_type=arg.type,
allow_null=(signature_arg.can_be_null \
and signature_arg.can_be_null_literal \
and (has_non_null_literal_arg \
or not signature_arg.determines_signature)))
func.args[idx] = val
arg = val
elif arg.is_func:
func.args[idx] = self._populate_func_with_vals(
arg,
table_exprs=table_exprs,
val_exprs=val_exprs,
_allow_table_exprs=_allow_table_exprs)
if not signature_arg.can_be_null and not arg.is_constant:
val = self.profile.choose_constant(return_type=arg.type, allow_null=False)
func.args[idx] = Coalesce.create_from_args(arg, val)
      if not arg.is_constant or arg.val is not None:
has_non_null_literal_arg = True
return func
def _create_agg_select_item(self, table_exprs, basic_select_item_exprs, return_type):
value = self._create_agg_func_tree(return_type)
value = self._populate_func_with_vals(value, table_exprs, basic_select_item_exprs)
return SelectItem(value)
def _create_agg_func_tree(self, return_type):
return self._create_agg_or_analytic_tree(return_type, agg_funcs=AGG_FUNCS)
def _create_agg_or_analytic_tree(self, return_type, agg_funcs=[], analytic_funcs=[]):
'''Returns an instance of a function that is guaranteed to either be or contain an
aggregate or analytic function. The arguments of the returned function will either
be None or an instance of a function as in _create_func_tree.
The chosen aggregate or analytic functions will be restricted to the list of
functions in agg_funcs and analytic_funcs.
If analytic_funcs is non-empty the returned function will be guaranteed to
    be an analytic or contain at least one analytic function.
return_type must be set and refers to the data type of the function output.
agg_funcs and analytic_funcs should be used to determine the class of the
returned function. The caller is responsible for restricting the return_type
to types that can be generated by permutations of available functions. If the max
nested expr count in the query profile is at least one, then any return_type
should be possible to generate but this is not guaranteed.
'''
# The creation of aggregate and analytic functions is so similar that they are
# combined here. "basic" function creation is much simpler so that is kept separate.
# What's going to happen is there will be essentially two important data structures:
#
# 1) A tree of functions, the root of which will be returned. The leaves of the
# tree are "place holders" which are actually instances of a concrete DataType,
# such as Int, with a value of None. In other words, the arguments to all
# functions are either other functions or SQL NULL.
#
# 2) A mapping to place holders from the type of function that they may be replaced
# with. The actual data structure is
# dict<func class> -> list<tuple<tree node, index of place holder in tree node>>
# where "func class" is one of "AggFunc", "AnalyticFunc" or "Func".
#
# This means once a child function is generated, a spot where it can be placed into
# the tree can easily be identified. Although the work is actually done in reverse
# order, a place holder is chosen, then a replacement is generated.
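    # Hypothetical example: if the root chosen so far is a basic FLOOR(<Int leaf>),
    # the mapping would list the (FLOOR, 0) place holder under both Func and AggFunc
    # (and under AnalyticFunc when an analytic is being built), meaning that leaf may
    # later be replaced by a function of any of those classes.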
if not agg_funcs and not analytic_funcs:
raise Exception('At least one analytic or aggregate function is required')
basic_signatures_by_return_type = self._group_signatures_by_return_type(
self._find_matching_signatures(
self._funcs_to_allowed_signatures(FUNCS),
allow_subquery=False))
agg_signatures_by_return_type = self._group_signatures_by_return_type(
self._funcs_to_allowed_signatures(agg_funcs))
analytic_signatures_by_return_type = self._group_signatures_by_return_type(
self._funcs_to_allowed_signatures(analytic_funcs))
if analytic_funcs:
return_class = AnalyticFunc
return_signatures_by_return_type = analytic_signatures_by_return_type
else:
return_class = AggFunc
return_signatures_by_return_type = agg_signatures_by_return_type
min_children, max_children = self.profile.bounds('MAX_NESTED_EXPR_COUNT')
if max_children == 0 and return_type not in return_signatures_by_return_type:
raise Exception(('At least one child expr is required to create a %s expr'
+ ' using an %s function')
% (return_type.__name__,
'aggregate' if return_class == AggFunc else 'analytic'))
if min_children == 0 and return_type not in return_signatures_by_return_type:
min_children = 1
max_children = self.profile._choose_from_bounds(min_children, max_children)
if not max_children:
signature = self.profile.choose_func_signature(
return_signatures_by_return_type[return_type])
return signature.func(signature)
root_func = None
# Every time a function is created its arguments will initially be NULL. Those NULL
# arguments may be replaced by other functions up to "max_children". The types of
# valid child functions depends on the type of parent function.
#
# * Analytics may not contain other analytics
# * Analytics may contain aggregates or basic functions
# * Aggregates may not contain other aggregates or analytics
# * Aggregates may contain basic functions
# * Basic functions may contain any function so long as the above conditions are
# not broken (indirectly).
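    # E.g. (in SQL terms) MAX(FLOOR(AVG(x))) would be rejected because the aggregate
    # MAX indirectly contains another aggregate, whereas FLOOR(AVG(x)) alone is fine.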
null_args_by_func_allowed = {AggFunc: list(), AnalyticFunc: list(), Func: list()}
while max_children:
null_arg_pool = None
parent_func, parent_arg_idx = None, None
      chosen_signature = None
# Since aggregate (and let's assume analytic functions) return a limited set of
# types, some prep work may be needed if the return type isn't in the set of
# directly producible types. For example if a Boolean is desired and there are
# only two children remaining, the next child must be something that accepts an
# aggregate and returns a Boolean, such as GreaterThan.
if (not root_func and max_children == 1) \
or (root_func and max_children == 2 \
and ((return_class == AggFunc and not root_func.contains_agg)
or (return_class == AnalyticFunc and not root_func.contains_analytic))):
if root_func:
idx = randrange(len(null_args_by_func_allowed[return_class])
+ len(null_args_by_func_allowed[Func]))
use_return_class = idx < len(null_args_by_func_allowed[return_class])
if use_return_class:
null_arg_pool = null_args_by_func_allowed[return_class]
signature_pools_by_return_type = return_signatures_by_return_type
else:
null_arg_pool = null_args_by_func_allowed[Func]
signature_pools_by_return_type = basic_signatures_by_return_type
shuffle(null_arg_pool)
parent_func, parent_arg_idx = null_arg_pool.pop()
parent_arg_type = parent_func.args[parent_arg_idx].type
if use_return_class:
signature_pool = signature_pools_by_return_type[parent_arg_type]
else:
# This is a basic functions so it needs to accept one of the types returned
# by the desired return_class.
signature_pool = self._find_matching_signatures(
signature_pools_by_return_type[parent_arg_type],
accepts=tuple(return_signatures_by_return_type))
else:
signature_pool = list()
if return_type in return_signatures_by_return_type:
signature_pool.extend(return_signatures_by_return_type[return_type])
if return_type in basic_signatures_by_return_type:
signature_pool.extend(self._find_matching_signatures(
basic_signatures_by_return_type[return_type],
accepts=tuple(return_signatures_by_return_type)))
chosen_signature = self.profile.choose_func_signature(signature_pool)
elif max_children == 1 \
and ((return_class == AggFunc and not root_func.contains_agg)
or (return_class == AnalyticFunc and not root_func.contains_analytic)):
# This is the last iteration and the generated function still doesn't contain
# an instance of the desired function class. From the setup above, at least
# one of the available place holder arguments can be replaced by an aggregate or
# analytic.
null_arg_pool = null_args_by_func_allowed[return_class]
if not null_arg_pool:
raise Exception(
'No leaves in the expr tree may be replaced by an %s function'
% ('aggregate' if return_class == AggFunc else 'analytic'))
shuffle(null_arg_pool)
return_types = tuple(return_signatures_by_return_type)
while null_arg_pool:
parent_func, parent_arg_idx = null_arg_pool.pop()
parent_arg_type = parent_func.args[parent_arg_idx].type
if issubclass(parent_arg_type, return_types):
break
if not null_arg_pool:
raise Exception('No functions could accept an %s function'
% ('aggregate' if return_class == AggFunc else 'analytic'))
chosen_signature = self.profile.choose_func_signature(
return_signatures_by_return_type[parent_arg_type])
elif not root_func:
# No restrictions, just choose a root_func that returns the needed type.
signature_pool = list()
if return_type in return_signatures_by_return_type:
signature_pool.extend(return_signatures_by_return_type[return_type])
if return_type in basic_signatures_by_return_type:
signature_pool.extend(basic_signatures_by_return_type[return_type])
        chosen_signature = self.profile.choose_func_signature(signature_pool)
else:
# A root_func was chosen and it's children are in one or more of the
# null_args_by_func_allowed pools. A pool will be chosen, then a child function.
null_arg_counts_by_pool = dict((pool_category, len(pool)) for pool_category, pool
in null_args_by_func_allowed.iteritems())
# There is a special case that would lead to a dead end. If there is only one
# distinct place holder across all the pools and an analytic is still needed,
# then that place holder cannot be replaced by an aggregate since aggregates
# are not allowed to contain analytics. Example: an analytic is impossible once
# the tree is FLOOR(AVG(NULL)).
if return_class == AnalyticFunc \
and not root_func.contains_analytic \
and len(null_args_by_func_allowed[AnalyticFunc]) == 1 \
and null_args_by_func_allowed[AnalyticFunc][0] \
in null_args_by_func_allowed[AggFunc]:
null_arg_counts_by_pool[AggFunc] = 0
null_arg_pool_category = \
self.profile._choose_from_weights(null_arg_counts_by_pool)
null_arg_pool = null_args_by_func_allowed[null_arg_pool_category]
shuffle(null_arg_pool)
parent_func, parent_arg_idx = null_arg_pool.pop()
parent_arg_type = parent_func.args[parent_arg_idx].type
if null_arg_pool_category == AggFunc:
signature_pool = agg_signatures_by_return_type[parent_arg_type]
elif null_arg_pool_category == AnalyticFunc:
signature_pool = analytic_signatures_by_return_type[parent_arg_type]
else:
signature_pool = basic_signatures_by_return_type[parent_arg_type]
chosen_signature = self.profile.choose_func_signature(signature_pool)
chosen_func = chosen_signature.func(chosen_signature)
if root_func:
max_children -= 1
else:
root_func = chosen_func
if parent_func:
# Remove the place holder from all of the other pools.
for pool_category, pool in null_args_by_func_allowed.iteritems():
for null_arg_idx, (func, arg_idx) in enumerate(pool):
if func is parent_func and arg_idx == parent_arg_idx:
del pool[null_arg_idx]
# Replace the place holder with the child function
parent_func.args[parent_arg_idx] = chosen_func
parent_func.validate()
chosen_func.parent = parent_func
else:
chosen_func.parent = None
# Place the args of the chosen function into the appropriate pools. Aggregate and
# analytic functions have different rules about which functions they accept as
# arguments. If the chosen function is neither then the tree must be inspected to
# find the first aggregate or analytic ancestor.
child_null_arg_pools = set()
node = chosen_func
while True:
if node.is_analytic:
child_null_arg_pools.add(AggFunc)
child_null_arg_pools.add(Func)
break
elif node.is_agg:
child_null_arg_pools.add(Func)
break
node = node.parent
if not node:
# This means the root_func is a non-aggregate and non-analytic, it can accept
# anything.
child_null_arg_pools.add(AggFunc)
child_null_arg_pools.add(Func)
if return_class == AnalyticFunc:
child_null_arg_pools.add(AnalyticFunc)
break
for func_allowed in child_null_arg_pools:
null_args = null_args_by_func_allowed[func_allowed]
for idx, arg in enumerate(chosen_func.args):
if arg.val is not None:
# Some functions come with unusual arguments pre-populated.
# Ex: The analytic function LEAD(foo, constant) will have "constant" non-NULL.
continue
if func_allowed == AggFunc:
returnable_types = agg_signatures_by_return_type
elif func_allowed == AnalyticFunc:
returnable_types = analytic_signatures_by_return_type
else:
returnable_types = basic_signatures_by_return_type
# Skip place holders that are impossible to replace. The function selection
# logic depends on this.
if arg.type not in returnable_types:
continue
null_args.append((chosen_func, idx))
if not any(null_args_by_func_allowed.itervalues()):
# Some analytic functions take no arguments. Ex: ROW_NUM()
break
if (return_class == AggFunc and not root_func.contains_agg) \
or (return_class == AnalyticFunc and not root_func.contains_analytic):
raise Exception('Unable to create an %s function'
% ('aggregate' if return_class == AggFunc else 'analytic'))
return root_func
def _create_analytic_select_item(self,
table_exprs,
select_item_exprs,
is_only_item,
return_type):
    '''Create and return a SelectItem whose val_expr includes an analytic function.
    table_exprs must be a non-empty collection of table exprs to choose columns from.
    select_item_exprs may be an empty collection. If not empty, analytic function
    expressions will be restricted so that PARTITION BY and ORDER BY values are
    chosen from within this set. If empty, any random expr may be chosen.
is_only_item should be True if this analytic item will be the only SelectItem in
the SELECT clause. When True, an additional analytic variation that would
otherwise produce a non-deterministic result set is enabled.
'''
# Results may not be deterministic when analytics have an ORDER BY. The code below
# will assume we want deterministic results though we know this may not be true
# for certain use cases such as just searching for crashes where results will be
# thrown away.
#
# These cases will produce a deterministic result when ORDER BY is used
#
# 1) There is only one SELECT item and the analytic function has no argument
# or the ORDER BY includes the argument.
#
    #   2) The analytic has a set of ORDER BY expressions that has a deterministic
# result. Ex: ORDER BY primary_key. This is only reliable if there are no
# JOINs.
#
# 3) A window is explicitly specified as UNBOUNDED PRECEDING to UNBOUNDED
# FOLLOWING and either the analytic function is an aggregate or the
# analytic function is FIRST_VALUE or LAST_VALUE.
#
# 4) RANK is a special case, it is intrinsically deterministic
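    # Each numbered case above corresponds to an analytic "design" offered by the
    # query profile; designs that cannot apply in the current context are excluded
    # below so that only deterministic variations remain eligible.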
excluded_designs = list()
if len(self.queries_under_construction) > 1:
excluded_designs.append('TOP_LEVEL_QUERY_WITHOUT_LIMIT')
if not is_only_item:
excluded_designs.append('ONLY_ITEM')
if len(table_exprs) == 1:
unique_col_combos = []
if table_exprs[0].unique_cols:
if select_item_exprs:
select_cols = set(val_expr for val_expr in select_item_exprs if val_expr.is_col)
for unique_cols in table_exprs[0].unique_cols:
if unique_cols <= select_cols:
unique_col_combos.append(unique_cols)
else:
unique_col_combos = table_exprs[0].unique_cols
if not unique_col_combos:
excluded_designs.append('DETERMINISTIC_ORDER')
else:
excluded_designs.append('DETERMINISTIC_ORDER')
allow_agg = any(ifilter(lambda expr: expr.contains_agg, select_item_exprs))
value = self._create_analytic_func_tree(return_type, excluded_designs, allow_agg)
value = self._populate_func_with_vals(
value,
table_exprs=TableExprList(),
val_exprs=ValExprList())
self._populate_analytic_clauses(value, table_exprs, select_item_exprs)
return SelectItem(value)
def _create_analytic_func_tree(self, return_type, excluded_designs, allow_agg):
# The results of queries with analytic functions may not be deterministic. Let's
# assume deterministic results are required. To generate deterministic results, the
# structure of queries will be limited to one of several "designs". The goal of each
# design is to ensure that results are deterministic. See the notes in
# _create_analytic_select_item for more info on designs.
designs = set(self.profile.allowed_analytic_designs())
for design in excluded_designs:
if design in designs:
designs.remove(design)
    window_weights = self.profile.weights('ANALYTIC_WINDOW')
    if 'UNBOUNDED_WINDOW' in designs \
        and window_weights[('ROWS', UNBOUNDED_PRECEDING, UNBOUNDED_FOLLOWING)] == 0 \
        and window_weights[('RANGE', UNBOUNDED_PRECEDING, UNBOUNDED_FOLLOWING)] == 0:
designs.remove('UNBOUNDED_WINDOW')
if not designs:
raise Exception('No options for creating a deterministic analytic function are'
' available')
available_funcs = list()
for func in ANALYTIC_FUNCS:
if 'TOP_LEVEL_QUERY_WITHOUT_LIMIT' in designs \
or 'DETERMINISTIC_ORDER_BY' in designs \
or 'ONLY_SELECT_ITEM' in designs \
or ('RANK_FUNC' in designs and func.name() == 'Rank') \
or ('NO_ORDER_BY' in designs and not func.REQUIRES_ORDER_BY) \
or ('UNBOUNDED_WINDOW' in designs and func.SUPPORTS_WINDOWING):
available_funcs.append(func)
if not available_funcs:
raise Exception('No analytic functions available')
# In general this may be a tree-like val expr that is guaranteed to contain at least
# one analytic function. The arguments to the function are already outlined but the
# analytic clause is still undetermined.
root_func = self._create_agg_or_analytic_tree(
return_type,
analytic_funcs=available_funcs,
agg_funcs=(AGG_FUNCS if allow_agg else []))
# The following designs are preferred because they place no restrictions on the
# analytic. From now on, if "designs" is non-empty this means that a design must
# be chosen.
if 'TOP_LEVEL_QUERY_WITHOUT_LIMIT' in designs \
or 'DETERMINISTIC_ORDER_BY' in designs \
or 'ONLY_SELECT_ITEM' in designs:
designs.clear()
else:
designs = list(designs)
# Set the analytic clauses, such as PARTITION BY, etc, for each analytic function.
analytic_funcs = list()
if root_func.is_analytic:
analytic_funcs.append(root_func)
for func in root_func.iter_exprs(lambda expr: expr.is_analytic):
analytic_funcs.append(func)
for func in analytic_funcs:
if designs:
func.design = choice([design for design in designs
if design != 'RANK' or func.name() != 'Rank'])
else:
func.design = 'NO_RESTRICTIONS'
use_window = (func.design == 'UNBOUNDED_WINDOW') \
or (func.design != 'NO_ORDER_BY'
and (func.SUPPORTS_WINDOWING and self.profile.use_window_in_analytic()))
if use_window:
window_range_or_rows, window_start_boundary_type, window_end_boundary_type \
= self.profile.choose_window_type()
if window_start_boundary_type in (PRECEDING, FOLLOWING):
start_val = Int(self.profile._choose_from_bounds('ANALYTIC_WINDOW_OFFSET'))
else:
start_val = None
if window_end_boundary_type in (PRECEDING, FOLLOWING):
end_val = Int(self.profile.get_window_offset())
if window_start_boundary_type == PRECEDING \
and window_end_boundary_type == PRECEDING:
if start_val.val < end_val.val:
start_val, end_val = end_val, start_val
elif start_val.val == end_val.val:
end_val.val -= 1
elif window_start_boundary_type == FOLLOWING \
and window_end_boundary_type == FOLLOWING:
if start_val.val > end_val.val:
start_val, end_val = end_val, start_val
elif start_val.val == end_val.val:
start_val.val -= 1
else:
end_val = None
func.window_clause = WindowClause(
window_range_or_rows,
WindowBoundary(window_start_boundary_type, start_val),
WindowBoundary(window_end_boundary_type, end_val) \
if window_end_boundary_type else None)
if self.profile.use_partition_by_clause_in_analytic():
func.partition_by_clause = PartitionByClause([])
if (func.design == 'DETERMINISTIC_ORDER') \
or use_window \
or func.REQUIRES_ORDER_BY \
or (func.design != 'NO_ORDER_BY' \
and self.profile.use_order_by_clause_in_analytic()):
func.order_by_clause = OrderByClause([])
if func.name() in ('Lead', 'Lag') and len(func.args) == 2:
func.args[1].val = self.profile.get_offset_for_analytic_lead_or_lag()
return root_func
def _group_signatures_by_return_type(self, signatures):
groups = defaultdict(list)
for signature in signatures:
groups[signature.return_type].append(signature)
return dict(groups)
def _populate_analytic_clauses(self, func, table_exprs, val_exprs):
for arg in func.args:
if arg.is_func:
self._populate_analytic_clauses(arg, table_exprs, val_exprs)
if func.is_analytic:
if func.design == 'DETERMINISTIC_ORDER':
        unique_cols = list(choice(table_exprs[0].unique_cols))
        shuffle(unique_cols)
else:
unique_cols = list()
if func.partition_by_clause:
if len(unique_cols) > 1:
func.partition_by_clause.val_exprs.append(unique_cols.pop())
elif len(val_exprs) > 1:
shuffle(val_exprs)
func.partition_by_clause.val_exprs.append(val_exprs[0])
else:
func.partition_by_clause = None
if func.order_by_clause:
if unique_cols:
order_by_exprs = unique_cols
elif val_exprs:
order_by_exprs = val_exprs[:2]
else:
cols = list(table_exprs.cols)
shuffle(cols)
order_by_exprs = cols[:2]
for order_by_expr in order_by_exprs:
func.order_by_clause.exprs_to_order.append(
(order_by_expr, choice([None, 'ASC', 'DESC', 'DESC'])))
def _create_analytic_partition_by_or_order_by_exprs(self,
table_exprs,
select_items,
required_exprs=None):
# TODO: Make more complicated exprs by combining the ones below. Ex: instead of
# returning [a, b, c] return [(a + b) * c, etc].
# TODO: The current implementation is more restrictive than it needs to be. I think
# we only need to know if a GROUP BY query is being generated, and in that case
# limit column exprs to those of the set of GROUP BY columns.
if select_items:
val_exprs = [choice(select_items).val_expr
for _ in xrange(self.profile.get_col_count_to_use_in_val_expr())]
else:
val_exprs = [self._create_val_expr(table_exprs)
for _ in xrange(self.profile.get_col_count_to_use_in_val_expr())]
if required_exprs:
used_cols = set()
for expr in val_exprs:
used_cols.update(expr.count_col_refs().keys())
remaining_cols = required_exprs - used_cols
if remaining_cols:
val_exprs.extend(remaining_cols)
return val_exprs
def _create_from_clause(self,
table_exprs,
table_alias_prefix,
required_table_expr_col_type):
from_clause = None
table_count = self.profile.get_table_count()
for idx in xrange(table_count):
if idx == 0:
candidate_table_exprs = TableExprList(table_exprs)
if required_table_expr_col_type:
candidate_table_exprs = \
candidate_table_exprs.by_col_type[required_table_expr_col_type]
if not candidate_table_exprs:
raise Exception('No tables have the required column type')
if table_count > 0 \
and required_table_expr_col_type not in JOINABLE_TYPES:
candidate_table_exprs = TableExprList(
table_expr for table_expr in candidate_table_exprs
if table_expr.joinable_cols_by_type)
if not candidate_table_exprs:
raise Exception('No tables have any joinable types')
table_expr = self._create_table_expr(candidate_table_exprs)
table_expr.alias = table_alias_prefix + str(idx + 1)
from_clause = FromClause(table_expr)
if not table_expr.joinable_cols:
# A CROSS JOIN is still possible but let's assume that isn't wanted.
break
else:
join_clause = self._create_join_clause(from_clause, table_exprs)
join_clause.table_expr.alias = table_alias_prefix + str(idx + 1)
from_clause.join_clauses.append(join_clause)
# Note: the HAVING clause creation assumes the only case when a table will not have
# an alias is the case below.
if len(from_clause.table_exprs) == 1 and not from_clause.table_expr.is_inline_view:
from_clause.table_expr.alias = None
return from_clause
def _create_table_expr(self, table_exprs, required_type=None):
if not table_exprs:
raise Exception('At least one table_expr is required')
if self.allow_more_nested_queries and self.profile.use_inline_view():
return self._create_inline_view(table_exprs, required_type=required_type)
if required_type:
      candidate_table_exprs = TableExprList()
      for table_expr in table_exprs:
        for col in table_expr.cols:
          if issubclass(col.type, required_type):
            candidate_table_exprs.append(table_expr)
            break
      if not candidate_table_exprs:
        raise Exception('No tables have a column with the required data type')
      table_exprs = candidate_table_exprs
return deepcopy(self.profile.choose_table(table_exprs))
def _create_inline_view(self, table_exprs, required_type=None):
return InlineView(self.create_query(
table_exprs, required_select_item_type=required_type))
def _create_join_clause(self, from_clause, table_exprs):
join_type = self.profile.choose_join_type(JoinClause.JOINS_TYPES)
if join_type == 'CROSS':
table_expr = self._create_table_expr(table_exprs)
else:
available_join_expr_types = set(from_clause.table_exprs.joinable_cols_by_type) \
& set(table_exprs.col_types)
if not available_join_expr_types:
        raise Exception('No tables have any columns eligible for joining')
join_expr_type = self.profile.choose_type(tuple(available_join_expr_types))
table_expr = self._create_table_expr(table_exprs, required_type=join_expr_type)
if join_type in ('LEFT ANTI', 'LEFT SEMI'):
table_expr.is_visible = False
join_clause = JoinClause(join_type, table_expr)
if join_type != 'CROSS':
join_table_expr_candidates = from_clause.table_exprs.by_col_type[join_expr_type]
if not join_table_expr_candidates:
raise Exception('table_expr has no common joinable columns')
related_table_expr = choice(join_table_expr_candidates)
join_clause.boolean_expr = self._create_relational_join_condition(
table_expr, related_table_expr)
return join_clause
def _create_relational_join_condition(self, left_table_expr, right_table_expr):
if not left_table_expr.joinable_cols_by_type:
raise Exception('All columns in table disallowed: %s' % left_table_expr)
if not right_table_expr.joinable_cols_by_type:
raise Exception('All columns in table disallowed: %s' % right_table_expr)
common_col_types = set(left_table_expr.joinable_cols_by_type) \
& set(right_table_expr.joinable_cols_by_type)
if not common_col_types:
raise Exception('Tables have no joinable columns in common')
predicates = list()
for _ in xrange(1 + self.profile.choose_nested_expr_count()):
col_type = choice(list(common_col_types))
left_col = choice(left_table_expr.joinable_cols_by_type[col_type])
right_col = choice(right_table_expr.joinable_cols_by_type[col_type])
# As of this writing Impala doesn't trim CHARs when comparing against VARCHAR. It's
# expected that the user will explicitly do this. Eventually an implicit trim should
# be done.
if issubclass(right_col.type, (String, VarChar)) and left_col.type == Char:
left_col = Trim.create_from_args(left_col)
      elif issubclass(left_col.type, (String, VarChar)) and right_col.type == Char:
right_col = Trim.create_from_args(right_col)
predicates.append(Equals.create_from_args(left_col, right_col))
while len(predicates) > 1:
predicates.append(And.create_from_args(predicates.pop(), predicates.pop()))
return predicates[0]
def _create_where_clause(self,
from_clause_table_exprs,
table_exprs,
table_alias_prefix):
predicate = self._create_func_tree(Boolean, allow_subquery=True)
predicate = self._populate_func_with_vals(
predicate,
table_exprs=from_clause_table_exprs,
table_alias_prefix=table_alias_prefix)
if predicate.contains_subquery and not from_clause_table_exprs[0].alias:
# TODO: Figure out if an alias is really needed.
from_clause_table_exprs[0].alias = 't1'
return WhereClause(predicate)
def _create_having_clause(self, table_exprs, basic_select_item_exprs):
predicate = self._create_agg_func_tree(Boolean)
predicate = self._populate_func_with_vals(
predicate, table_exprs=table_exprs, val_exprs=basic_select_item_exprs)
# https://issues.cloudera.org/browse/IMPALA-1423
# Make sure any cols used have a table identifier. As of this writing the only
# single table FROM clauses don't use table aliases. Setting a table alias
# automatically propagates as a column table identifier ("t1.col" instead of "col").
for arg in predicate.iter_exprs():
if isinstance(arg, ValExpr) and arg.is_col and not arg.owner.alias:
# TODO: Figure out if an alias is really needed.
arg.owner.alias = 't1'
return HavingClause(predicate)
def _enable_distinct_on_random_agg_items(self, agg_items):
'''Randomly choose an agg func and set it to use DISTINCT'''
# Impala has a limitation where 'DISTINCT' may only be applied to one agg
# expr. If an agg expr is used more than once, each usage may
# or may not include DISTINCT.
#
# Examples:
# OK: SELECT COUNT(DISTINCT a) + SUM(DISTINCT a) + MAX(a)...
# Not OK: SELECT COUNT(DISTINCT a) + COUNT(DISTINCT b)...
#
# Given a select list like:
# COUNT(a), SUM(a), MAX(b)
#
    # We want to output one of:
# COUNT(DISTINCT a), SUM(DISTINCT a), AVG(b)
# COUNT(DISTINCT a), SUM(a), AVG(b)
# COUNT(a), SUM(a), AVG(DISTINCT b)
#
# This will be done by first grouping all agg funcs by their inner
# expr:
# {a: [COUNT(a), SUM(a)],
# b: [MAX(b)]}
#
# then choosing a random val (which is a list of aggs) in the above dict, and
# finally randomly adding DISTINCT to items in the list.
exprs_to_funcs = defaultdict(list)
for item in agg_items:
for expr, funcs in self._group_agg_funcs_by_expr(item.val_expr).iteritems():
exprs_to_funcs[expr].extend(funcs)
funcs = choice(exprs_to_funcs.values())
for func in funcs:
if self.profile.use_distinct_in_func():
func.distinct = True
def _group_agg_funcs_by_expr(self, val_expr):
'''Group exprs and return a dict mapping the expr to the agg items
it is used in.
Example: COUNT(a) * SUM(a) - MAX(b) + MIN(c) -> {a: [COUNT(a), SUM(a)],
b: [MAX(b)],
c: [MIN(c)]}
'''
exprs_to_funcs = defaultdict(list)
if val_expr.is_agg:
exprs_to_funcs[tuple(val_expr.args)].append(val_expr)
elif val_expr.is_func:
for arg in val_expr.args:
for expr, funcs in self._group_agg_funcs_by_expr(arg).iteritems():
exprs_to_funcs[expr].extend(funcs)
# else: The remaining case could happen if the original expr was something like
# "SUM(a) + b + 1" where b is a GROUP BY field.
return exprs_to_funcs
|
scalingdata/Impala
|
tests/comparison/query_generator.py
|
Python
|
apache-2.0
| 55,531
|
"""Vera tests."""
from unittest.mock import MagicMock
import pyvera as pv
from homeassistant.core import HomeAssistant
from .common import ComponentFactory, new_simple_controller_config
async def test_scene(
hass: HomeAssistant, vera_component_factory: ComponentFactory
) -> None:
"""Test function."""
vera_scene = MagicMock(spec=pv.VeraScene) # type: pv.VeraScene
vera_scene.scene_id = 1
vera_scene.vera_scene_id = vera_scene.scene_id
vera_scene.name = "dev1"
entity_id = "scene.dev1_1"
await vera_component_factory.configure_component(
hass=hass,
controller_config=new_simple_controller_config(scenes=(vera_scene,)),
)
await hass.services.async_call(
"scene",
"turn_on",
{"entity_id": entity_id},
)
await hass.async_block_till_done()
|
partofthething/home-assistant
|
tests/components/vera/test_scene.py
|
Python
|
apache-2.0
| 832
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
from synthtool import gcp
gapic = gcp.GAPICGenerator()
#----------------------------------------------------------------------------
# Generate datastore client
#----------------------------------------------------------------------------
library = gapic.py_library(
'datastore',
'v1',
config_path='/google/datastore/artman_datastore.yaml',
artman_output_name='datastore-v1')
s.move(library / 'google/cloud/datastore_v1/proto')
s.move(library / 'google/cloud/datastore_v1/gapic')
|
jonparrott/gcloud-python
|
datastore/synth.py
|
Python
|
apache-2.0
| 1,170
|
# Copyright 2005-2010 Wesabe, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.insert(0, '../3rdparty')
sys.path.insert(0, '../lib')
import ofx
import unittest
class ErrorTests(unittest.TestCase):
def test_ofx_error_to_str(self):
error = ofx.Error("test", code=9999, severity="ERROR", message="Test")
expected = "Test\n(ERROR 9999: Unknown error code)"
self.assertEqual(expected, error.str())
self.assertEqual(expected, str(error))
if __name__ == '__main__':
unittest.main()
|
myfreecomm/fixofx
|
test/ofx_error.py
|
Python
|
apache-2.0
| 1,053
|
#
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013-2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Firewall configuration plugin for Engine.
"""
import gettext
_ = lambda m: gettext.dgettext(message=m, domain='ovirt-engine-setup')
from otopi import util
from otopi import plugin
from ovirt_engine_setup import constants as osetupcons
from ovirt_engine_setup.engine_common \
import constants as oengcommcons
@util.export
class Plugin(plugin.PluginBase):
"""
Firewall configuration plugin for Engine
"""
def __init__(self, context):
super(Plugin, self).__init__(context=context)
@plugin.event(
stage=plugin.Stages.STAGE_CUSTOMIZATION,
after=(
osetupcons.Stages.NET_FIREWALL_MANAGER_AVAILABLE,
),
)
def _configuration(self):
self.environment[
osetupcons.NetEnv.FIREWALLD_SUBST
].update({
'@JBOSS_HTTP_PORT@': self.environment[
oengcommcons.ConfigEnv.JBOSS_DIRECT_HTTP_PORT
],
'@JBOSS_HTTPS_PORT@': self.environment[
oengcommcons.ConfigEnv.JBOSS_DIRECT_HTTPS_PORT
],
})
if self.environment[
oengcommcons.ConfigEnv.JBOSS_DIRECT_HTTP_PORT
] is not None:
self.environment[osetupcons.NetEnv.FIREWALLD_SERVICES].extend([
{
'name': 'ovirt-jboss-http',
'directory': 'ovirt-engine'
},
{
'name': 'ovirt-jboss-https',
'directory': 'ovirt-engine'
},
])
# vim: expandtab tabstop=4 shiftwidth=4
|
phoenixsbk/kvmmgr
|
packaging/setup/plugins/ovirt-engine-setup/ovirt-engine/config/firewall.py
|
Python
|
apache-2.0
| 2,217
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import ConfigParser
from pprint import pformat
try:
from neutron.openstack.common import log as logging
except ImportError:
from oslo_log import log as logging
from oslo.config import cfg
from httplib2 import Http
import re
import string
import sys
import cgitb
import uuid
import requests
import time
from cfgm_common import exceptions as vnc_exc
from vnc_api import vnc_api
LOG = logging.getLogger(__name__)
vnc_conn = None
class QuotaDriver(object):
"""Configuration driver.
Driver to perform necessary checks to enforce quotas and obtain
quota information. The default driver utilizes the default values
in neutron.conf.
"""
quota_neutron_to_contrail_type = {
'subnet': 'subnet',
'network': 'virtual_network',
'floatingip': 'floating_ip',
'route_table': 'logical_router',
'security_group': 'security_group',
'security_group_rule': 'security_group_rule',
'router': 'logical_router',
'port': 'virtual_machine_interface',
'pool': 'loadbalancer_pool',
'vip': 'virtual_ip',
'member': 'loadbalancer_member',
'health_monitor': 'loadbalancer_healthmonitor'
    }
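    # Maps Neutron resource names to the corresponding Contrail quota attribute
    # names exposed by vnc_api.QuotaType.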
@classmethod
def _get_vnc_conn(cls):
global vnc_conn
if vnc_conn:
return vnc_conn
        # Retry until an api-server is up
while True:
try:
vnc_conn = vnc_api.VncApi(
cfg.CONF.keystone_authtoken.admin_user,
cfg.CONF.keystone_authtoken.admin_password,
cfg.CONF.keystone_authtoken.admin_tenant_name,
cfg.CONF.APISERVER.api_server_ip,
cfg.CONF.APISERVER.api_server_port,
auth_host=cfg.CONF.keystone_authtoken.auth_host,
auth_port=cfg.CONF.keystone_authtoken.auth_port,
auth_protocol=cfg.CONF.keystone_authtoken.auth_protocol)
return vnc_conn
except requests.exceptions.RequestException as e:
time.sleep(3)
# end _get_vnc_conn
def limit_check(self, context, tenant_id,
resources, values):
"""Check simple quota limits.
For limits--those quotas for which there is no usage
synchronization function--this method checks that a set of
proposed values are permitted by the limit restriction.
This method will raise a QuotaResourceUnknown exception if a
given resource is unknown or if it is not a simple limit
resource.
If any of the proposed values is over the defined quota, an
OverQuota exception will be raised with the sorted list of the
resources which are too high. Otherwise, the method returns
nothing.
:param context: The request context, for access checks.
        :param tenant_id: The tenant_id to check quota.
:param resources: A dictionary of the registered resources.
:param values: A dictionary of the values to check against the
quota.
"""
@classmethod
def get_tenant_quotas(cls, context, resources, tenant_id):
try:
default_project = cls._get_vnc_conn().project_read(
fq_name=['default-domain', 'default-project'])
default_quota = default_project.get_quota()
except vnc_exc.NoIdError:
default_quota = None
return cls._get_tenant_quotas(context, resources, tenant_id,
default_quota)
@classmethod
def _get_tenant_quotas(cls, context, resources, tenant_id, default_quota):
try:
proj_id = str(uuid.UUID(tenant_id))
proj_obj = cls._get_vnc_conn().project_read(id=proj_id)
quota = proj_obj.get_quota()
except vnc_exc.NoIdError:
return {}
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise e
qn2c = cls.quota_neutron_to_contrail_type
quotas = {}
for resource in resources:
quota_res = None
if quota and resource in qn2c:
quota_res = getattr(quota, qn2c[resource], None)
if quota_res is None and default_quota and resource in qn2c:
quota_res = getattr(default_quota, qn2c[resource], None)
            if quota_res is None and default_quota:
                quota_res = default_quota.get_defaults()
if quota_res is None:
quota_res = resources[resource].default
quotas[resource] = quota_res
return quotas
@classmethod
def get_all_quotas(cls, context, resources):
try:
default_project = cls._get_vnc_conn().project_read(
fq_name=['default-domain', 'default-project'])
default_quota = default_project.get_quota()
except vnc_exc.NoIdError:
default_quota = None
project_list = cls._get_vnc_conn().projects_list()['projects']
ret_list = []
for project in project_list:
if default_quota and (project['uuid'] == default_project.uuid):
continue
quotas = cls._get_tenant_quotas(context, resources, project['uuid'],
default_quota)
quotas['tenant_id'] = project['uuid']
ret_list.append(quotas)
return ret_list
@classmethod
def delete_tenant_quota(cls, context, tenant_id):
try:
proj_id = str(uuid.UUID(tenant_id))
proj_obj = cls._get_vnc_conn().project_read(id=proj_id)
quota = proj_obj.get_quota()
except vnc_exc.NoIdError:
return
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise e
for k,v in quota.__dict__.items():
if k != 'defaults':
quota.__dict__[k] = None
proj_obj.set_quota(quota)
cls._get_vnc_conn().project_update(proj_obj)
@classmethod
def update_quota_limit(cls, context, tenant_id, resource, limit):
try:
proj_id = str(uuid.UUID(tenant_id))
proj_obj = cls._get_vnc_conn().project_read(id=proj_id)
quota = proj_obj.get_quota() or vnc_api.QuotaType()
except vnc_exc.NoIdError:
return
except Exception as e:
cgitb.Hook(format="text").handle(sys.exc_info())
raise e
qn2c = cls.quota_neutron_to_contrail_type
if resource in qn2c:
quota_method = 'set_' + qn2c[resource]
set_quota = getattr(quota, quota_method)
set_quota(limit)
proj_obj.set_quota(quota)
cls._get_vnc_conn().project_update(proj_obj)
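# Illustrative sketch (not part of the original driver): how a Neutron
# resource name resolves to a Contrail QuotaType accessor through
# quota_neutron_to_contrail_type, mirroring update_quota_limit() above. The
# `quota` argument stands in for a vnc_api.QuotaType instance; nothing in this
# module calls this helper.
def _example_setter_resolution(quota, resource='port', limit=128):
    contrail_type = QuotaDriver.quota_neutron_to_contrail_type[resource]
    # For 'port' this resolves to quota.set_virtual_machine_interface(128).
    setter = getattr(quota, 'set_' + contrail_type)
    setter(limit)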
|
vpramo/contrail-neutron-plugin
|
neutron_plugin_contrail/plugins/opencontrail/quota/driver.py
|
Python
|
apache-2.0
| 6,906
|
#!/usr/bin/env python
# coding: utf-8
#
# Usage: python -matx screen [-s 0.8]
import os
import time
import traceback
import cv2
from functools import partial
from atx.adbkit.client import Client
from atx.adbkit.device import Device
from atx.adbkit.mixins import MinicapStreamMixin, RotationWatcherMixin, MinitouchStreamMixin
class AdbWrapper(RotationWatcherMixin, MinicapStreamMixin, MinitouchStreamMixin, Device):
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self._display = self.display
def on_rotation_change(v):
self.open_minicap_stream()
self._display = self.display
self.open_rotation_watcher(on_rotation_change=on_rotation_change)
self.open_minitouch_stream()
def send_touch(self, cmd):
self._MinitouchStreamMixin__touch_queue.put(cmd)
def input(self, char):
self.shell('input', 'text', char)
def get_adb(host, port, serial):
client = Client(host, port)
if serial is None:
serial = list(client.devices().keys())[0]
return AdbWrapper(client, serial)
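# Illustrative sketch (not part of the original module): a one-shot tap built
# from the same minitouch command strings used by on_mouse_down/on_mouse_up
# further below ('d' = down, 'u' = up, 'c' = commit). The pressure value 30
# and the helper itself exist only for illustration; nothing here calls it.
def _example_tap(adb, x, y):
    adb.send_touch('d 0 %d %d 30\nc\n' % (x, y))
    adb.send_touch('u 0\nc\n')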
__dir__ = os.path.dirname(os.path.abspath(__file__))
def screen_with_controls(host, port, serial, scale=0.5):
from PIL import Image, ImageTk
import Tkinter as tk
import tkFileDialog
adb = get_adb(host, port, serial)
class Screen(object):
def __init__(self):
self.root = tk.Tk()
self.root.title('Sync Screen')
# self.image = Image.open(os.path.join(__dir__, 'static', 'screen.png'))
self.image = None
self.tkimage = None
self.canvas_image = None
self.make_toolbar()
self.make_canvas()
def make_toolbar(self):
# tools: capture, power, home, menu, back, volume_up, volume_down, turn_screen, keymapping_settings
toolbar = tk.Frame(self.root)
self.icons = [] # need to keep a reference for tk images. wtf.
def capture():
if self.image is None:
print 'Not initialized, try later.'
return
d = tkFileDialog.asksaveasfilename(filetypes=(('Images', '*.png;*.jpg;'),), initialfile='screen.png')
if not d: # canceled
return
if not d.endswith('.png') and not d.endswith('.jpg'):
d += '.png'
print 'Save to', d
self.image.save(d)
icon = ImageTk.PhotoImage(file=os.path.join(__dir__, 'static', 'icons', 'save.ico'))
tk.Button(toolbar, image=icon, command=capture).pack(side=tk.LEFT, padx=2, pady=2)
self.icons.append(icon)
# def rotate():
# print 'rotate screen (Not Implemented yet.)'
# icon = ImageTk.PhotoImage(file=os.path.join(__dir__, 'static', 'icons', 'rotate.ico'))
# tk.Button(toolbar, image=icon, command=rotate).pack(side=tk.LEFT, padx=2, pady=2)
# self.icons.append(icon)
for key in ('power', 'home', 'menu', 'back', 'volume_up', 'volume_down'):
icon = ImageTk.PhotoImage(file=os.path.join(__dir__, 'static', 'icons', '%s.ico' % key))
self.icons.append(icon)
b = tk.Button(toolbar, image=icon, command=lambda k=key:adb.keyevent('KEYCODE_%s' % k.upper()))
b.pack(side=tk.LEFT, padx=2, pady=2)
toolbar.pack(side=tk.TOP, fill=tk.X)
def make_canvas(self):
# screen canvas, bind mouse input & keyboard input
self.canvas = tk.Canvas(self.root, bg='black', bd=0, highlightthickness=0)
self.canvas.pack()
def screen2touch(x, y):
'''convert touch position'''
w, h, o = adb._display
if o == 0:
return x, y
elif o == 1: # landscape-right
return w-y, x
elif o == 2: # upsidedown
return w-x, h-y
elif o == 3: # landscape-left
return y, h-x
return x, y
def on_mouse_down(event):
self.canvas.focus_set()
x, y = int(event.x/scale), int(event.y/scale)
x, y = screen2touch(x, y)
adb.send_touch('d 0 %d %d 30\nc\n' % (x, y))
def on_mouse_up(event):
adb.send_touch('u 0\nc\n')
def on_mouse_drag(event):
x, y = int(event.x/scale), int(event.y/scale)
x, y = screen2touch(x, y)
adb.send_touch('m 0 %d %d 30\nc\n' % (x, y))
self.canvas.bind('<ButtonPress-1>', on_mouse_down)
self.canvas.bind('<ButtonRelease-1>', on_mouse_up)
self.canvas.bind('<B1-Motion>', on_mouse_drag)
keymap = {'\r':'KEYCODE_ENTER', ' ':'KEYCODE_SPACE', '\x08':'KEYCODE_DEL', }
def on_key(event):
c = event.char
# print 'key pressed', repr(c), type(c)
                if c in 'abcdefghijklmnopqrstuvwxyz0123456789':
adb.input(c)
return 'break'
if c in keymap:
adb.keyevent(keymap[c])
return 'break'
self.canvas.bind('<Key>', on_key)
def _refresh_screen(self):
img = adb.screenshot_cv2()
if scale != 1.0:
h, w = img.shape[:2]
h, w = int(scale*h), int(scale*w)
img = cv2.resize(img, (w, h))
self.image = Image.fromarray(img[:, :, ::-1])
self.tkimage = ImageTk.PhotoImage(self.image)
w, h = self.image.size
self.canvas.config(width=w, height=h)
if self.canvas_image is None:
self.canvas_image = self.canvas.create_image(0, 0, anchor=tk.NW, image=self.tkimage)
else:
self.canvas.itemconfig(self.canvas_image, image=self.tkimage)
self.root.after(10, self._refresh_screen)
def run(self):
self._refresh_screen()
self.root.mainloop()
s = Screen()
img = adb.screenshot_cv2()
while img is None:
time.sleep(1)
img = adb.screenshot_cv2()
s.run()
def screen_simple(host, port, serial, scale=0.5):
adb = get_adb(host, port, serial)
img = adb.screenshot_cv2()
while img is None:
time.sleep(1)
img = adb.screenshot_cv2()
print 'Press Ctrl-C or Esc to quit.'
winname = 'Sync Screen'
cv2.namedWindow(winname)
while True:
try:
img = adb.screenshot_cv2()
if scale != 1.0:
h, w = img.shape[:2]
h, w = int(scale*h), int(scale*w)
img = cv2.resize(img, (w, h))
cv2.imshow(winname, img)
key = cv2.waitKey(10)
if key == 27: # Escape
break
except KeyboardInterrupt:
print 'Done'
break
except:
traceback.print_exc()
break
cv2.destroyWindow(winname)
def main(serial=None, host=None, port=None, scale=0.5, simple=False):
'''interact'''
if simple:
screen_simple(host, port, serial, scale)
else:
screen_with_controls(host, port, serial, scale)
if __name__ == '__main__':
main()
|
codeskyblue/AutomatorX
|
atx/cmds/screen.py
|
Python
|
apache-2.0
| 7,668
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.sensors.base import BaseSensorOperator
from airflow.utils import timezone
class TimeSensor(BaseSensorOperator):
"""
Waits until the specified time of the day.
:param target_time: time after which the job succeeds
:type target_time: datetime.time
"""
def __init__(self, *, target_time, **kwargs):
super().__init__(**kwargs)
self.target_time = target_time
def poke(self, context):
self.log.info('Checking if the time (%s) has come', self.target_time)
return timezone.make_naive(timezone.utcnow(), self.dag.timezone).time() > self.target_time
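# Illustrative usage sketch (not part of this module), assuming the Airflow 2.x
# DAG API: gate downstream work until 16:00 in the DAG's timezone. The dag_id,
# task_id and start_date are made up, and nothing in Airflow calls this helper.
def _example_time_gate_dag():
    from datetime import datetime, time
    from airflow import DAG
    with DAG(dag_id='example_time_gate', start_date=datetime(2021, 1, 1),
             schedule_interval=None) as dag:
        TimeSensor(task_id='wait_until_4pm', target_time=time(16, 0))
    return dag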
|
nathanielvarona/airflow
|
airflow/sensors/time_sensor.py
|
Python
|
apache-2.0
| 1,406
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A class to store named variables and a scope operator to manage sharing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as collections_lib
import copy
import functools
import traceback
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_contextlib
__all__ = ["VariableScope", "get_variable_scope",
"get_variable", "get_local_variable", "variable_scope",
"variable_op_scope", "no_regularizer"]
class _PartitionInfo(object):
"""Holds partition info used by initializer functions.
"""
def __init__(self, full_shape, var_offset):
"""Constructor.
Args:
full_shape: Tuple or list of `int` indicating the full combined shape
of the partitioned variables.
var_offset: Tuple or list of `int` specifying offset of this partition
with respect to the full variable for each dimension.
Raises:
TypeError: If `full_shape` or `var_offset` is not a sequence.
ValueError: If `full_shape` or `var_offset` differ in length. If
`var_offset` exceeds `full_shape` in any dimension.
"""
if not isinstance(full_shape, collections_lib.Sequence) or isinstance(
full_shape, six.string_types):
raise TypeError(
"`full_shape` must be a sequence (like tuple or list) instead of " +
type(full_shape).__name__)
if not isinstance(var_offset, collections_lib.Sequence) or isinstance(
var_offset, six.string_types):
raise TypeError(
"`var_offset` must be a sequence (like tuple or list) instead of " +
type(var_offset).__name__)
if len(var_offset) != len(full_shape):
raise ValueError(
"Expected equal length, but `var_offset` is of length {} while "
"full_shape is of length {}.".format(
len(var_offset), len(full_shape)))
for i in xrange(len(full_shape)):
offset = var_offset[i]
shape = full_shape[i]
if offset < 0 or offset >= shape:
raise ValueError(
"Expected 0 <= offset < shape but found offset={}, shape={} for "
"var_offset={}, full_shape={}".format(offset, shape, var_offset,
full_shape))
self._full_shape = full_shape
self._var_offset = var_offset
@property
def full_shape(self):
return self._full_shape
@property
def var_offset(self):
return self._var_offset
def single_offset(self, shape):
"""Returns the offset when the variable is partitioned in at most one dim.
Args:
shape: Tuple or list of `int` indicating the shape of one specific
variable partition.
Returns:
`int` representing the offset in the dimension along which the variable is
partitioned. Returns 0 if the variable is not being partitioned.
Raises:
ValueError: Depending on self.single_slice_dim().
"""
single_slice_dim = self.single_slice_dim(shape)
# If this variable is not being partitioned at all, single_slice_dim() could
# return None.
if single_slice_dim is None:
return 0
return self.var_offset[single_slice_dim]
def single_slice_dim(self, shape):
"""Returns the slice dim when the variable is partitioned only in one dim.
Args:
shape: Tuple or list of `int` indicating the shape of one specific
variable partition.
Returns:
`int` representing the dimension that the variable is partitioned in, or
`None` if the variable doesn't seem to be partitioned at all.
Raises:
TypeError: If `shape` is not a sequence.
ValueError: If `shape` is not the same length as `self.full_shape`. If
the variable is partitioned in more than one dimension.
"""
if not isinstance(shape, collections_lib.Sequence) or isinstance(
shape, six.string_types):
raise TypeError(
"`shape` must be a sequence (like tuple or list) instead of " +
type(shape).__name__)
if len(shape) != len(self.full_shape):
raise ValueError(
"Expected equal length, but received shape={} of length {} while "
"self.full_shape={} is of length {}.".format(shape, len(
shape), self.full_shape, len(self.full_shape)))
for i in xrange(len(shape)):
if self.var_offset[i] + shape[i] > self.full_shape[i]:
raise ValueError(
"With self.var_offset={}, a partition of shape={} would exceed "
"self.full_shape={} in dimension {}.".format(
self.var_offset, shape, self.full_shape, i))
slice_dim = None
for i in xrange(len(shape)):
if shape[i] == self.full_shape[i]:
continue
if slice_dim is not None:
raise ValueError(
"Cannot use single_slice_dim() with shape={} and "
"self.full_shape={} since slice dim could be either dimension {} "
"or {}.".format(shape, self.full_shape, i, slice_dim))
slice_dim = i
return slice_dim
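# Illustrative sketch (not part of the original file): how _PartitionInfo
# reports the partitioned axis. The shapes below are made up and nothing in
# this module calls the helper.
def _example_partition_info():
  info = _PartitionInfo(full_shape=[10, 10], var_offset=[0, 5])
  # The partition spans all of axis 0, so only axis 1 is considered split:
  #   info.single_slice_dim([10, 5]) == 1
  #   info.single_offset([10, 5]) == 5
  return info.single_slice_dim([10, 5]), info.single_offset([10, 5])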
class _VariableStore(object):
"""Variable store that carries a number of named Variables.
New variable names and new variables can be created; all stored
variables are initialized with the initializer passed to __init__.
Attributes:
vars: a dictionary with string names (same as passed in GetVar) as keys
and the corresponding TensorFlow Variables as values.
"""
def __init__(self):
"""Create a variable store."""
self._vars = {} # A dictionary of the stored TensorFlow variables.
self._partitioned_vars = {} # A dict of the stored PartitionedVariables.
self.variable_scopes_count = {} # Count re-used variable scopes.
def open_variable_scope(self, scope_name):
if scope_name in self.variable_scopes_count:
self.variable_scopes_count[scope_name] += 1
else:
self.variable_scopes_count[scope_name] = 1
def close_variable_subscopes(self, scope_name):
for k in self.variable_scopes_count:
if not scope_name or k.startswith(scope_name + "/"):
self.variable_scopes_count[k] = 0
def variable_scope_count(self, scope_name):
return self.variable_scopes_count.get(scope_name, 0)
def get_variable(self, name, shape=None, dtype=dtypes.float32,
initializer=None, regularizer=None, reuse=None,
trainable=True, collections=None, caching_device=None,
partitioner=None, validate_shape=True, use_resource=None,
custom_getter=None):
"""Gets an existing variable with these parameters or create a new one.
If a variable with the given name is already stored, we return the stored
variable. Otherwise, we create a new one.
Set `reuse` to `True` when you only want to reuse existing Variables.
Set `reuse` to `False` when you only want to create new Variables.
If `reuse` is `None` (the default), both new and existing variables are
returned.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
reuse: a Boolean or `None`. Controls reuse or creation of variables.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: List of graph collections keys to add the `Variable` to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the `Variable` reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and dtype of the `Variable` to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates
instead an experimental ResourceVariable which has well-defined
semantics. Defaults to False (will later change to True).
custom_getter: Callable that takes as a first argument the true getter,
and allows overwriting the internal get_variable method.
The signature of `custom_getter` should match that of this method,
but the most future-proof version will allow for changes:
`def custom_getter(getter, *args, **kwargs)`. Direct access to
all `get_variable` parameters is also allowed:
`def custom_getter(getter, name, *args, **kwargs)`. A simple identity
custom getter that simply creates variables with modified names is:
```python
def custom_getter(getter, name, *args, **kwargs):
return getter(name + '_suffix', *args, **kwargs)
```
Returns:
The created or existing `Variable` (or `PartitionedVariable`, if a
partitioner was used).
Raises:
ValueError: when creating a new variable and shape is not declared,
when reusing a variable and specifying a conflicting shape,
or when violating reuse during variable creation.
"""
if custom_getter is not None and not callable(custom_getter):
raise ValueError(
"Passed a custom_getter which is not callable: %s" % custom_getter)
# If a *_ref type is passed in an error would be triggered further down the
# stack. We prevent this using base_dtype to get a non-ref version of the
# type, before doing anything else. When _ref types are removed in favor of
# resources, this line can be removed.
try:
dtype = dtype.base_dtype
except AttributeError:
# .base_dtype not existing means that we will try and use the raw dtype
# which was passed in - this might be a NumPy type which is valid.
pass
# This is the main logic of get_variable. However, custom_getter
# may override this logic. So we save it as a callable and pass
# it to custom_getter.
# Note: the parameters of _true_getter, and their documentation, match
# *exactly* item-for-item with the docstring of this method.
def _true_getter(name, shape=None, dtype=dtypes.float32, # pylint: disable=missing-docstring
initializer=None, regularizer=None, reuse=None,
trainable=True, collections=None, caching_device=None,
partitioner=None, validate_shape=True, use_resource=None):
is_scalar = (shape is not None and isinstance(shape, collections_lib.Sequence)
and len(shape) == 0)
# Partitioned variable case
if partitioner is not None and not is_scalar:
if not callable(partitioner):
raise ValueError(
"Partitioner must be callable, but received: %s" % partitioner)
with ops.name_scope(None):
return self._get_partitioned_variable(name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource)
# Special case for partitioned variable to allow reuse without having to
# specify partitioner.
if (reuse is True and partitioner is None
and name in self._partitioned_vars):
return self._get_partitioned_variable(name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=None,
validate_shape=validate_shape,
use_resource=use_resource)
# Single variable case
if "%s/part_0" % name in self._vars:
raise ValueError(
"No partitioner was provided, but a partitioned version of the "
"variable was found: %s/part_0. Perhaps a variable of the same "
"name was already created with partitioning?" % name)
return self._get_single_variable(
name=name, shape=shape, dtype=dtype,
initializer=initializer, regularizer=regularizer, reuse=reuse,
trainable=trainable, collections=collections,
caching_device=caching_device, validate_shape=validate_shape,
use_resource=use_resource)
if custom_getter is not None:
return custom_getter(
getter=_true_getter, name=name, shape=shape, dtype=dtype,
initializer=initializer, regularizer=regularizer,
reuse=reuse, trainable=trainable, collections=collections,
caching_device=caching_device, partitioner=partitioner,
validate_shape=validate_shape, use_resource=use_resource)
else:
return _true_getter(
name, shape=shape, dtype=dtype,
initializer=initializer, regularizer=regularizer,
reuse=reuse, trainable=trainable, collections=collections,
caching_device=caching_device, partitioner=partitioner,
validate_shape=validate_shape, use_resource=use_resource)
def _get_partitioned_variable(
self, name, partitioner, shape=None, dtype=dtypes.float32,
initializer=None, regularizer=None, reuse=None,
trainable=True, collections=None, caching_device=None,
validate_shape=True, use_resource=None):
"""Gets or creates a sharded variable list with these parameters.
The `partitioner` must be a callable that accepts a fully defined
`TensorShape` and returns a sequence of integers (the `partitions`).
These integers describe how to partition the given sharded `Variable`
along the given dimension. That is, `partitions[1] = 3` means split
the `Variable` into 3 shards along dimension 1. Currently, sharding along
only one axis is supported.
If the list of variables with the given name (prefix) is already stored,
we return the stored variables. Otherwise, we create a new one.
Set `reuse` to `True` when you only want to reuse existing Variables.
Set `reuse` to `False` when you only want to create new Variables.
If `reuse` is `None` (the default), both new and existing variables are
returned.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If the initializer is a callable, then it will be called for each
shard. Otherwise the initializer should match the shape of the entire
sharded Variable, and it will be sliced accordingly for each shard.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: the name of the new or existing sharded variable.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
shape: shape of the new or existing sharded variable.
dtype: type of the new or existing sharded variable
(defaults to `DT_FLOAT`).
initializer: initializer for the sharded variable.
regularizer: a (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
reuse: a Boolean or `None`. Controls reuse or creation of variables.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: List of graph collections keys to add the Variable to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates an
experimental ResourceVariable which has well-defined semantics. Defaults
to False (will later change to True).
Returns:
A `PartitionedVariable` object.
Raises:
ValueError: when creating a new variable and shape is not declared,
when reusing a variable and specifying a conflicting shape,
when violating reuse during variable creation, or if an existing
sharded variable exists for the given name but with different sharding.
"""
initializing_from_value = initializer is not None and isinstance(
initializer, ops.Tensor)
reuse_without_partition = reuse is True and partitioner is None
if name in self._vars:
raise ValueError(
"A partitioner was provided, but an unpartitioned version of the "
"variable was found: %s. Perhaps a variable of the same name was "
"already created without partitioning?" % name)
shape = tensor_shape.as_shape(shape)
if initializing_from_value:
shape = shape.merge_with(initializer.get_shape())
if not reuse_without_partition:
if not shape.is_fully_defined():
raise ValueError("Shape of a new partitioned variable (%s) must be "
"fully defined, but instead was %s." % (name, shape))
if shape.ndims < 1:
raise ValueError("A partitioned Variable must have rank at least 1, "
"shape: %s" % shape)
partitions = partitioner(shape=shape, dtype=dtype)
if not isinstance(partitions, collections_lib.Sequence):
raise ValueError("Partitioner must return a sequence, but saw: %s"
% partitions)
if len(partitions) != shape.ndims:
raise ValueError(
"Partitioner returned a partition list that does not match the "
"Variable's rank: %s vs. %s" % (partitions, shape))
if any([p < 1 for p in partitions]):
raise ValueError(
"Partitioner returned zero partitions for some axes: %s" %
partitions)
should_check = reuse is not None
if name in self._partitioned_vars:
if should_check and not reuse:
raise ValueError(
"Partitioned variable with name %s already exists. Did you mean to "
"set reuse=True in VarScope?"
% name)
existing_var = self._partitioned_vars[name]
if not shape.is_compatible_with(existing_var.get_shape()):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified shape %s "
"and found shape %s."
% (name, shape, existing_var.get_shape()))
if not dtype.is_compatible_with(existing_var.dtype):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified dtype %s "
"and found dtype %s."
% (name, dtype.name, existing_var.dtype.name))
# pylint: disable=protected-access
if (not reuse_without_partition and
existing_var._get_partitions() != partitions):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified partitions "
"%s and found partitions %s." %
(name, partitions, existing_var._get_partitions()))
# pylint: enable=protected-access
return existing_var
if should_check and reuse:
raise ValueError("PartitionedVariable %s does not exist, or was not "
"created with tf.get_variable(). Did you mean to set "
"reuse=None in VarScope?" % name)
slice_dim, slice_shape = _compute_slice_dim_and_shape(
shape.as_list(), partitions)
vs = []
num_slices = partitions[slice_dim]
num_slices_with_excess = shape[slice_dim].value % num_slices
slice_offset = [0] * shape.ndims
if "%s/part_0" % name in self._vars:
if "%s/part_%d" % (name, num_slices - 1) not in self._vars:
raise ValueError(
"Partitioner returned a different partitioning than what was "
"already found. Partitioner returned %d shards, and shard "
"%s/part_0 was found, but %s/part_%d was not."
% (num_slices, name, name, num_slices - 1))
if "%s/part_%d" % (name, num_slices) in self._vars:
raise ValueError(
"Partitioner returned a different partitioning than what was "
"already found. Partitioner returned %d shards, and shard "
"%s/part_0 was found, but so was the extra shard %s/part_%d."
% (num_slices, name, name, num_slices))
for i in xrange(num_slices):
var_shape = slice_shape[:]
var_offset = slice_offset[:]
partition_info = _PartitionInfo(
full_shape=shape.as_list(), var_offset=var_offset)
if i < num_slices_with_excess:
var_shape[slice_dim] += 1
slice_offset[slice_dim] += var_shape[slice_dim]
var_full_name = "%s/part_%d" % (name, i)
with ops.name_scope(var_full_name + "/PartitionedInitializer"):
# Create the tensor to initialize the variable with default value.
if initializer is None:
init, initializing_from_value = self._get_default_initializer(
name=name, shape=shape, dtype=dtype)
if initializing_from_value:
init_shape = None
else:
init_shape = var_shape
elif callable(initializer):
init = initializer
init_shape = var_shape
elif isinstance(initializer, ops.Tensor):
init = array_ops.slice(initializer, var_offset, var_shape)
# Use the dtype of the given tensor.
dtype = init.dtype.base_dtype
init_shape = None
else:
init = ops.convert_to_tensor(initializer, dtype=dtype)
init = array_ops.slice(init, var_offset, var_shape)
init_shape = None
with ops.name_scope(None):
var = self._get_single_variable(
name=var_full_name,
shape=init_shape,
dtype=dtype,
initializer=init,
partition_info=partition_info,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
validate_shape=validate_shape,
use_resource=use_resource)
# pylint: disable=protected-access
var._set_save_slice_info(variables.Variable.SaveSliceInfo(
name, shape.as_list(), var_offset, var_shape))
vs.append(var)
# pylint: enable=protected-access
# pylint: disable=protected-access
partitioned_var = variables.PartitionedVariable(name=name,
shape=shape,
dtype=dtype,
variable_list=vs,
partitions=partitions)
# pylint: enable=protected-access
self._partitioned_vars[name] = partitioned_var
return partitioned_var
def _get_single_variable(self,
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
partition_info=None,
reuse=None,
trainable=True,
collections=None,
caching_device=None,
validate_shape=True,
use_resource=None,):
"""Get or create a single Variable (e.g. a shard or entire variable).
See the documentation of get_variable above (ignore partitioning components)
for details.
Args:
name: see get_variable.
shape: see get_variable.
dtype: see get_variable.
initializer: see get_variable.
regularizer: see get_variable.
partition_info: _PartitionInfo object.
reuse: see get_variable.
trainable: see get_variable.
collections: see get_variable.
caching_device: see get_variable.
validate_shape: see get_variable.
use_resource: see get_variable.
Returns:
A Variable. See documentation of get_variable above.
Raises:
ValueError: See documentation of get_variable above.
"""
# Set to true if initializer is a constant.
initializing_from_value = False
if initializer is not None and not callable(initializer):
initializing_from_value = True
if shape is not None and initializing_from_value:
raise ValueError("If initializer is a constant, do not specify shape.")
should_check = reuse is not None
dtype = dtypes.as_dtype(dtype)
shape = tensor_shape.as_shape(shape)
if name in self._vars:
# Here we handle the case when returning an existing variable.
if should_check and not reuse:
tb = self._vars[name].op.traceback[::-1]
# Throw away internal tf entries and only take a few lines.
tb = [x for x in tb if "tensorflow/python" not in x[0]][:3]
raise ValueError("Variable %s already exists, disallowed."
" Did you mean to set reuse=True in VarScope? "
"Originally defined at:\n\n%s" % (
name, "".join(traceback.format_list(tb))))
found_var = self._vars[name]
if not shape.is_compatible_with(found_var.get_shape()):
raise ValueError("Trying to share variable %s, but specified shape %s"
" and found shape %s." % (name, shape,
found_var.get_shape()))
if not dtype.is_compatible_with(found_var.dtype):
dtype_str = dtype.name
found_type_str = found_var.dtype.name
raise ValueError("Trying to share variable %s, but specified dtype %s"
" and found dtype %s." % (name, dtype_str,
found_type_str))
return found_var
# The code below handles only the case of creating a new variable.
if should_check and reuse:
raise ValueError("Variable %s does not exist, or was not created with "
"tf.get_variable(). Did you mean to set reuse=None in "
"VarScope?" % name)
if not shape.is_fully_defined() and not initializing_from_value:
raise ValueError("Shape of a new variable (%s) must be fully defined, "
"but instead was %s." % (name, shape))
# Create the tensor to initialize the variable with default value.
if initializer is None:
initializer, initializing_from_value = self._get_default_initializer(
name=name, shape=shape, dtype=dtype)
# Clear control dependencies while creating the initializer.
with ops.control_dependencies(None):
if initializing_from_value:
init_val = initializer
variable_dtype = None
else:
# Instantiate initializer if provided initializer is a type object.
if isinstance(initializer, type(init_ops.Initializer)):
initializer = initializer(dtype=dtype)
init_val = lambda: initializer( # pylint: disable=g-long-lambda
shape.as_list(), dtype=dtype, partition_info=partition_info)
variable_dtype = dtype.base_dtype
# Create the variable.
if use_resource is None:
# Set the default value if unspecified.
use_resource = False
if use_resource:
v = resource_variable_ops.ResourceVariable(
initial_value=init_val,
name=name,
trainable=trainable,
collections=collections,
caching_device=caching_device,
dtype=variable_dtype,
validate_shape=validate_shape)
else:
v = variables.Variable(
initial_value=init_val,
name=name,
trainable=trainable,
collections=collections,
caching_device=caching_device,
dtype=variable_dtype,
validate_shape=validate_shape)
self._vars[name] = v
logging.vlog(1, "Created variable %s with shape %s and init %s", v.name,
format(shape), initializer)
# Run the regularizer if requested and save the resulting loss.
if regularizer:
with ops.colocate_with(v.op):
with ops.name_scope(name + "/Regularizer/"):
loss = regularizer(v)
if loss is not None:
logging.vlog(1, "Applied regularizer to %s and added the result %s "
"to REGULARIZATION_LOSSES.", v.name, loss.name)
ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, loss)
return v
# Initialize variable when no initializer provided
def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):
"""Provide a default initializer and a corresponding value.
Args:
name: see get_variable.
shape: see get_variable.
dtype: see get_variable.
Returns:
initializer and initializing_from_value. See get_variable above.
Raises:
ValueError: When giving unsupported dtype.
"""
# If dtype is DT_FLOAT, provide a uniform unit scaling initializer
if dtype.is_floating:
initializer = init_ops.glorot_uniform_initializer()
initializing_from_value = False
# If dtype is DT_INT/DT_UINT, provide a default value `zero`
# If dtype is DT_BOOL, provide a default value `FALSE`
elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:
initializer = init_ops.zeros_initializer()(
shape=shape, dtype=dtype.base_dtype)
initializing_from_value = True
    # NOTE: Do we need to support handling DT_STRING and DT_COMPLEX here?
else:
raise ValueError("An initializer for variable %s of %s is required"
% (name, dtype.base_dtype))
return initializer, initializing_from_value
# To stop regularization, use this regularizer
def no_regularizer(_):
"""Use this function to prevent regularization of variables."""
return None
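# Illustrative sketch (not part of the original file): the partitioner contract
# used by _VariableStore._get_partitioned_variable above. A partitioner is
# called as partitioner(shape=<fully defined TensorShape>, dtype=<DType>) and
# must return one integer per axis, with at most one axis split. The fixed
# shard count below is a made-up policy, not a TensorFlow built-in.
def _example_fixed_shards_partitioner(num_shards=4):
  """Returns a partitioner that always splits axis 0 into `num_shards` parts."""
  def _partitioner(shape, dtype):
    del dtype  # Part of the contract, unused by this toy policy.
    return [num_shards] + [1] * (shape.ndims - 1)
  return _partitioner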
class VariableScope(object):
"""Variable scope object to carry defaults to provide to `get_variable`.
Many of the arguments we need for `get_variable` in a variable store are most
easily handled with a context. This object is used for the defaults.
Attributes:
name: name of the current scope, used as prefix in get_variable.
initializer: default initializer passed to get_variable.
regularizer: default regularizer passed to get_variable.
reuse: Boolean or None, setting the reuse in get_variable.
caching_device: string, callable, or None: the caching device passed to
get_variable.
partitioner: callable or `None`: the partitioner passed to `get_variable`.
custom_getter: default custom getter passed to get_variable.
name_scope: The name passed to `tf.name_scope`.
dtype: default type passed to get_variable (defaults to DT_FLOAT).
use_resource: if False, create a normal Variable; if True create an
experimental ResourceVariable with well-defined semantics. Defaults
to False (will later change to True).
"""
def __init__(self,
reuse,
name="",
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
name_scope="",
dtype=dtypes.float32,
use_resource=None):
"""Creates a new VariableScope with the given properties."""
self._name = name
self._initializer = initializer
self._regularizer = regularizer
self._reuse = reuse
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._name_scope = name_scope
self._dtype = dtype
self._use_resource = use_resource
@property
def name(self):
return self._name
@property
def original_name_scope(self):
return self._name_scope
@property
def reuse(self):
return self._reuse
@property
def initializer(self):
return self._initializer
@property
def dtype(self):
return self._dtype
@property
def use_resource(self):
return self._use_resource
@property
def regularizer(self):
return self._regularizer
@property
def caching_device(self):
return self._caching_device
@property
def partitioner(self):
return self._partitioner
@property
def custom_getter(self):
return self._custom_getter
def reuse_variables(self):
"""Reuse variables in this scope."""
self._reuse = True
def set_initializer(self, initializer):
"""Set initializer for this scope."""
self._initializer = initializer
def set_dtype(self, dtype):
"""Set data type for this scope."""
self._dtype = dtype
def set_use_resource(self, use_resource):
"""Sets whether to use ResourceVariables for this scope."""
self._use_resource = use_resource
def set_regularizer(self, regularizer):
"""Set regularizer for this scope."""
self._regularizer = regularizer
def set_caching_device(self, caching_device):
"""Set caching_device for this scope."""
self._caching_device = caching_device
def set_partitioner(self, partitioner):
"""Set partitioner for this scope."""
self._partitioner = partitioner
def set_custom_getter(self, custom_getter):
"""Set custom getter for this scope."""
self._custom_getter = custom_getter
def get_collection(self, name):
"""Get this scope's variables."""
scope = self._name + "/" if self._name else ""
return ops.get_collection(name, scope)
def trainable_variables(self):
"""Get this scope's trainable variables."""
return self.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
def global_variables(self):
"""Get this scope's global variables."""
return self.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
def get_variable(self,
var_store,
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
reuse=None,
trainable=True,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,):
"""Gets an existing variable with this name or create a new one."""
if regularizer is None:
regularizer = self._regularizer
if caching_device is None:
caching_device = self._caching_device
if partitioner is None:
partitioner = self._partitioner
if custom_getter is None:
custom_getter = self._custom_getter
if reuse is None:
reuse = self._reuse
full_name = self.name + "/" + name if self.name else name
# Variable names only depend on variable_scope (full_name here),
# not name_scope, so we reset it below for the time of variable creation.
with ops.name_scope(None):
# Check that `initializer` dtype and `dtype` are consistent before
# replacing them with defaults.
if (dtype is not None and initializer is not None and
not callable(initializer)):
init_dtype = ops.convert_to_tensor(initializer).dtype.base_dtype
if init_dtype != dtype:
raise ValueError("Initializer type '%s' and explicit dtype '%s' "
"don't match." % (init_dtype, dtype))
if initializer is None:
initializer = self._initializer
if dtype is None:
dtype = self._dtype
if use_resource is None:
use_resource = self._use_resource
return var_store.get_variable(
full_name, shape=shape, dtype=dtype, initializer=initializer,
regularizer=regularizer, reuse=reuse, trainable=trainable,
collections=collections, caching_device=caching_device,
partitioner=partitioner, validate_shape=validate_shape,
use_resource=use_resource, custom_getter=custom_getter)
def _get_partitioned_variable(self,
var_store,
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=True,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None):
"""Gets an existing variable with this name or create a new one."""
if initializer is None:
initializer = self._initializer
if regularizer is None:
regularizer = self._regularizer
if caching_device is None:
caching_device = self._caching_device
if partitioner is None:
partitioner = self._partitioner
if dtype is None:
dtype = self._dtype
if use_resource is None:
use_resource = self._use_resource
if self._custom_getter is not None:
raise ValueError(
"Private access to _get_partitioned_variable is not allowed when "
"a custom getter is set. Current custom getter: %s. "
"It is likely that you're using create_partitioned_variables. "
"If so, consider instead using get_variable with a non-empty "
"partitioner parameter instead." % self._custom_getter)
if partitioner is None:
raise ValueError("No partitioner was specified")
# This allows the variable scope name to be used as the variable name if
# this function is invoked with an empty name arg, for backward
# compatibility with create_partitioned_variables().
full_name_list = []
if self.name:
full_name_list.append(self.name)
if name:
full_name_list.append(name)
full_name = "/".join(full_name_list)
# Variable names only depend on variable_scope (full_name here),
# not name_scope, so we reset it below for the time of variable creation.
with ops.name_scope(None):
# pylint: disable=protected-access
return var_store._get_partitioned_variable(
full_name, shape=shape, dtype=dtype, initializer=initializer,
regularizer=regularizer, reuse=self.reuse, trainable=trainable,
collections=collections, caching_device=caching_device,
partitioner=partitioner, validate_shape=validate_shape,
use_resource=use_resource)
# pylint: enable=protected-access
_VARSTORE_KEY = ("__variable_store",)
_VARSCOPE_KEY = ("__varscope",)
def get_variable_scope():
"""Returns the current variable scope."""
scope = ops.get_collection(_VARSCOPE_KEY)
if scope: # This collection has at most 1 element, the default scope at [0].
return scope[0]
scope = VariableScope(False)
ops.add_to_collection(_VARSCOPE_KEY, scope)
return scope
def _get_default_variable_store():
store = ops.get_collection(_VARSTORE_KEY)
if store:
return store[0]
store = _VariableStore()
ops.add_to_collection(_VARSTORE_KEY, store)
return store
def get_variable(name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=True,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None):
return get_variable_scope().get_variable(
_get_default_variable_store(), name, shape=shape, dtype=dtype,
initializer=initializer, regularizer=regularizer, trainable=trainable,
collections=collections, caching_device=caching_device,
partitioner=partitioner, validate_shape=validate_shape,
use_resource=use_resource, custom_getter=custom_getter)
get_variable_or_local_docstring = (
"""%s
%sThis function prefixes the name with the current variable scope
and performs reuse checks. See the
@{$variables$Variable Scope How To}
for an extensive description of how reusing works. Here is a basic example:
```python
with tf.variable_scope("foo"):
v = tf.get_variable("v", [1]) # v.name == "foo/v:0"
w = tf.get_variable("w", [1]) # w.name == "foo/w:0"
with tf.variable_scope("foo", reuse=True):
v1 = tf.get_variable("v") # The same as v above.
```
If initializer is `None` (the default), the default initializer passed in
the variable scope will be used. If that one is `None` too, a
`glorot_uniform_initializer` will be used. The initializer can also be
a Tensor, in which case the variable is initialized to this value and shape.
Similarly, if the regularizer is `None` (the default), the default regularizer
passed in the variable scope will be used (if that is `None` too,
then by default no regularization is performed).
If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable if one is created.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
@{tf.GraphKeys.REGULARIZATION_LOSSES} and can be used for regularization.
%scollections: List of graph collections keys to add the Variable to.
Defaults to `[%s]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If true, creates an
experimental ResourceVariable instead with well-defined semantics.
Defaults to False (will later change to True).
custom_getter: Callable that takes as a first argument the true getter, and
allows overwriting the internal get_variable method.
The signature of `custom_getter` should match that of this method,
but the most future-proof version will allow for changes:
`def custom_getter(getter, *args, **kwargs)`. Direct access to
all `get_variable` parameters is also allowed:
`def custom_getter(getter, name, *args, **kwargs)`. A simple identity
custom getter that simply creates variables with modified names is:
```python
def custom_getter(getter, name, *args, **kwargs):
return getter(name + '_suffix', *args, **kwargs)
```
Returns:
The created or existing `Variable` (or `PartitionedVariable`, if a
partitioner was used).
Raises:
ValueError: when creating a new variable and shape is not declared,
when violating reuse during variable creation, or when `initializer` dtype
and `dtype` don't match. Reuse is set inside `variable_scope`.
""")
get_variable.__doc__ = get_variable_or_local_docstring % (
"Gets an existing variable with these parameters or create a new one.",
"",
"trainable: If `True` also add the variable to the graph collection\n"
" `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n ",
"GraphKeys.GLOBAL_VARIABLES")
@functools.wraps(get_variable)
def get_local_variable(*args, **kwargs):
kwargs["trainable"] = False
if "collections" in kwargs:
kwargs["collections"] += [ops.GraphKeys.LOCAL_VARIABLES]
else:
kwargs["collections"] = [ops.GraphKeys.LOCAL_VARIABLES]
return get_variable(*args, **kwargs)
get_local_variable.__doc__ = get_variable_or_local_docstring % (
"Gets an existing *local* variable or creates a new one.",
"Behavior is the same as in `get_variable`, except that variables are\n"
"added to the `LOCAL_VARIABLES` collection and `trainable` is set to\n"
"`False`.\n",
"",
"GraphKeys.LOCAL_VARIABLES")
def _get_partitioned_variable(name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=True,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None):
"""Gets or creates a sharded variable list with these parameters.
The `partitioner` must be a callable that accepts a fully defined
`TensorShape` and returns a sequence of integers (the `partitions`).
These integers describe how to partition the given sharded `Variable`
along the given dimension. That is, `partitions[1] = 3` means split
the `Variable` into 3 shards along dimension 1. Currently, sharding along
only one axis is supported.
If the list of variables with the given name (prefix) is already stored,
we return the stored variables. Otherwise, we create a new one.
Set `reuse` to `True` when you only want to reuse existing Variables.
Set `reuse` to `False` when you only want to create new Variables.
If `reuse` is `None` (the default), both new and existing variables are
returned.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If the initializer is a callable, then it will be called for each
shard. Otherwise the initializer should match the shape of the entire
sharded Variable, and it will be sliced accordingly for each shard.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable if one is created.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: List of graph collections keys to add the Variable to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates an
experimental ResourceVariable instead which has well-defined semantics.
Defaults to False (will later change to True).
Returns:
A tuple `(shards, partitions)` where `shards` is the list of `Variable`
shards and `partitions` is the output of the partitioner on the input
shape.
Raises:
ValueError: when creating a new variable and shape is not declared,
or when violating reuse during variable creation. Reuse is set inside
`variable_scope`.
"""
# pylint: disable=protected-access
scope = get_variable_scope()
if scope.custom_getter is not None:
raise ValueError(
"Private access to _get_partitioned_variable is not allowed when "
"a custom getter is set. Current custom getter: %s. "
"It is likely that you're using create_partitioned_variables. "
"If so, consider instead using get_variable with a non-empty "
"partitioner parameter instead." % scope.custom_getter)
return scope._get_partitioned_variable(
_get_default_variable_store(), name, shape=shape, dtype=dtype,
initializer=initializer, regularizer=regularizer, trainable=trainable,
collections=collections, caching_device=caching_device,
partitioner=partitioner, validate_shape=validate_shape,
use_resource=use_resource)
# pylint: enable=protected-access
@tf_contextlib.contextmanager
def _pure_variable_scope(name_or_scope,
reuse=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
old_name_scope=None,
dtype=dtypes.float32,
use_resource=None):
"""Creates a context for the variable_scope, see `variable_scope` for docs.
Note: this does not create a name scope.
Args:
name_or_scope: `string` or `VariableScope`: the scope to open.
reuse: `True` or `None`; if `True`, we go into reuse mode for this scope as
well as all sub-scopes; if `None`, we just inherit the parent scope reuse.
initializer: default initializer for variables within this scope.
regularizer: default regularizer for variables within this scope.
caching_device: default caching device for variables within this scope.
partitioner: default partitioner for variables within this scope.
custom_getter: default custom getter for variables within this scope.
old_name_scope: the original name scope when re-entering a variable scope.
dtype: type of the variables within this scope (defaults to `DT_FLOAT`).
use_resource: If False, variables in this scope will be regular Variables.
      If True, experimental ResourceVariables will be created instead, with
well-defined semantics. Defaults to False (will later change to True).
Yields:
A scope that can be captured and reused.
Raises:
ValueError: when trying to reuse within a create scope, or create within
a reuse scope, or if reuse is not `None` or `True`.
TypeError: when the types of some arguments are not appropriate.
"""
get_variable_scope() # Ensure that a default exists, then get a pointer.
# Get the reference to the collection as we want to modify it in place.
default_varscope = ops.get_collection_ref(_VARSCOPE_KEY)
old = default_varscope[0]
var_store = _get_default_variable_store()
if isinstance(name_or_scope, VariableScope):
new_name = name_or_scope.name
else:
new_name = old.name + "/" + name_or_scope if old.name else name_or_scope
try:
var_store.open_variable_scope(new_name)
if isinstance(name_or_scope, VariableScope):
old_subscopes = copy.copy(var_store.variable_scopes_count)
name_scope = name_or_scope._name_scope # pylint: disable=protected-access
# Handler for the case when we jump to a shared scope.
# We create a new VariableScope (default_varscope[0]) that contains
# a copy of the provided shared scope, possibly with changed reuse
# and initializer, if the user requested this.
default_varscope[0] = VariableScope(
name_or_scope.reuse if reuse is None else reuse,
name=new_name,
initializer=name_or_scope.initializer,
regularizer=name_or_scope.regularizer,
caching_device=name_or_scope.caching_device,
partitioner=name_or_scope.partitioner,
dtype=name_or_scope.dtype,
custom_getter=name_or_scope.custom_getter,
name_scope=name_scope,
use_resource=name_or_scope.use_resource)
if initializer is not None:
default_varscope[0].set_initializer(initializer)
if regularizer is not None:
default_varscope[0].set_regularizer(regularizer)
if caching_device is not None:
default_varscope[0].set_caching_device(caching_device)
if partitioner is not None:
default_varscope[0].set_partitioner(partitioner)
if custom_getter is not None:
default_varscope[0].set_custom_getter(
_maybe_wrap_custom_getter(
custom_getter, name_or_scope.custom_getter))
if dtype is not None:
default_varscope[0].set_dtype(dtype)
if use_resource is not None:
default_varscope[0].set_use_resource(use_resource)
yield default_varscope[0]
else:
# Handler for the case when we just prolong current variable scope.
# VariableScope with name extended by the provided one, and inherited
# reuse and initializer (except if the user provided values to set).
reuse = reuse or old.reuse # Re-using is inherited by sub-scopes.
default_varscope[0] = VariableScope(
reuse,
name=new_name,
initializer=old.initializer,
regularizer=old.regularizer,
caching_device=old.caching_device,
partitioner=old.partitioner,
dtype=old.dtype,
use_resource=old.use_resource,
custom_getter=old.custom_getter,
name_scope=old_name_scope or name_or_scope)
if initializer is not None:
default_varscope[0].set_initializer(initializer)
if regularizer is not None:
default_varscope[0].set_regularizer(regularizer)
if caching_device is not None:
default_varscope[0].set_caching_device(caching_device)
if partitioner is not None:
default_varscope[0].set_partitioner(partitioner)
if custom_getter is not None:
default_varscope[0].set_custom_getter(
_maybe_wrap_custom_getter(custom_getter, old.custom_getter))
if dtype is not None:
default_varscope[0].set_dtype(dtype)
if use_resource is not None:
default_varscope[0].set_use_resource(use_resource)
yield default_varscope[0]
finally:
var_store.close_variable_subscopes(new_name)
# If jumping out from a non-prolonged scope, restore counts.
if isinstance(name_or_scope, VariableScope):
var_store.variable_scopes_count = old_subscopes
default_varscope[0] = old
def _maybe_wrap_custom_getter(custom_getter, old_getter):
"""Wrap a call to a custom_getter to use the old_getter internally."""
if old_getter is None:
return custom_getter
# The new custom_getter should call the old one
def wrapped_custom_getter(getter, *args, **kwargs):
# Call:
# custom_getter(
# lambda: old_getter(true_getter, ...), *args, **kwargs)
# which means custom_getter will call old_getter, which
# will call the true_getter, perform any intermediate
# processing, and return the results to the current
# getter, which will also perform additional processing.
return custom_getter(
functools.partial(old_getter, getter),
*args, **kwargs)
return wrapped_custom_getter
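# Hedged sketch (editorial addition): a custom getter receives the previous
# getter as its first positional argument, so wrapped getters chain exactly
# as the comment above describes. The float16-cast getter below is an assumed
# example pattern, not part of the original module; it relies only on the
# public TensorFlow 1.x API.
def _example_custom_getter_chain():
  import tensorflow as tf
  def cast_to_float16(getter, *args, **kwargs):
    # Delegate creation to the inner getter, then post-process the result.
    var = getter(*args, **kwargs)
    return tf.cast(var, tf.float16)
  with tf.variable_scope("half_precision", custom_getter=cast_to_float16):
    return tf.get_variable("w", shape=[4, 4], dtype=tf.float32)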
def _get_unique_variable_scope(prefix):
"""Get a name with the given prefix unique in the current variable scope."""
var_store = _get_default_variable_store()
current_scope = get_variable_scope()
name = current_scope.name + "/" + prefix if current_scope.name else prefix
if var_store.variable_scope_count(name) == 0:
return prefix
idx = 1
while var_store.variable_scope_count(name + ("_%d" % idx)) > 0:
idx += 1
return prefix + ("_%d" % idx)
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def variable_scope(name_or_scope,
default_name=None,
values=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
reuse=None,
dtype=None,
use_resource=None):
"""Returns a context manager for defining ops that creates variables (layers).
This context manager validates that the (optional) `values` are from
the same graph, ensures that graph is the default graph, and pushes a
name scope and a variable scope.
  If `name_or_scope` is not None, it is used as is. If `name_or_scope` is None,
  then `default_name` is used. In that case, if the same name has been
  previously used in the same scope, it will be made unique by appending `_N`
  to it.
  Variable scope allows you to create new variables and to share already
  created ones while providing checks to not create or share by accident.
  For details, see the @{$variables$Variable Scope How To}; here we present
  only a few basic examples.
Simple example of how to create a new variable:
```python
with tf.variable_scope("foo"):
with tf.variable_scope("bar"):
v = tf.get_variable("v", [1])
assert v.name == "foo/bar/v:0"
```
Basic example of sharing a variable:
```python
with tf.variable_scope("foo"):
v = tf.get_variable("v", [1])
with tf.variable_scope("foo", reuse=True):
v1 = tf.get_variable("v", [1])
assert v1 == v
```
Sharing a variable by capturing a scope and setting reuse:
```python
with tf.variable_scope("foo") as scope:
v = tf.get_variable("v", [1])
scope.reuse_variables()
v1 = tf.get_variable("v", [1])
assert v1 == v
```
To prevent accidental sharing of variables, we raise an exception when
getting an existing variable in a non-reusing scope.
```python
with tf.variable_scope("foo"):
v = tf.get_variable("v", [1])
v1 = tf.get_variable("v", [1])
# Raises ValueError("... v already exists ...").
```
Similarly, we raise an exception when trying to get a variable that
does not exist in reuse mode.
```python
with tf.variable_scope("foo", reuse=True):
v = tf.get_variable("v", [1])
# Raises ValueError("... v does not exists ...").
```
Note that the `reuse` flag is inherited: if we open a reusing scope,
then all its sub-scopes become reusing as well.
  A note about name scoping: Setting `reuse` does not impact the naming of other
  ops such as multiplication. See related discussion on [github#6189](https://github.com/tensorflow/tensorflow/issues/6189)
Note that up to and including version 1.0, it was allowed (though
explicitly discouraged) to pass False to the reuse argument, yielding
undocumented behaviour slightly different from None. Starting at 1.1.0
passing None and False as reuse has exactly the same effect.
Args:
name_or_scope: `string` or `VariableScope`: the scope to open.
    default_name: The default name to use if the `name_or_scope` argument is
      `None`; this name will be uniquified. If `name_or_scope` is provided,
      `default_name` is not used and can be `None`.
values: The list of `Tensor` arguments that are passed to the op function.
initializer: default initializer for variables within this scope.
regularizer: default regularizer for variables within this scope.
caching_device: default caching device for variables within this scope.
partitioner: default partitioner for variables within this scope.
custom_getter: default custom getter for variables within this scope.
reuse: `True` or `None`; if `True`, we go into reuse mode for this scope as
well as all sub-scopes; if `None`, we just inherit the parent scope reuse.
dtype: type of variables created in this scope (defaults to the type
in the passed scope, or inherited from parent scope).
use_resource: If False, all variables will be regular Variables. If True,
experimental ResourceVariables with well-defined semantics will be used
instead. Defaults to False (will later change to True).
Returns:
    A scope that can be captured and reused.
Raises:
ValueError: when trying to reuse within a create scope, or create within
a reuse scope.
TypeError: when the types of some arguments are not appropriate.
"""
if default_name is None and name_or_scope is None:
raise TypeError("If default_name is None then name_or_scope is required")
if not (reuse is True or reuse is False or reuse is None):
raise ValueError("The reuse parameter must be True or False or None.")
if reuse is False: # We don't allow non-inheriting scopes, False = None here.
reuse = None
if values is None:
values = []
g = ops._get_graph_from_inputs(values) # pylint: disable=protected-access
with g.as_default():
if name_or_scope is not None:
if not isinstance(name_or_scope, (VariableScope,) + six.string_types):
raise TypeError("VariableScope: name_or_scope must be a string or "
"VariableScope.")
if isinstance(name_or_scope, six.string_types):
name_scope = name_or_scope
else:
name_scope = name_or_scope.name.split("/")[-1]
if name_scope:
with ops.name_scope(name_scope) as cur_name_scope:
if isinstance(name_or_scope, six.string_types):
old_name_scope = cur_name_scope
else:
old_name_scope = name_or_scope.original_name_scope
with _pure_variable_scope(
name_or_scope,
reuse=reuse,
initializer=initializer,
regularizer=regularizer,
caching_device=caching_device,
partitioner=partitioner,
custom_getter=custom_getter,
old_name_scope=old_name_scope,
dtype=dtype,
use_resource=use_resource) as vs:
yield vs
else:
# This can only happen if someone is entering the root variable scope.
with _pure_variable_scope(
name_or_scope,
reuse=reuse,
initializer=initializer,
regularizer=regularizer,
caching_device=caching_device,
partitioner=partitioner,
custom_getter=custom_getter,
dtype=dtype,
use_resource=use_resource) as vs:
yield vs
else: # Here name_or_scope is None. Using default name, but made unique.
if reuse:
raise ValueError("reuse=True cannot be used without a name_or_scope")
with ops.name_scope(default_name) as scope:
unique_default_name = _get_unique_variable_scope(default_name)
with _pure_variable_scope(
unique_default_name,
initializer=initializer,
regularizer=regularizer,
caching_device=caching_device,
partitioner=partitioner,
custom_getter=custom_getter,
old_name_scope=scope,
dtype=dtype,
use_resource=use_resource) as vs:
yield vs
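# Hedged sketch (editorial addition): when `name_or_scope` is None the
# `default_name` is uniquified with an `_N` suffix, as implemented by
# `_get_unique_variable_scope` above. Assumes the TensorFlow 1.x API; the
# expected names are an illustration, not a guaranteed contract.
def _example_default_name_uniquification():
  import tensorflow as tf
  scopes = []
  for _ in range(2):
    with tf.variable_scope(None, default_name="layer") as vs:
      tf.get_variable("w", shape=[1])
      scopes.append(vs.name)
  return scopes  # typically ["layer", "layer_1"]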
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def variable_op_scope(values,
name_or_scope,
default_name=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
reuse=None,
dtype=None,
use_resource=None):
"""Deprecated: context manager for defining an op that creates variables."""
logging.warn("tf.variable_op_scope(values, name, default_name) is deprecated,"
" use tf.variable_scope(name, default_name, values)")
with variable_scope(name_or_scope,
default_name=default_name,
values=values,
initializer=initializer,
regularizer=regularizer,
caching_device=caching_device,
partitioner=partitioner,
custom_getter=custom_getter,
reuse=reuse,
dtype=dtype,
use_resource=use_resource) as scope:
yield scope
def _compute_slice_dim_and_shape(full_shape, slicing):
"""Computes which dimension is being sliced and the typical slice shape."""
slice_shape = [0] * len(full_shape)
slice_dim = None
for dim, num_slices in enumerate(slicing):
dim_size = full_shape[dim]
if num_slices <= 0 or dim_size < num_slices:
raise ValueError("Cannot create %d slices for size %d. shape: %s, "
"slicing: %s" %
(num_slices, full_shape[dim], full_shape, slicing))
if num_slices == 1:
# Not slicing in this dimension.
slice_shape[dim] = dim_size
elif slice_dim is not None:
# We only support slicing along one of the dimensions.
raise ValueError("Can only slice a variable along one dimension: "
"shape: %s, slicing: %s" % (full_shape, slicing))
else:
# Note: We will add any extras onto the last slice, later.
slice_dim = dim
slice_shape[dim] = dim_size // num_slices
# Degenerate case: If "slicing" was all ones, pretend we are slicing along
# the first dimension.
if slice_dim is None:
slice_dim = 0
return slice_dim, slice_shape
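# Hedged worked example (editorial addition) for the helper above: slicing a
# [20, 10] shape into 4 pieces along the first dimension.
def _example_compute_slice_dim_and_shape():
  full_shape = [20, 10]
  slicing = [4, 1]  # 4 slices along dim 0, no slicing along dim 1
  slice_dim, slice_shape = _compute_slice_dim_and_shape(full_shape, slicing)
  assert slice_dim == 0
  assert slice_shape == [5, 10]  # the caller adds any remainder to the last slice
  return slice_dim, slice_shape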
def variable(initial_value=None,
trainable=True,
collections=None,
validate_shape=True,
caching_device=None,
name=None,
dtype=None):
if get_variable_scope().use_resource:
return resource_variable_ops.ResourceVariable(
initial_value=initial_value, trainable=trainable,
collections=collections, validate_shape=validate_shape,
caching_device=caching_device, name=name, dtype=dtype)
else:
return variables.Variable(
initial_value=initial_value, trainable=trainable,
collections=collections, validate_shape=validate_shape,
caching_device=caching_device, name=name, dtype=dtype)
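# Hedged sketch (editorial addition): `variable()` above dispatches on the
# enclosing scope's `use_resource` flag, so the same call site can yield
# either a regular Variable or a ResourceVariable. Assumes TensorFlow 1.x.
def _example_use_resource_dispatch():
  import tensorflow as tf
  with tf.variable_scope("resource_vars", use_resource=True):
    # Created as a ResourceVariable because use_resource=True in this scope.
    return tf.get_variable("v", shape=[2], initializer=tf.zeros_initializer())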
|
mixturemodel-flow/tensorflow
|
tensorflow/python/ops/variable_scope.py
|
Python
|
apache-2.0
| 71,239
|
hiddenimports = ['sip', 'PyQt4.QtGui', 'PyQt4._qt']
from PyInstaller.hooks.hookutils import qt4_plugins_binaries
def hook(mod):
mod.binaries.extend(qt4_plugins_binaries('phonon_backend'))
return mod
|
supercheetah/diceroller
|
pyinstaller/PyInstaller/hooks/hook-PyQt4.phonon.py
|
Python
|
artistic-2.0
| 210
|
# Generated by Django 2.2.5 on 2019-09-09 13:37
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('bhs', '0003_auto_20190906_2147'),
]
operations = [
migrations.RemoveField(
model_name='group',
name='chapters',
),
migrations.RemoveField(
model_name='group',
name='participants',
),
migrations.RemoveField(
model_name='group',
name='pos',
),
]
|
dbinetti/barberscore-django
|
project/apps/bhs/migrations/0004_auto_20190909_0637.py
|
Python
|
bsd-2-clause
| 532
|
#! /usr/bin/env python
import os
import magic
import argparse
from termcolor import colored
from subprocess import Popen, PIPE
parser = argparse.ArgumentParser(description='Toby', epilog='Yes, a queer mongrel, with a most amazing power of scent')
parser.add_argument('-i', dest='case', action='store_true', default=False, help='Ignore case distinctions')
parser.add_argument('-s', dest='string', metavar='string', required=True, type=str, help='String to search for')
parser.add_argument('-d', dest='directory', metavar='directory', required=True, type=str, help='Directory to search')
args = parser.parse_args()
for root, dirs, files in os.walk(args.directory):
if files:
for file in files:
filepath = root + '/' + file
if os.path.exists(filepath):
filetype = magic.from_file(filepath)
mime = magic.from_file(filepath, mime=True)
grep_args = ["grep","-a", "--color=always"]
strings_args = ["strings"]
if args.case:
grep_args.append('-i')
#we will ignore symlinks since most of them will be broken anyway
if 'inode/symlink' not in mime:
if 'application' in mime:
strings_args.append(filepath)
grep_args.append(args.string)
p1 = Popen(strings_args , stdout=PIPE)
p2 = Popen(grep_args, stdin=p1.stdout, stdout=PIPE)
p1.stdout.close()
else:
grep_args.extend(["-n", args.string, filepath])
p2 = Popen(grep_args, stdout=PIPE)
output = p2.communicate()[0]
if output:
print colored(filepath, 'magenta', attrs=['bold']), colored(mime, 'blue')
print output
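# Hedged usage note (editorial addition): a typical invocation, assuming the
# script is saved as toby.py and the `python-magic` and `termcolor` packages
# are installed (Python 2, since the script uses print statements):
#
#   python toby.py -i -s "password" -d /path/to/search
#
# Binary files (mime type "application/...") are piped through `strings`
# before grep; text files are grepped directly with line numbers.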
|
zigitax/toby
|
toby.py
|
Python
|
bsd-2-clause
| 1,582
|
# -*- coding: utf-8 -*-
import pytest
from datetime import datetime
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, Timestamp,
CategoricalIndex, date_range, DatetimeIndex,
period_range, timedelta_range, NaT,
Interval, IntervalIndex)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype
class TestCategoricalConstructors(object):
def test_validate_ordered(self):
# see gh-14058
exp_msg = "'ordered' must either be 'True' or 'False'"
exp_err = TypeError
# This should be a boolean.
ordered = np.array([0, 1, 2])
with tm.assert_raises_regex(exp_err, exp_msg):
Categorical([1, 2, 3], ordered=ordered)
with tm.assert_raises_regex(exp_err, exp_msg):
Categorical.from_codes([0, 0, 1], categories=['a', 'b', 'c'],
ordered=ordered)
def test_constructor_empty(self):
# GH 17248
c = Categorical([])
expected = Index([])
tm.assert_index_equal(c.categories, expected)
c = Categorical([], categories=[1, 2, 3])
expected = pd.Int64Index([1, 2, 3])
tm.assert_index_equal(c.categories, expected)
def test_constructor_tuples(self):
values = np.array([(1,), (1, 2), (1,), (1, 2)], dtype=object)
result = Categorical(values)
expected = Index([(1,), (1, 2)], tupleize_cols=False)
tm.assert_index_equal(result.categories, expected)
assert result.ordered is False
def test_constructor_tuples_datetimes(self):
# numpy will auto reshape when all of the tuples are the
# same len, so add an extra one with 2 items and slice it off
values = np.array([(Timestamp('2010-01-01'),),
(Timestamp('2010-01-02'),),
(Timestamp('2010-01-01'),),
(Timestamp('2010-01-02'),),
('a', 'b')], dtype=object)[:-1]
result = Categorical(values)
expected = Index([(Timestamp('2010-01-01'),),
(Timestamp('2010-01-02'),)], tupleize_cols=False)
tm.assert_index_equal(result.categories, expected)
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical(arr, ordered=False)
assert not factor.ordered
# this however will raise as cannot be sorted
pytest.raises(
TypeError, lambda: Categorical(arr, ordered=True))
def test_constructor_interval(self):
result = Categorical([Interval(1, 2), Interval(2, 3), Interval(3, 6)],
ordered=True)
ii = IntervalIndex([Interval(1, 2), Interval(2, 3), Interval(3, 6)])
exp = Categorical(ii, ordered=True)
tm.assert_categorical_equal(result, exp)
tm.assert_index_equal(result.categories, ii)
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"], dtype=np.object_)
c1 = Categorical(exp_arr)
tm.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
tm.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
tm.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
pytest.raises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
pytest.raises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
assert not c1.ordered
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
tm.assert_numpy_array_equal(c1.__array__(), c2.__array__())
tm.assert_index_equal(c2.categories, Index(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
tm.assert_categorical_equal(c1, c2)
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(["a", "b", "c", "a"]),
categories=["a", "b", "c", "d"])
tm.assert_categorical_equal(c1, c2)
# This should result in integer categories, not float!
cat = Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
assert is_integer_dtype(cat.categories)
# https://github.com/pandas-dev/pandas/issues/3678
cat = Categorical([np.nan, 1, 2, 3])
assert is_integer_dtype(cat.categories)
# this should result in floats
cat = Categorical([np.nan, 1, 2., 3])
assert is_float_dtype(cat.categories)
cat = Categorical([np.nan, 1., 2., 3.])
assert is_float_dtype(cat.categories)
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notna()])
# assert is_integer_dtype(vals)
# corner cases
cat = Categorical([1])
assert len(cat.categories) == 1
assert cat.categories[0] == 1
assert len(cat.codes) == 1
assert cat.codes[0] == 0
cat = Categorical(["a"])
assert len(cat.categories) == 1
assert cat.categories[0] == "a"
assert len(cat.codes) == 1
assert cat.codes[0] == 0
# Scalars should be converted to lists
cat = Categorical(1)
assert len(cat.categories) == 1
assert cat.categories[0] == 1
assert len(cat.codes) == 1
assert cat.codes[0] == 0
# two arrays
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(None):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(None):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
        # the next ones are from the old docs
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_not_sequence(self):
# https://github.com/pandas-dev/pandas/issues/16022
with pytest.raises(TypeError):
Categorical(['a', 'b'], categories='a')
def test_constructor_with_null(self):
# Cannot have NaN in categories
with pytest.raises(ValueError):
Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
with pytest.raises(ValueError):
Categorical([None, "a", "b", "c"],
categories=[None, "a", "b", "c"])
with pytest.raises(ValueError):
Categorical(DatetimeIndex(['nat', '20160101']),
categories=[NaT, Timestamp('20160101')])
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
tm.assert_categorical_equal(ci.values, Categorical(ci))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
tm.assert_categorical_equal(ci.values,
Categorical(ci.astype(object),
categories=ci.categories))
def test_constructor_with_generator(self):
# This was raising an Error in isna(single_val).any() because isna
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
tm.assert_categorical_equal(cat, exp)
cat = Categorical(xrange(3))
tm.assert_categorical_equal(cat, exp)
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
tm.assert_categorical_equal(cat, exp)
cat = Categorical([0, 1, 2], categories=xrange(3))
tm.assert_categorical_equal(cat, exp)
def test_constructor_with_datetimelike(self):
# 12077
        # constructor with a datetimelike and NaT
for dtl in [date_range('1995-01-01 00:00:00', periods=5, freq='s'),
date_range('1995-01-01 00:00:00', periods=5,
freq='s', tz='US/Eastern'),
timedelta_range('1 day', periods=5, freq='s')]:
s = Series(dtl)
c = Categorical(s)
expected = type(dtl)(s)
expected.freq = None
tm.assert_index_equal(c.categories, expected)
tm.assert_numpy_array_equal(c.codes, np.arange(5, dtype='int8'))
# with NaT
s2 = s.copy()
s2.iloc[-1] = NaT
c = Categorical(s2)
expected = type(dtl)(s2.dropna())
expected.freq = None
tm.assert_index_equal(c.categories, expected)
exp = np.array([0, 1, 2, 3, -1], dtype=np.int8)
tm.assert_numpy_array_equal(c.codes, exp)
result = repr(c)
assert 'NaT' in result
def test_constructor_from_index_series_datetimetz(self):
idx = date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
result = Categorical(idx)
tm.assert_index_equal(result.categories, idx)
result = Categorical(Series(idx))
tm.assert_index_equal(result.categories, idx)
def test_constructor_from_index_series_timedelta(self):
idx = timedelta_range('1 days', freq='D', periods=3)
result = Categorical(idx)
tm.assert_index_equal(result.categories, idx)
result = Categorical(Series(idx))
tm.assert_index_equal(result.categories, idx)
def test_constructor_from_index_series_period(self):
idx = period_range('2015-01-01', freq='D', periods=3)
result = Categorical(idx)
tm.assert_index_equal(result.categories, idx)
result = Categorical(Series(idx))
tm.assert_index_equal(result.categories, idx)
def test_constructor_invariant(self):
# GH 14190
vals = [
np.array([1., 1.2, 1.8, np.nan]),
np.array([1, 2, 3], dtype='int64'),
['a', 'b', 'c', np.nan],
[pd.Period('2014-01'), pd.Period('2014-02'), NaT],
[Timestamp('2014-01-01'), Timestamp('2014-01-02'), NaT],
[Timestamp('2014-01-01', tz='US/Eastern'),
Timestamp('2014-01-02', tz='US/Eastern'), NaT],
]
for val in vals:
c = Categorical(val)
c2 = Categorical(c)
tm.assert_categorical_equal(c, c2)
@pytest.mark.parametrize('ordered', [True, False])
def test_constructor_with_dtype(self, ordered):
categories = ['b', 'a', 'c']
dtype = CategoricalDtype(categories, ordered=ordered)
result = Categorical(['a', 'b', 'a', 'c'], dtype=dtype)
expected = Categorical(['a', 'b', 'a', 'c'], categories=categories,
ordered=ordered)
tm.assert_categorical_equal(result, expected)
assert result.ordered is ordered
def test_constructor_dtype_and_others_raises(self):
dtype = CategoricalDtype(['a', 'b'], ordered=True)
with tm.assert_raises_regex(ValueError, "Cannot"):
Categorical(['a', 'b'], categories=['a', 'b'], dtype=dtype)
with tm.assert_raises_regex(ValueError, "Cannot"):
Categorical(['a', 'b'], ordered=True, dtype=dtype)
with tm.assert_raises_regex(ValueError, "Cannot"):
Categorical(['a', 'b'], ordered=False, dtype=dtype)
@pytest.mark.parametrize('categories', [
None, ['a', 'b'], ['a', 'c'],
])
@pytest.mark.parametrize('ordered', [True, False])
def test_constructor_str_category(self, categories, ordered):
result = Categorical(['a', 'b'], categories=categories,
ordered=ordered, dtype='category')
expected = Categorical(['a', 'b'], categories=categories,
ordered=ordered)
tm.assert_categorical_equal(result, expected)
def test_constructor_str_unknown(self):
with tm.assert_raises_regex(ValueError, "Unknown `dtype`"):
Categorical([1, 2], dtype="foo")
def test_constructor_from_categorical_with_dtype(self):
dtype = CategoricalDtype(['a', 'b', 'c'], ordered=True)
values = Categorical(['a', 'b', 'd'])
result = Categorical(values, dtype=dtype)
# We use dtype.categories, not values.categories
expected = Categorical(['a', 'b', 'd'], categories=['a', 'b', 'c'],
ordered=True)
tm.assert_categorical_equal(result, expected)
def test_constructor_from_categorical_with_unknown_dtype(self):
dtype = CategoricalDtype(None, ordered=True)
values = Categorical(['a', 'b', 'd'])
result = Categorical(values, dtype=dtype)
# We use values.categories, not dtype.categories
expected = Categorical(['a', 'b', 'd'], categories=['a', 'b', 'd'],
ordered=True)
tm.assert_categorical_equal(result, expected)
def test_constructor_from_categorical_string(self):
values = Categorical(['a', 'b', 'd'])
# use categories, ordered
result = Categorical(values, categories=['a', 'b', 'c'], ordered=True,
dtype='category')
expected = Categorical(['a', 'b', 'd'], categories=['a', 'b', 'c'],
ordered=True)
tm.assert_categorical_equal(result, expected)
# No string
result = Categorical(values, categories=['a', 'b', 'c'], ordered=True)
tm.assert_categorical_equal(result, expected)
def test_constructor_with_categorical_categories(self):
# GH17884
expected = Categorical(['a', 'b'], categories=['a', 'b', 'c'])
result = Categorical(
['a', 'b'], categories=Categorical(['a', 'b', 'c']))
tm.assert_categorical_equal(result, expected)
result = Categorical(
['a', 'b'], categories=CategoricalIndex(['a', 'b', 'c']))
tm.assert_categorical_equal(result, expected)
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
pytest.raises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
pytest.raises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
pytest.raises(ValueError, f)
# NaN categories included
def f():
Categorical.from_codes([0, 1, 2], ["a", "b", np.nan])
pytest.raises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
pytest.raises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
tm.assert_categorical_equal(exp, res)
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
Categorical.from_codes(codes, categories=["train", "test"])
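    def test_from_codes_roundtrip_sketch(self):
        # Hedged illustration (editorial addition): `from_codes` pairs integer
        # codes with an explicit categories listing, so a Categorical can be
        # rebuilt from its own `.codes` and `.categories` attributes.
        orig = Categorical(["b", "a", "b"], categories=["a", "b"])
        rebuilt = Categorical.from_codes(orig.codes, orig.categories)
        tm.assert_categorical_equal(orig, rebuilt)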
def test_from_codes_with_categorical_categories(self):
# GH17884
expected = Categorical(['a', 'b'], categories=['a', 'b', 'c'])
result = Categorical.from_codes(
[0, 1], categories=Categorical(['a', 'b', 'c']))
tm.assert_categorical_equal(result, expected)
result = Categorical.from_codes(
[0, 1], categories=CategoricalIndex(['a', 'b', 'c']))
tm.assert_categorical_equal(result, expected)
# non-unique Categorical still raises
with pytest.raises(ValueError):
Categorical.from_codes([0, 1], Categorical(['a', 'b', 'a']))
@pytest.mark.parametrize('dtype', [None, 'category'])
def test_from_inferred_categories(self, dtype):
cats = ['a', 'b']
codes = np.array([0, 0, 1, 1], dtype='i8')
result = Categorical._from_inferred_categories(cats, codes, dtype)
expected = Categorical.from_codes(codes, cats)
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, 'category'])
def test_from_inferred_categories_sorts(self, dtype):
cats = ['b', 'a']
codes = np.array([0, 1, 1, 1], dtype='i8')
result = Categorical._from_inferred_categories(cats, codes, dtype)
expected = Categorical.from_codes([1, 0, 0, 0], ['a', 'b'])
tm.assert_categorical_equal(result, expected)
def test_from_inferred_categories_dtype(self):
cats = ['a', 'b', 'd']
codes = np.array([0, 1, 0, 2], dtype='i8')
dtype = CategoricalDtype(['c', 'b', 'a'], ordered=True)
result = Categorical._from_inferred_categories(cats, codes, dtype)
expected = Categorical(['a', 'b', 'a', 'd'],
categories=['c', 'b', 'a'],
ordered=True)
tm.assert_categorical_equal(result, expected)
def test_from_inferred_categories_coerces(self):
cats = ['1', '2', 'bad']
codes = np.array([0, 0, 1, 2], dtype='i8')
dtype = CategoricalDtype([1, 2])
result = Categorical._from_inferred_categories(cats, codes, dtype)
expected = Categorical([1, 1, 2, np.nan])
tm.assert_categorical_equal(result, expected)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
assert not cat.ordered
cat = Categorical([0, 1, 2], ordered=False)
assert not cat.ordered
cat = Categorical([0, 1, 2], ordered=True)
assert cat.ordered
@pytest.mark.xfail(reason="Imaginary values not supported in Categorical")
def test_constructor_imaginary(self):
values = [1, 2, 3 + 1j]
c1 = Categorical(values)
tm.assert_index_equal(c1.categories, Index(values))
tm.assert_numpy_array_equal(np.array(c1), np.array(values))
|
louispotok/pandas
|
pandas/tests/categorical/test_constructors.py
|
Python
|
bsd-3-clause
| 20,017
|
from __future__ import unicode_literals, print_function
import os
import io
import errno
import sys
import time
import subprocess
from itertools import tee
from six import BytesIO
from six.moves import zip
from pkg_resources import working_set, resource_filename
import pytest
from wex.readable import EXT_WEXIN
from wex.output import EXT_WEXOUT, TeeStdOut
from wex.url import URL
from wex import command
url = URL('http://httpbin.org/get?this=that')
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return zip(a, b)
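# Hedged worked example (editorial addition): pairwise yields overlapping
# neighbours, which test_main_save below uses to walk parent/child directory
# pairs.
#   >>> list(pairwise('abcd'))
#   [('a', 'b'), ('b', 'c'), ('c', 'd')]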
def setup_module():
entry = resource_filename(__name__, 'fixtures/TestMe.egg')
working_set.add_entry(entry)
def find_file_paths(top):
paths = []
for dirpath, dirs, filenames in os.walk(top):
paths.extend(os.path.join(dirpath, filename) for filename in filenames)
return set(paths)
def start_wex_subprocess(args=['--help']):
env = dict(os.environ)
egg = resource_filename(__name__, 'fixtures/TestMe.egg')
env['PYTHONPATH'] = egg
# This test will fail unless you run setup.py develop or setup.py install
exe = os.path.join(os.path.dirname(sys.executable), 'wex')
cmd = [exe] + args
return subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env)
def test_wex_console_script():
# this url is cunningly crafted to generate UTF-8 output
url = 'http://httpbin.org/get?this=that%C2%AE'
wex = start_wex_subprocess([url])
output = wex.stdout.read()
assert wex.wait() == 0
assert output == b'"this"\t"that\xc2\xae"\n'
def test_wex_multiprocessing():
url = 'http://httpbin.org/get?this=that%C2%AE'
wex = start_wex_subprocess(['-P', url])
ret = None
for i in range(300):
ret = wex.poll()
if ret is None:
time.sleep(0.1)
assert ret is not None
def run_main(monkeypatch, args):
argv = sys.argv[:1] + list(args)
monkeypatch.setattr(sys, 'argv', argv)
stdout = io.StringIO()
monkeypatch.setattr('wex.output.StdOut.stdout', stdout)
command.main()
return stdout.getvalue()
def test_main_url(monkeypatch):
assert run_main(monkeypatch, [url]) == '"this"\t"that"\n'
def test_main_tarfile(monkeypatch):
example_tar = resource_filename(__name__, 'fixtures/example.tar')
assert run_main(monkeypatch, [example_tar]) == '"this"\t"that"\n'
def test_main_save(monkeypatch, tmpdir):
destdir = tmpdir.strpath
args = ['--save-dir', destdir, url]
assert run_main(monkeypatch, args) == '"this"\t"that"\n'
sentinel = object()
expected_dirs = [
'http',
'httpbin.org',
'get',
'this%3Dthat',
'178302e981e586827bd8ca962c1c27f8',
sentinel
]
dirpath = destdir
for dirname, subdir in pairwise(expected_dirs):
dirpath = os.path.join(dirpath, dirname)
if subdir is not sentinel:
assert os.listdir(dirpath) == [subdir]
assert sorted(os.listdir(dirpath)) == ['0.wexin', '0.wexout']
def test_main_no_such_file(monkeypatch):
argv = sys.argv[:1] + ['no-such-file']
monkeypatch.setattr(sys, 'argv', argv)
with pytest.raises(SystemExit) as excinfo:
command.main()
assert isinstance(excinfo.value.args[0], IOError)
assert excinfo.value.args[0].errno == errno.ENOENT
def test_main_output_return_list(monkeypatch, tmpdir):
empty = resource_filename(__name__, 'fixtures/empty.wexin_')
args = [empty]
monkeypatch.chdir(tmpdir)
with tmpdir.join('entry_points.txt').open('w') as fp:
fp.write("[wex]\nreturn_list = testme:return_list")
assert run_main(monkeypatch, args) == '[1,2]\n'
def test_main_output_return_tuple(monkeypatch, tmpdir):
empty = resource_filename(__name__, 'fixtures/empty.wexin_')
args = [empty]
monkeypatch.chdir(tmpdir)
with tmpdir.join('entry_points.txt').open('w') as fp:
fp.write("[wex]\nreturn_tuple = testme:return_tuple")
# The tuple is encoded as a JSON array
assert run_main(monkeypatch, args) == '[1,2]\n'
def test_main_output_return_dict(monkeypatch, tmpdir):
empty = resource_filename(__name__, 'fixtures/empty.wexin_')
args = [empty]
monkeypatch.chdir(tmpdir)
with tmpdir.join('entry_points.txt').open('w') as fp:
fp.write("[wex]\nreturn_dict = testme:return_dict")
# The tuple is encoded as a JSON array
assert run_main(monkeypatch, args) == '{"a":1}\n'
wexin = b"""HTTP/1.1 200 OK
Hello
"""
def test_write_extracted_values_tee_stdout(tmpdir):
readable = BytesIO(wexin)
readable.name = tmpdir.join('0' + EXT_WEXIN).strpath
def extract(src):
yield 1
writer = command.WriteExtractedValues(TeeStdOut, extract)
ret = writer(readable)
assert ret is None
with tmpdir.join('0' + EXT_WEXOUT).open() as fp:
assert fp.read() == '1\n'
def test_write_extracted_values_tee_stdout_readable_has_no_name():
readable = BytesIO(wexin)
def extract(src):
yield 1
writer = command.WriteExtractedValues(TeeStdOut, extract)
ret = writer(readable)
assert ret is None
|
gilessbrown/wextracto
|
tests/test_command.py
|
Python
|
bsd-3-clause
| 5,108
|
import sys
#from CPAC.interfaces.afni import preprocess
from nipype.interfaces.afni import preprocess
import os
import commands
import nipype.pipeline.engine as pe
import nipype.algorithms.rapidart as ra
import nipype.interfaces.afni as afni
import nipype.interfaces.fsl as fsl
import nipype.interfaces.io as nio
import nipype.interfaces.utility as util
from CPAC.sca.utils import *
def create_sca(name_sca='sca'):
"""
    Map of the correlations of the Region of Interest (Seed in native or MNI space) with the rest of the brain voxels.
The map is normalized to contain Z-scores, mapped in standard space and treated with spatial smoothing.
Parameters
----------
name_sca : a string
Name of the SCA workflow
Returns
-------
sca_workflow : workflow
Seed Based Correlation Analysis Workflow
Notes
-----
`Source <https://github.com/FCP-INDI/C-PAC/blob/master/CPAC/sca/sca.py>`_
Workflow Inputs::
inputspec.rest_res_filt : string (existing nifti file)
            Band-passed image with global signal, white matter, CSF and motion regression. Recommended bandpass filter: (0.001, 0.1)
inputspec.timeseries_one_d : string (existing nifti file)
1D 3dTcorr1D compatible timeseries file. 1D file can be timeseries from a mask or from a parcellation containing ROIs
Workflow Outputs::
outputspec.correlation_file : string (nifti file)
Correlations of the functional file and the input time series
outputspec.Z_score : string (nifti file)
Fisher Z transformed correlations of the seed
SCA Workflow Procedure:
1. Compute pearson correlation between input timeseries 1D file and input functional file
Use 3dTcorr1D to compute that. Input timeseries can be a 1D file containing parcellation ROI's
or a 3D mask
    2. Compute Fisher Z score of the correlation computed in step above. If a mask is provided then
       a single Z score file is returned, otherwise z-scores for all ROIs are returned as a list of
       nifti files
Workflow:
.. image:: ../images/sca_graph.dot.png
:width: 500
Detailed Workflow:
.. image:: ../images/sca_detailed_graph.dot.png
:width: 500
Examples
--------
>>> sca_w = create_sca("sca_wf")
>>> sca_w.inputs.inputspec.functional_file = '/home/data/subject/func/rest_bandpassed.nii.gz'
>>> sca_w.inputs.inputspec.timeseries_one_d = '/home/data/subject/func/ts.1D'
>>> sca_w.run() # doctest: +SKIP
"""
from CPAC.utils.utils import get_roi_num_list
sca = pe.Workflow(name=name_sca)
inputNode = pe.Node(util.IdentityInterface(fields=['timeseries_one_d',
'functional_file',
]),
name='inputspec')
outputNode = pe.Node(util.IdentityInterface(fields=[
'correlation_stack',
'correlation_files',
'Z_score',
]),
name='outputspec')
# # 2. Compute voxel-wise correlation with Seed Timeseries
corr = pe.Node(interface=preprocess.TCorr1D(),
name='3dTCorr1D')
corr.inputs.pearson = True
corr.inputs.outputtype = 'NIFTI_GZ'
sca.connect(inputNode, 'timeseries_one_d',
corr, 'y_1d')
sca.connect(inputNode, 'functional_file',
corr, 'xset')
if "roi" in name_sca:
# Transform the sub-bricks into volumes
concat = pe.Node(interface=preprocess.TCat(),
name='3dTCat')
concat.inputs.outputtype = 'NIFTI_GZ'
# also write out volumes as individual files
split = pe.Node(interface=fsl.Split(), name='split_raw_volumes_sca')
split.inputs.dimension = 't'
split.inputs.out_base_name = 'sca_roi_'
get_roi_num_list = pe.Node(util.Function(input_names=['timeseries_file', 'prefix'], output_names=['roi_list'], function=get_roi_num_list), name='get_roi_num_list')
get_roi_num_list.inputs.prefix = "sca_roi"
rename_rois = pe.MapNode(interface=util.Rename(), name='output_rois',
iterfield=['in_file','format_string'])
rename_rois.inputs.keep_ext = True
sca.connect(corr, 'out_file', concat, 'in_files')
sca.connect(concat, 'out_file', split, 'in_file')
sca.connect(concat, 'out_file',
outputNode, 'correlation_stack')
sca.connect(inputNode, 'timeseries_one_d', get_roi_num_list,
'timeseries_file')
sca.connect(split, 'out_files', rename_rois, 'in_file')
sca.connect(get_roi_num_list, 'roi_list', rename_rois, 'format_string')
sca.connect(rename_rois, 'out_file', outputNode,
'correlation_files')
else:
sca.connect(corr, 'out_file', outputNode, 'correlation_files')
return sca
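# Hedged note (editorial addition): the Fisher Z transform mentioned in the
# docstring above is z = arctanh(r); a minimal NumPy sketch, independent of
# the AFNI/nipype pipeline:
def _example_fisher_z():
    import numpy as np
    r = np.array([0.1, 0.5, 0.9])  # sample Pearson correlations
    return np.arctanh(r)           # their Fisher Z scores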
def create_temporal_reg(wflow_name='temporal_reg', which='SR'):
"""
Temporal multiple regression workflow
Provides a spatial map of parameter estimates corresponding to each
provided timeseries in a timeseries.txt file as regressors
Parameters
----------
wflow_name : a string
Name of the temporal regression workflow
which: a string
SR: Spatial Regression, RT: ROI Timeseries
NOTE: If you set (which = 'RT'), the output of this workflow will be
renamed based on the header information provided in the
timeseries.txt file.
If you run the temporal regression workflow manually, don\'t set
(which = 'RT') unless you provide a timeseries.txt file with a header
containing the names of the timeseries.
Returns
-------
wflow : workflow
temporal multiple regression Workflow
Notes
-----
`Source <https://github.com/FCP-INDI/C-PAC/blob/master/CPAC/sca/sca.py>`_
Workflow Inputs::
inputspec.subject_rest : string (existing nifti file)
            Band-passed image with global signal, white matter, CSF and motion regression. Recommended bandpass filter: (0.001, 0.1)
inputspec.subject_timeseries : string (existing txt file)
text file containing the timeseries to be regressed on the subjects
functional file
timeseries are organized by columns, timepoints by rows
inputspec.subject_mask : string (existing nifti file)
path to subject functional mask
inputspec.demean : Boolean
control whether to demean model and data
inputspec.normalize : Boolean
control whether to normalize the input timeseries to unit standard deviation
Workflow Outputs::
outputspec.temp_reg_map : string (nifti file)
GLM parameter estimate image for each timeseries in the input file
outputspec.temp_reg_map_zstat : string (nifti file)
Normalized version of the GLM parameter estimates
Temporal Regression Workflow Procedure:
Enter all timeseries into a general linear model and regress these
timeseries to the subjects functional file to get spatial maps of voxels
showing activation patterns related to those in the timeseries.
Workflow:
.. image:: ../images/create_temporal_regression.png
:width: 500
Detailed Workflow:
.. image:: ../images/detailed_graph_create_temporal_regression.png
:width: 500
References
----------
`http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/DualRegression/UserGuide <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/DualRegression/UserGuide>`_
Examples
--------
>>> tr_wf = create_temporal_reg('temporal regression')
>>> tr_wf.inputs.inputspec.subject_rest = '/home/data/subject/func/rest_bandpassed.nii.gz'
>>> tr_wf.inputs.inputspec.subject_timeseries = '/home/data/subject/func/timeseries.txt'
>>> tr_wf.inputs.inputspec.subject_mask = '/home/data/spatialmaps/spatial_map.nii.gz'
>>> tr_wf.inputs.inputspec.demean = True
>>> tr_wf.inputs.inputspec.normalize = True
>>> tr_wf.run() # doctest: +SKIP
"""
wflow = pe.Workflow(name=wflow_name)
inputNode = pe.Node(util.IdentityInterface
(fields=['subject_rest',
'subject_timeseries',
'subject_mask',
'demean',
'normalize']),
name='inputspec')
outputNode = pe.Node(util.IdentityInterface
(fields=['temp_reg_map',
'temp_reg_map_files',
'temp_reg_map_z',
'temp_reg_map_z_files']),
name='outputspec')
check_timeseries = pe.Node(util.Function(input_names=['in_file'],
output_names=['out_file'],
function=check_ts),
name='check_timeseries')
wflow.connect(inputNode, 'subject_timeseries',
check_timeseries, 'in_file')
temporalReg = pe.Node(interface=fsl.GLM(),
name='temporal_regression')
temporalReg.inputs.out_file = 'temp_reg_map.nii.gz'
temporalReg.inputs.out_z_name = 'temp_reg_map_z.nii.gz'
wflow.connect(inputNode, 'subject_rest',
temporalReg, 'in_file')
wflow.connect(check_timeseries, 'out_file',
temporalReg, 'design')
wflow.connect(inputNode, 'demean',
temporalReg, 'demean')
wflow.connect(inputNode, 'normalize',
temporalReg, 'des_norm')
wflow.connect(inputNode, 'subject_mask',
temporalReg, 'mask')
wflow.connect(temporalReg, 'out_file',
outputNode, 'temp_reg_map')
wflow.connect(temporalReg, 'out_z',
outputNode, 'temp_reg_map_z')
split = pe.Node(interface=fsl.Split(),
name='split_raw_volumes')
split.inputs.dimension = 't'
split.inputs.out_base_name = 'temp_reg_map_'
wflow.connect(temporalReg, 'out_file',
split, 'in_file')
split_zstat = pe.Node(interface=fsl.Split(),
name='split_zstat_volumes')
split_zstat.inputs.dimension = 't'
split_zstat.inputs.out_base_name = 'temp_reg_map_z_'
wflow.connect(temporalReg, 'out_z',
split_zstat, 'in_file')
if which == 'SR':
wflow.connect(split, 'out_files',
outputNode, 'temp_reg_map_files')
wflow.connect(split_zstat, 'out_files',
outputNode, 'temp_reg_map_z_files')
elif which == 'RT':
# get roi order and send to output node for raw outputs
get_roi_order = pe.Node(util.Function(input_names=['maps',
'timeseries'],
output_names=['labels',
'maps'],
function=map_to_roi),
name='get_roi_order')
wflow.connect(split, 'out_files',
get_roi_order, 'maps')
wflow.connect(inputNode, 'subject_timeseries',
get_roi_order, 'timeseries')
rename_maps = pe.MapNode(interface=util.Rename(),
name='rename_maps',
iterfield=['in_file',
'format_string'])
rename_maps.inputs.keep_ext = True
wflow.connect(get_roi_order, 'labels',
rename_maps, 'format_string')
wflow.connect(get_roi_order, 'maps',
rename_maps, 'in_file')
wflow.connect(rename_maps, 'out_file',
outputNode, 'temp_reg_map_files')
# get roi order and send to output node for z-stat outputs
get_roi_order_zstat = pe.Node(util.Function(input_names=['maps',
'timeseries'],
output_names=['labels',
'maps'],
function=map_to_roi),
name='get_roi_order_zstat')
wflow.connect(split_zstat, 'out_files',
get_roi_order_zstat, 'maps')
wflow.connect(inputNode, 'subject_timeseries',
get_roi_order_zstat, 'timeseries')
rename_maps_zstat = pe.MapNode(interface=util.Rename(),
name='rename_maps_zstat',
iterfield=['in_file',
'format_string'])
rename_maps_zstat.inputs.keep_ext = True
wflow.connect(get_roi_order_zstat, 'labels',
rename_maps_zstat, 'format_string')
wflow.connect(get_roi_order_zstat, 'maps',
rename_maps_zstat, 'in_file')
wflow.connect(rename_maps_zstat, 'out_file',
outputNode, 'temp_reg_map_z_files')
return wflow
|
danlurie/C-PAC
|
CPAC/sca/sca.py
|
Python
|
bsd-3-clause
| 13,711
|
"""
Tests for discrete models
Notes
-----
DECIMAL_3 is used because it seems that there is a loss of precision
in the Stata *.dta -> *.csv output, NOT the estimator for the Poisson
tests.
"""
# pylint: disable-msg=E1101
import os
import numpy as np
from numpy.testing import (assert_, assert_raises, assert_almost_equal,
assert_equal, assert_array_equal)
from statsmodels.discrete.discrete_model import (Logit, Probit, MNLogit,
Poisson, NegativeBinomial)
from statsmodels.discrete.discrete_margins import _iscount, _isdummy
import statsmodels.api as sm
from nose import SkipTest
from results.results_discrete import Spector, DiscreteL1
from statsmodels.tools.sm_exceptions import PerfectSeparationError
try:
import cvxopt
has_cvxopt = True
except ImportError:
has_cvxopt = False
try:
from scipy.optimize import basinhopping
has_basinhopping = True
except ImportError:
has_basinhopping = False
DECIMAL_14 = 14
DECIMAL_10 = 10
DECIMAL_9 = 9
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
DECIMAL_0 = 0
class CheckModelResults(object):
"""
res2 should be the test results from RModelWrap
or the results as defined in model_results_data
"""
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_conf_int(self):
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int, DECIMAL_4)
def test_zstat(self):
assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_4)
def pvalues(self):
assert_almost_equal(self.res1.pvalues, self.res2.pvalues, DECIMAL_4)
# def test_cov_params(self):
# assert_almost_equal(self.res1.cov_params(), self.res2.cov_params,
# DECIMAL_4)
def test_llf(self):
assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_4)
def test_llnull(self):
assert_almost_equal(self.res1.llnull, self.res2.llnull, DECIMAL_4)
def test_llr(self):
assert_almost_equal(self.res1.llr, self.res2.llr, DECIMAL_3)
def test_llr_pvalue(self):
assert_almost_equal(self.res1.llr_pvalue, self.res2.llr_pvalue,
DECIMAL_4)
def test_normalized_cov_params(self):
pass
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)
def test_dof(self):
assert_equal(self.res1.df_model, self.res2.df_model)
assert_equal(self.res1.df_resid, self.res2.df_resid)
def test_aic(self):
assert_almost_equal(self.res1.aic, self.res2.aic, DECIMAL_3)
def test_bic(self):
assert_almost_equal(self.res1.bic, self.res2.bic, DECIMAL_3)
def test_predict(self):
assert_almost_equal(self.res1.model.predict(self.res1.params),
self.res2.phat, DECIMAL_4)
def test_predict_xb(self):
assert_almost_equal(self.res1.model.predict(self.res1.params,
linear=True),
self.res2.yhat, DECIMAL_4)
def test_loglikeobs(self):
#basic cross check
llobssum = self.res1.model.loglikeobs(self.res1.params).sum()
assert_almost_equal(llobssum, self.res1.llf, DECIMAL_14)
def test_jac(self):
#basic cross check
jacsum = self.res1.model.jac(self.res1.params).sum(0)
score = self.res1.model.score(self.res1.params)
assert_almost_equal(jacsum, score, DECIMAL_9) #Poisson has low precision ?
class CheckBinaryResults(CheckModelResults):
def test_pred_table(self):
assert_array_equal(self.res1.pred_table(), self.res2.pred_table)
def test_resid_dev(self):
assert_almost_equal(self.res1.resid_dev, self.res2.resid_dev,
DECIMAL_4)
def test_resid_generalized(self):
assert_almost_equal(self.res1.resid_generalized,
self.res2.resid_generalized, DECIMAL_4)
def smoke_test_resid_response(self):
self.res1.resid_response
class CheckMargEff(object):
"""
Test marginal effects (margeff) and its options
"""
def test_nodummy_dydxoverall(self):
me = self.res1.get_margeff()
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dydx, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dydx_se, DECIMAL_4)
def test_nodummy_dydxmean(self):
me = self.res1.get_margeff(at='mean')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dydxmean_se, DECIMAL_4)
def test_nodummy_dydxmedian(self):
me = self.res1.get_margeff(at='median')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dydxmedian, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dydxmedian_se, DECIMAL_4)
def test_nodummy_dydxzero(self):
me = self.res1.get_margeff(at='zero')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dydxzero, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dydxzero, DECIMAL_4)
def test_nodummy_dyexoverall(self):
me = self.res1.get_margeff(method='dyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dyex, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dyex_se, DECIMAL_4)
def test_nodummy_dyexmean(self):
me = self.res1.get_margeff(at='mean', method='dyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dyexmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dyexmean_se, DECIMAL_4)
def test_nodummy_dyexmedian(self):
me = self.res1.get_margeff(at='median', method='dyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dyexmedian, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dyexmedian_se, DECIMAL_4)
def test_nodummy_dyexzero(self):
me = self.res1.get_margeff(at='zero', method='dyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dyexzero, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dyexzero_se, DECIMAL_4)
def test_nodummy_eydxoverall(self):
me = self.res1.get_margeff(method='eydx')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eydx, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eydx_se, DECIMAL_4)
def test_nodummy_eydxmean(self):
me = self.res1.get_margeff(at='mean', method='eydx')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eydxmean_se, DECIMAL_4)
def test_nodummy_eydxmedian(self):
me = self.res1.get_margeff(at='median', method='eydx')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eydxmedian, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eydxmedian_se, DECIMAL_4)
def test_nodummy_eydxzero(self):
me = self.res1.get_margeff(at='zero', method='eydx')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eydxzero, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eydxzero_se, DECIMAL_4)
def test_nodummy_eyexoverall(self):
me = self.res1.get_margeff(method='eyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eyex, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eyex_se, DECIMAL_4)
def test_nodummy_eyexmean(self):
me = self.res1.get_margeff(at='mean', method='eyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eyexmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eyexmean_se, DECIMAL_4)
def test_nodummy_eyexmedian(self):
me = self.res1.get_margeff(at='median', method='eyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eyexmedian, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eyexmedian_se, DECIMAL_4)
def test_nodummy_eyexzero(self):
me = self.res1.get_margeff(at='zero', method='eyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eyexzero, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eyexzero_se, DECIMAL_4)
def test_dummy_dydxoverall(self):
me = self.res1.get_margeff(dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_dydx, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_dydx_se, DECIMAL_4)
def test_dummy_dydxmean(self):
me = self.res1.get_margeff(at='mean', dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_dydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_dydxmean_se, DECIMAL_4)
def test_dummy_eydxoverall(self):
me = self.res1.get_margeff(method='eydx', dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_eydx, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_eydx_se, DECIMAL_4)
def test_dummy_eydxmean(self):
me = self.res1.get_margeff(at='mean', method='eydx', dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_eydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_eydxmean_se, DECIMAL_4)
def test_count_dydxoverall(self):
me = self.res1.get_margeff(count=True)
assert_almost_equal(me.margeff,
self.res2.margeff_count_dydx, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_count_dydx_se, DECIMAL_4)
def test_count_dydxmean(self):
me = self.res1.get_margeff(count=True, at='mean')
assert_almost_equal(me.margeff,
self.res2.margeff_count_dydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_count_dydxmean_se, DECIMAL_4)
def test_count_dummy_dydxoverall(self):
me = self.res1.get_margeff(count=True, dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_count_dummy_dydxoverall, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_count_dummy_dydxoverall_se, DECIMAL_4)
def test_count_dummy_dydxmean(self):
me = self.res1.get_margeff(count=True, dummy=True, at='mean')
assert_almost_equal(me.margeff,
self.res2.margeff_count_dummy_dydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_count_dummy_dydxmean_se, DECIMAL_4)
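# Hedged usage sketch (editorial addition): the options exercised above map
# onto `DiscreteResults.get_margeff(at=..., method=..., dummy=..., count=...)`;
# e.g. at='mean' evaluates effects at the sample means and method='eyex'
# reports elasticities. The Spector data set is used only for illustration.
def _example_get_margeff():
    data = sm.datasets.spector.load()
    exog = sm.add_constant(data.exog, prepend=False)
    res = Logit(data.endog, exog).fit(disp=0)
    return res.get_margeff(at='mean', method='dydx').summary()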
class TestProbitNewton(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = Probit(data.endog, data.exog).fit(method="newton", disp=0)
res2 = Spector()
res2.probit()
cls.res2 = res2
#def test_predict(self):
# assert_almost_equal(self.res1.model.predict(self.res1.params),
# self.res2.predict, DECIMAL_4)
class TestProbitBFGS(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = Probit(data.endog, data.exog).fit(method="bfgs",
disp=0)
res2 = Spector()
res2.probit()
cls.res2 = res2
class TestProbitNM(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.probit()
cls.res2 = res2
cls.res1 = Probit(data.endog, data.exog).fit(method="nm",
disp=0, maxiter=500)
class TestProbitPowell(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.probit()
cls.res2 = res2
cls.res1 = Probit(data.endog, data.exog).fit(method="powell",
disp=0, ftol=1e-8)
class TestProbitCG(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.probit()
cls.res2 = res2
cls.res1 = Probit(data.endog, data.exog).fit(method="cg",
disp=0, maxiter=500, gtol=1e-08)
class TestProbitNCG(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.probit()
cls.res2 = res2
cls.res1 = Probit(data.endog, data.exog).fit(method="ncg",
disp=0, avextol=1e-8)
class TestProbitBasinhopping(CheckBinaryResults):
@classmethod
def setupClass(cls):
if not has_basinhopping:
raise SkipTest("Skipped TestProbitBasinhopping since"
" basinhopping solver is not available")
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.probit()
cls.res2 = res2
fit = Probit(data.endog, data.exog).fit
cls.res1 = fit(method="basinhopping", disp=0, niter=5,
minimizer={'method' : 'L-BFGS-B', 'tol' : 1e-8})
class CheckLikelihoodModelL1(object):
"""
For testing results generated with L1 regularization
"""
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_conf_int(self):
assert_almost_equal(
self.res1.conf_int(), self.res2.conf_int, DECIMAL_4)
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)
def test_nnz_params(self):
assert_almost_equal(
self.res1.nnz_params, self.res2.nnz_params, DECIMAL_4)
def test_aic(self):
assert_almost_equal(
self.res1.aic, self.res2.aic, DECIMAL_3)
def test_bic(self):
assert_almost_equal(
self.res1.bic, self.res2.bic, DECIMAL_3)
class TestProbitL1(CheckLikelihoodModelL1):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
alpha = np.array([0.1, 0.2, 0.3, 10]) #/ data.exog.shape[0]
cls.res1 = Probit(data.endog, data.exog).fit_regularized(
method="l1", alpha=alpha, disp=0, trim_mode='auto',
auto_trim_tol=0.02, acc=1e-10, maxiter=1000)
res2 = DiscreteL1()
res2.probit()
cls.res2 = res2
def test_cov_params(self):
assert_almost_equal(
self.res1.cov_params(), self.res2.cov_params, DECIMAL_4)
class TestMNLogitL1(CheckLikelihoodModelL1):
@classmethod
def setupClass(cls):
anes_data = sm.datasets.anes96.load()
anes_exog = anes_data.exog
anes_exog = sm.add_constant(anes_exog, prepend=False)
mlogit_mod = sm.MNLogit(anes_data.endog, anes_exog)
alpha = 10. * np.ones((mlogit_mod.J - 1, mlogit_mod.K)) #/ anes_exog.shape[0]
alpha[-1,:] = 0
cls.res1 = mlogit_mod.fit_regularized(
method='l1', alpha=alpha, trim_mode='auto', auto_trim_tol=0.02,
acc=1e-10, disp=0)
res2 = DiscreteL1()
res2.mnlogit()
cls.res2 = res2
class TestLogitL1(CheckLikelihoodModelL1):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
cls.alpha = 3 * np.array([0., 1., 1., 1.]) #/ data.exog.shape[0]
cls.res1 = Logit(data.endog, data.exog).fit_regularized(
method="l1", alpha=cls.alpha, disp=0, trim_mode='size',
size_trim_tol=1e-5, acc=1e-10, maxiter=1000)
res2 = DiscreteL1()
res2.logit()
cls.res2 = res2
def test_cov_params(self):
assert_almost_equal(
self.res1.cov_params(), self.res2.cov_params, DECIMAL_4)
class TestCVXOPT(object):
@classmethod
def setupClass(self):
self.data = sm.datasets.spector.load()
self.data.exog = sm.add_constant(self.data.exog, prepend=True)
def test_cvxopt_versus_slsqp(self):
        # Compare results from cvxopt to the standard slsqp solver
if has_cvxopt:
self.alpha = 3. * np.array([0, 1, 1, 1.]) #/ self.data.endog.shape[0]
res_slsqp = Logit(self.data.endog, self.data.exog).fit_regularized(
method="l1", alpha=self.alpha, disp=0, acc=1e-10, maxiter=1000,
trim_mode='auto')
res_cvxopt = Logit(self.data.endog, self.data.exog).fit_regularized(
method="l1_cvxopt_cp", alpha=self.alpha, disp=0, abstol=1e-10,
trim_mode='auto', auto_trim_tol=0.01, maxiter=1000)
assert_almost_equal(res_slsqp.params, res_cvxopt.params, DECIMAL_4)
else:
raise SkipTest("Skipped test_cvxopt since cvxopt is not available")
class TestSweepAlphaL1(object):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
cls.model = Logit(data.endog, data.exog)
cls.alphas = np.array(
[[0.1, 0.1, 0.1, 0.1],
[0.4, 0.4, 0.5, 0.5],
[0.5, 0.5, 1, 1]]) #/ data.exog.shape[0]
cls.res1 = DiscreteL1()
cls.res1.sweep()
def test_sweep_alpha(self):
for i in range(3):
alpha = self.alphas[i, :]
res2 = self.model.fit_regularized(
method="l1", alpha=alpha, disp=0, acc=1e-10,
trim_mode='off', maxiter=1000)
assert_almost_equal(res2.params, self.res1.params[i], DECIMAL_4)
class CheckL1Compatability(object):
"""
    Tests compatibility between l1-regularized and unregularized fits by
    setting alpha such that certain parameters should be effectively
    unregularized, and others should be ignored by the model.
"""
def test_params(self):
m = self.m
assert_almost_equal(
self.res_unreg.params, self.res_reg.params[:m], DECIMAL_4)
        # The regularized-away entries params[m:] should be close to zero
assert_almost_equal(0, self.res_reg.params[m:], DECIMAL_4)
def test_cov_params(self):
m = self.m
# The restricted cov_params should be equal
assert_almost_equal(
self.res_unreg.cov_params(), self.res_reg.cov_params()[:m, :m],
DECIMAL_1)
def test_df(self):
assert_equal(self.res_unreg.df_model, self.res_reg.df_model)
assert_equal(self.res_unreg.df_resid, self.res_reg.df_resid)
def test_t_test(self):
m = self.m
kvars = self.kvars
t_unreg = self.res_unreg.t_test(np.eye(m))
t_reg = self.res_reg.t_test(np.eye(kvars))
assert_almost_equal(t_unreg.effect, t_reg.effect[:m], DECIMAL_3)
assert_almost_equal(t_unreg.sd, t_reg.sd[:m], DECIMAL_3)
assert_almost_equal(np.nan, t_reg.sd[m])
assert_almost_equal(t_unreg.tvalue, t_reg.tvalue[:m], DECIMAL_3)
assert_almost_equal(np.nan, t_reg.tvalue[m])
def test_f_test(self):
m = self.m
kvars = self.kvars
f_unreg = self.res_unreg.f_test(np.eye(m))
f_reg = self.res_reg.f_test(np.eye(kvars)[:m])
assert_almost_equal(f_unreg.fvalue, f_reg.fvalue, DECIMAL_2)
assert_almost_equal(f_unreg.pvalue, f_reg.pvalue, DECIMAL_3)
def test_bad_r_matrix(self):
kvars = self.kvars
assert_raises(ValueError, self.res_reg.f_test, np.eye(kvars) )
class TestPoissonL1Compatability(CheckL1Compatability):
@classmethod
def setupClass(cls):
cls.kvars = 10 # Number of variables
cls.m = 7 # Number of unregularized parameters
rand_data = sm.datasets.randhie.load()
rand_exog = rand_data.exog.view(float).reshape(len(rand_data.exog), -1)
rand_exog = sm.add_constant(rand_exog, prepend=True)
# Drop some columns and do an unregularized fit
exog_no_PSI = rand_exog[:, :cls.m]
cls.res_unreg = sm.Poisson(
rand_data.endog, exog_no_PSI).fit(method="newton", disp=False)
        # Do a regularized fit with alpha, effectively dropping the last columns
alpha = 10 * len(rand_data.endog) * np.ones(cls.kvars)
alpha[:cls.m] = 0
cls.res_reg = sm.Poisson(rand_data.endog, rand_exog).fit_regularized(
method='l1', alpha=alpha, disp=False, acc=1e-10, maxiter=2000,
trim_mode='auto')
class TestLogitL1Compatability(CheckL1Compatability):
@classmethod
def setupClass(cls):
cls.kvars = 4 # Number of variables
cls.m = 3 # Number of unregularized parameters
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
# Do a regularized fit with alpha, effectively dropping the last column
alpha = np.array([0, 0, 0, 10])
cls.res_reg = Logit(data.endog, data.exog).fit_regularized(
method="l1", alpha=alpha, disp=0, acc=1e-15, maxiter=2000,
trim_mode='auto')
        # Actually drop the last column and do an unregularized fit
exog_no_PSI = data.exog[:, :cls.m]
cls.res_unreg = Logit(data.endog, exog_no_PSI).fit(disp=0, tol=1e-15)
class TestMNLogitL1Compatability(CheckL1Compatability):
@classmethod
def setupClass(cls):
cls.kvars = 4 # Number of variables
cls.m = 3 # Number of unregularized parameters
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
alpha = np.array([0, 0, 0, 10])
cls.res_reg = MNLogit(data.endog, data.exog).fit_regularized(
method="l1", alpha=alpha, disp=0, acc=1e-15, maxiter=2000,
trim_mode='auto')
        # Actually drop the last column and do an unregularized fit
exog_no_PSI = data.exog[:, :cls.m]
cls.res_unreg = MNLogit(data.endog, exog_no_PSI).fit(
disp=0, tol=1e-15)
def test_t_test(self):
m = self.m
kvars = self.kvars
t_unreg = self.res_unreg.t_test(np.eye(m))
t_reg = self.res_reg.t_test(np.eye(kvars))
assert_almost_equal(t_unreg.effect, t_reg.effect[:m], DECIMAL_3)
assert_almost_equal(t_unreg.sd, t_reg.sd[:m], DECIMAL_3)
assert_almost_equal(np.nan, t_reg.sd[m])
assert_almost_equal(t_unreg.tvalue, t_reg.tvalue[:m, :m], DECIMAL_3)
def test_f_test(self):
raise SkipTest("Skipped test_f_test for MNLogit")
class TestProbitL1Compatability(CheckL1Compatability):
@classmethod
def setupClass(cls):
cls.kvars = 4 # Number of variables
cls.m = 3 # Number of unregularized parameters
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
alpha = np.array([0, 0, 0, 10])
cls.res_reg = Probit(data.endog, data.exog).fit_regularized(
method="l1", alpha=alpha, disp=0, acc=1e-15, maxiter=2000,
trim_mode='auto')
        # Actually drop the last column and do an unregularized fit
exog_no_PSI = data.exog[:, :cls.m]
cls.res_unreg = Probit(data.endog, exog_no_PSI).fit(disp=0, tol=1e-15)
class CompareL1(object):
"""
For checking results for l1 regularization.
Assumes self.res1 and self.res2 are two legitimate models to be compared.
"""
def test_basic_results(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
assert_almost_equal(self.res1.cov_params(), self.res2.cov_params(), DECIMAL_4)
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int(), DECIMAL_4)
assert_almost_equal(self.res1.pvalues, self.res2.pvalues, DECIMAL_4)
assert_almost_equal(self.res1.pred_table(), self.res2.pred_table(), DECIMAL_4)
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)
assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_4)
assert_almost_equal(self.res1.aic, self.res2.aic, DECIMAL_4)
assert_almost_equal(self.res1.bic, self.res2.bic, DECIMAL_4)
assert_almost_equal(self.res1.pvalues, self.res2.pvalues, DECIMAL_4)
class CompareL11D(CompareL1):
"""
Check t and f tests. This only works for 1-d results
"""
def test_tests(self):
restrictmat = np.eye(len(self.res1.params.ravel()))
assert_almost_equal(self.res1.t_test(restrictmat).pvalue,
self.res2.t_test(restrictmat).pvalue, DECIMAL_4)
assert_almost_equal(self.res1.f_test(restrictmat).pvalue,
self.res2.f_test(restrictmat).pvalue, DECIMAL_4)
class TestL1AlphaZeroLogit(CompareL11D):
"""
Compares l1 model with alpha = 0 to the unregularized model.
"""
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
cls.res1 = Logit(data.endog, data.exog).fit_regularized(
method="l1", alpha=0, disp=0, acc=1e-15, maxiter=1000,
trim_mode='auto', auto_trim_tol=0.01)
cls.res2 = Logit(data.endog, data.exog).fit(disp=0, tol=1e-15)
class TestL1AlphaZeroProbit(CompareL11D):
"""
Compares l1 model with alpha = 0 to the unregularized model.
"""
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
cls.res1 = Probit(data.endog, data.exog).fit_regularized(
method="l1", alpha=0, disp=0, acc=1e-15, maxiter=1000,
trim_mode='auto', auto_trim_tol=0.01)
cls.res2 = Probit(data.endog, data.exog).fit(disp=0, tol=1e-15)
class TestL1AlphaZeroMNLogit(CompareL1):
@classmethod
def setupClass(cls):
data = sm.datasets.anes96.load()
data.exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = MNLogit(data.endog, data.exog).fit_regularized(
method="l1", alpha=0, disp=0, acc=1e-15, maxiter=1000,
trim_mode='auto', auto_trim_tol=0.01)
cls.res2 = MNLogit(data.endog, data.exog).fit(disp=0, tol=1e-15)
class TestLogitNewton(CheckBinaryResults, CheckMargEff):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = Logit(data.endog, data.exog).fit(method="newton", disp=0)
res2 = Spector()
res2.logit()
cls.res2 = res2
def test_resid_pearson(self):
assert_almost_equal(self.res1.resid_pearson,
self.res2.resid_pearson, 5)
def test_nodummy_exog1(self):
me = self.res1.get_margeff(atexog={0 : 2.0, 2 : 1.})
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_atexog1, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_atexog1_se, DECIMAL_4)
def test_nodummy_exog2(self):
me = self.res1.get_margeff(atexog={1 : 21., 2 : 0}, at='mean')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_atexog2, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_atexog2_se, DECIMAL_4)
def test_dummy_exog1(self):
me = self.res1.get_margeff(atexog={0 : 2.0, 2 : 1.}, dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_atexog1, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_atexog1_se, DECIMAL_4)
def test_dummy_exog2(self):
me = self.res1.get_margeff(atexog={1 : 21., 2 : 0}, at='mean',
dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_atexog2, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_atexog2_se, DECIMAL_4)
class TestLogitBFGS(CheckBinaryResults, CheckMargEff):
@classmethod
def setupClass(cls):
#import scipy
#major, minor, micro = scipy.__version__.split('.')[:3]
#if int(minor) < 9:
# raise SkipTest
#Skip this unconditionally for release 0.3.0
#since there are still problems with scipy 0.9.0 on some machines
#Ralf on mailing list 2011-03-26
        raise SkipTest("Skipped TestLogitBFGS unconditionally; "
                       "scipy 0.9.0 BFGS problems, see the note above")
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.logit()
cls.res2 = res2
cls.res1 = Logit(data.endog, data.exog).fit(method="bfgs",
disp=0)
class TestPoissonNewton(CheckModelResults):
@classmethod
def setupClass(cls):
from results.results_discrete import RandHIE
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = Poisson(data.endog, exog).fit(method='newton', disp=0)
res2 = RandHIE()
res2.poisson()
cls.res2 = res2
def test_margeff_overall(self):
me = self.res1.get_margeff()
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_overall, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_overall_se, DECIMAL_4)
def test_margeff_dummy_overall(self):
me = self.res1.get_margeff(dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_overall, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_overall_se, DECIMAL_4)
def test_resid(self):
assert_almost_equal(self.res1.resid, self.res2.resid, 2)
class TestNegativeBinomialNB2Newton(CheckModelResults):
@classmethod
def setupClass(cls):
from results.results_discrete import RandHIE
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = NegativeBinomial(data.endog, exog, 'nb2').fit(method='newton', disp=0)
res2 = RandHIE()
res2.negativebinomial_nb2_bfgs()
cls.res2 = res2
def test_jac(self):
pass
    # NOTE: The bse here is much closer in precision to Stata
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_3)
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_alpha(self):
self.res1.bse # attaches alpha_std_err
assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha,
DECIMAL_4)
assert_almost_equal(self.res1.lnalpha_std_err,
self.res2.lnalpha_std_err, DECIMAL_4)
def test_conf_int(self):
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int,
DECIMAL_3)
def test_zstat(self): # Low precision because Z vs. t
assert_almost_equal(self.res1.pvalues[:-1], self.res2.pvalues,
DECIMAL_2)
def test_fittedvalues(self):
assert_almost_equal(self.res1.fittedvalues[:10],
self.res2.fittedvalues[:10], DECIMAL_3)
def test_predict(self):
assert_almost_equal(self.res1.predict()[:10],
np.exp(self.res2.fittedvalues[:10]), DECIMAL_3)
def test_predict_xb(self):
assert_almost_equal(self.res1.predict(linear=True)[:10],
self.res2.fittedvalues[:10], DECIMAL_3)
def no_info(self):
pass
test_jac = no_info
class TestNegativeBinomialNB1Newton(CheckModelResults):
@classmethod
def setupClass(cls):
from results.results_discrete import RandHIE
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = NegativeBinomial(data.endog, exog, 'nb1').fit(
method="newton",
maxiter=100,
disp=0)
res2 = RandHIE()
res2.negativebinomial_nb1_bfgs()
cls.res2 = res2
def test_zstat(self):
assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_1)
def test_lnalpha(self):
self.res1.bse # attaches alpha_std_err
assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha, 3)
assert_almost_equal(self.res1.lnalpha_std_err,
self.res2.lnalpha_std_err, DECIMAL_4)
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_conf_int(self):
        # the bse for alpha, obtained from the Hessian approximation, is not
        # high precision
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int,
DECIMAL_2)
def test_jac(self):
pass
def test_predict(self):
pass
def test_predict_xb(self):
pass
class TestNegativeBinomialNB2BFGS(CheckModelResults):
@classmethod
def setupClass(cls):
from results.results_discrete import RandHIE
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = NegativeBinomial(data.endog, exog, 'nb2').fit(
method='bfgs', disp=0)
res2 = RandHIE()
res2.negativebinomial_nb2_bfgs()
cls.res2 = res2
def test_jac(self):
pass
    # NOTE: The bse here is much closer in precision to Stata
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_3)
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_alpha(self):
self.res1.bse # attaches alpha_std_err
assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha,
DECIMAL_4)
assert_almost_equal(self.res1.lnalpha_std_err,
self.res2.lnalpha_std_err, DECIMAL_4)
def test_conf_int(self):
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int,
DECIMAL_3)
def test_zstat(self): # Low precision because Z vs. t
assert_almost_equal(self.res1.pvalues[:-1], self.res2.pvalues,
DECIMAL_2)
def test_fittedvalues(self):
assert_almost_equal(self.res1.fittedvalues[:10],
self.res2.fittedvalues[:10], DECIMAL_3)
def test_predict(self):
assert_almost_equal(self.res1.predict()[:10],
np.exp(self.res2.fittedvalues[:10]), DECIMAL_3)
def test_predict_xb(self):
assert_almost_equal(self.res1.predict(linear=True)[:10],
self.res2.fittedvalues[:10], DECIMAL_3)
def no_info(self):
pass
test_jac = no_info
class TestNegativeBinomialNB1BFGS(CheckModelResults):
@classmethod
def setupClass(cls):
from results.results_discrete import RandHIE
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = NegativeBinomial(data.endog, exog, 'nb1').fit(method="bfgs",
maxiter=100,
disp=0)
res2 = RandHIE()
res2.negativebinomial_nb1_bfgs()
cls.res2 = res2
def test_zstat(self):
assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_1)
def test_lnalpha(self):
self.res1.bse # attaches alpha_std_err
assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha, 3)
assert_almost_equal(self.res1.lnalpha_std_err,
self.res2.lnalpha_std_err, DECIMAL_4)
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_conf_int(self):
        # the bse for alpha, obtained from the Hessian approximation, is not
        # high precision
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int,
DECIMAL_2)
def test_jac(self):
pass
def test_predict(self):
pass
def test_predict_xb(self):
pass
class TestNegativeBinomialGeometricBFGS(CheckModelResults):
"""
    Cannot find another implementation of the geometric model to cross-check
    results. We only test fitted values because the geometric model has fewer
    parameters than nb1 and nb2, and we want to make sure that predict(),
    i.e. np.dot(exog, params), works.
"""
@classmethod
def setupClass(cls):
from results.results_discrete import RandHIE
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = NegativeBinomial(data.endog, exog, 'geometric').fit(method='bfgs', disp=0)
res2 = RandHIE()
res2.negativebinomial_geometric_bfgs()
cls.res2 = res2
def test_aic(self):
assert_almost_equal(self.res1.aic, self.res2.aic, DECIMAL_1)
def test_bic(self):
assert_almost_equal(self.res1.bic, self.res2.bic, DECIMAL_1)
def test_conf_int(self):
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int, DECIMAL_3)
def test_fittedvalues(self):
assert_almost_equal(self.res1.fittedvalues[:10], self.res2.fittedvalues[:10], DECIMAL_3)
def test_jac(self):
pass
def test_predict(self):
assert_almost_equal(self.res1.predict()[:10], np.exp(self.res2.fittedvalues[:10]), DECIMAL_3)
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_3)
def test_predict_xb(self):
assert_almost_equal(self.res1.predict(linear=True)[:10], self.res2.fittedvalues[:10], DECIMAL_3)
def test_zstat(self): # Low precision because Z vs. t
assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_1)
def no_info(self):
pass
def test_llf(self):
assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_1)
def test_llr(self):
assert_almost_equal(self.res1.llr, self.res2.llr, DECIMAL_2)
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_3)
test_jac = no_info
class TestMNLogitNewtonBaseZero(CheckModelResults):
@classmethod
def setupClass(cls):
from results.results_discrete import Anes
data = sm.datasets.anes96.load()
cls.data = data
exog = data.exog
exog = sm.add_constant(exog, prepend=False)
cls.res1 = MNLogit(data.endog, exog).fit(method="newton", disp=0)
res2 = Anes()
res2.mnlogit_basezero()
cls.res2 = res2
def test_margeff_overall(self):
me = self.res1.get_margeff()
assert_almost_equal(me.margeff, self.res2.margeff_dydx_overall, 6)
assert_almost_equal(me.margeff_se, self.res2.margeff_dydx_overall_se, 6)
def test_margeff_mean(self):
me = self.res1.get_margeff(at='mean')
assert_almost_equal(me.margeff, self.res2.margeff_dydx_mean, 7)
assert_almost_equal(me.margeff_se, self.res2.margeff_dydx_mean_se, 7)
def test_margeff_dummy(self):
data = self.data
vote = data.data['vote']
exog = np.column_stack((data.exog, vote))
exog = sm.add_constant(exog, prepend=False)
res = MNLogit(data.endog, exog).fit(method="newton", disp=0)
me = res.get_margeff(dummy=True)
assert_almost_equal(me.margeff, self.res2.margeff_dydx_dummy_overall,
6)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dydx_dummy_overall_se, 6)
me = res.get_margeff(dummy=True, method="eydx")
assert_almost_equal(me.margeff, self.res2.margeff_eydx_dummy_overall,
5)
assert_almost_equal(me.margeff_se,
self.res2.margeff_eydx_dummy_overall_se, 6)
def test_j(self):
assert_equal(self.res1.model.J, self.res2.J)
def test_k(self):
assert_equal(self.res1.model.K, self.res2.K)
def test_endog_names(self):
assert_equal(self.res1._get_endog_name(None,None)[1],
['y=1', 'y=2', 'y=3', 'y=4', 'y=5', 'y=6'])
def test_pred_table(self):
# fitted results taken from gretl
pred = [6, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 6, 0, 1, 6, 0, 0,
1, 1, 6, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 6, 0, 0, 6, 6, 0, 0, 1,
1, 6, 1, 6, 0, 0, 0, 1, 0, 1, 0, 0, 0, 6, 0, 0, 6, 0, 0, 0, 1,
1, 0, 0, 6, 6, 6, 6, 1, 0, 5, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0,
6, 0, 6, 6, 1, 0, 1, 1, 6, 5, 1, 0, 0, 0, 5, 0, 0, 6, 0, 1, 0,
0, 0, 0, 0, 1, 1, 0, 6, 6, 6, 6, 5, 0, 1, 1, 0, 1, 0, 6, 6, 0,
0, 0, 6, 0, 0, 0, 6, 6, 0, 5, 1, 0, 0, 0, 0, 6, 0, 5, 6, 6, 0,
0, 0, 0, 6, 1, 0, 0, 1, 0, 1, 6, 1, 1, 1, 1, 1, 0, 0, 0, 6, 0,
5, 1, 0, 6, 6, 6, 0, 0, 0, 0, 1, 6, 6, 0, 0, 0, 1, 1, 5, 6, 0,
6, 1, 0, 0, 1, 6, 0, 0, 1, 0, 6, 6, 0, 5, 6, 6, 0, 0, 6, 1, 0,
6, 0, 1, 0, 1, 6, 0, 1, 1, 1, 6, 0, 5, 0, 0, 6, 1, 0, 6, 5, 5,
0, 6, 1, 1, 1, 0, 0, 6, 0, 0, 5, 0, 0, 6, 6, 6, 6, 6, 0, 1, 0,
0, 6, 6, 0, 0, 1, 6, 0, 0, 6, 1, 6, 1, 1, 1, 0, 1, 6, 5, 0, 0,
1, 5, 0, 1, 6, 6, 1, 0, 0, 1, 6, 1, 5, 6, 1, 0, 0, 1, 1, 0, 6,
1, 6, 0, 1, 1, 5, 6, 6, 5, 1, 1, 1, 0, 6, 1, 6, 1, 0, 1, 0, 0,
1, 5, 0, 1, 1, 0, 5, 6, 0, 5, 1, 1, 6, 5, 0, 6, 0, 0, 0, 0, 0,
0, 1, 6, 1, 0, 5, 1, 0, 0, 1, 6, 0, 0, 6, 6, 6, 0, 2, 1, 6, 5,
6, 1, 1, 0, 5, 1, 1, 1, 6, 1, 6, 6, 5, 6, 0, 1, 0, 1, 6, 0, 6,
1, 6, 0, 0, 6, 1, 0, 6, 1, 0, 0, 0, 0, 6, 6, 6, 6, 5, 6, 6, 0,
0, 6, 1, 1, 6, 0, 0, 6, 6, 0, 6, 6, 0, 0, 6, 0, 0, 6, 6, 6, 1,
0, 6, 0, 0, 0, 6, 1, 1, 0, 1, 5, 0, 0, 5, 0, 0, 0, 1, 1, 6, 1,
0, 0, 0, 6, 6, 1, 1, 6, 5, 5, 0, 6, 6, 0, 1, 1, 0, 6, 6, 0, 6,
5, 5, 6, 5, 1, 0, 6, 0, 6, 1, 0, 1, 6, 6, 6, 1, 0, 6, 0, 5, 6,
6, 5, 0, 5, 1, 0, 6, 0, 6, 1, 5, 5, 0, 1, 5, 5, 2, 6, 6, 6, 5,
0, 0, 1, 6, 1, 0, 1, 6, 1, 0, 0, 1, 5, 6, 6, 0, 0, 0, 5, 6, 6,
6, 1, 5, 6, 1, 0, 0, 6, 5, 0, 1, 1, 1, 6, 6, 0, 1, 0, 0, 0, 5,
0, 0, 6, 1, 6, 0, 6, 1, 5, 5, 6, 5, 0, 0, 0, 0, 1, 1, 0, 5, 5,
0, 0, 0, 0, 1, 0, 6, 6, 1, 1, 6, 6, 0, 5, 5, 0, 0, 0, 6, 6, 1,
6, 0, 0, 5, 0, 1, 6, 5, 6, 6, 5, 5, 6, 6, 1, 0, 1, 6, 6, 1, 6,
0, 6, 0, 6, 5, 0, 6, 6, 0, 5, 6, 0, 6, 6, 5, 0, 1, 6, 6, 1, 0,
1, 0, 6, 6, 1, 0, 6, 6, 6, 0, 1, 6, 0, 1, 5, 1, 1, 5, 6, 6, 0,
1, 6, 6, 1, 5, 0, 5, 0, 6, 0, 1, 6, 1, 0, 6, 1, 6, 0, 6, 1, 0,
0, 0, 6, 6, 0, 1, 1, 6, 6, 6, 1, 6, 0, 5, 6, 0, 5, 6, 6, 5, 5,
5, 6, 0, 6, 0, 0, 0, 5, 0, 6, 1, 2, 6, 6, 6, 5, 1, 6, 0, 6, 0,
0, 0, 0, 6, 5, 0, 5, 1, 6, 5, 1, 6, 5, 1, 1, 0, 0, 6, 1, 1, 5,
6, 6, 0, 5, 2, 5, 5, 0, 5, 5, 5, 6, 5, 6, 6, 5, 2, 6, 5, 6, 0,
0, 6, 5, 0, 6, 0, 0, 6, 6, 6, 0, 5, 1, 1, 6, 6, 5, 2, 1, 6, 5,
6, 0, 6, 6, 1, 1, 5, 1, 6, 6, 6, 0, 0, 6, 1, 0, 5, 5, 1, 5, 6,
1, 6, 0, 1, 6, 5, 0, 0, 6, 1, 5, 1, 0, 6, 0, 6, 6, 5, 5, 6, 6,
6, 6, 2, 6, 6, 6, 5, 5, 5, 0, 1, 0, 0, 0, 6, 6, 1, 0, 6, 6, 6,
6, 6, 1, 0, 6, 1, 5, 5, 6, 6, 6, 6, 6, 5, 6, 1, 6, 2, 5, 5, 6,
5, 6, 6, 5, 6, 6, 5, 5, 6, 1, 5, 1, 6, 0, 2, 5, 0, 5, 0, 2, 1,
6, 0, 0, 6, 6, 1, 6, 0, 5, 5, 6, 6, 1, 6, 6, 6, 5, 6, 6, 1, 6,
5, 6, 1, 1, 0, 6, 6, 5, 1, 0, 0, 6, 6, 5, 6, 0, 1, 6, 0, 5, 6,
5, 2, 5, 2, 0, 0, 1, 6, 6, 1, 5, 6, 6, 0, 6, 6, 6, 6, 6, 5]
assert_array_equal(self.res1.predict().argmax(1), pred)
        # the column sums of pred_table should equal the predicted class counts
assert_array_equal(self.res1.pred_table().sum(0), np.bincount(pred))
# note this is just a regression test, gretl doesn't have a prediction
# table
pred = [[ 126., 41., 2., 0., 0., 12., 19.],
[ 77., 73., 3., 0., 0., 15., 12.],
[ 37., 43., 2., 0., 0., 19., 7.],
[ 12., 9., 1., 0., 0., 9., 6.],
[ 19., 10., 2., 0., 0., 20., 43.],
[ 22., 25., 1., 0., 0., 31., 71.],
[ 9., 7., 1., 0., 0., 18., 140.]]
assert_array_equal(self.res1.pred_table(), pred)
def test_resid(self):
assert_array_equal(self.res1.resid_misclassified, self.res2.resid)
def test_perfect_prediction():
cur_dir = os.path.dirname(os.path.abspath(__file__))
iris_dir = os.path.join(cur_dir, '..', '..', 'genmod', 'tests', 'results')
iris_dir = os.path.abspath(iris_dir)
iris = np.genfromtxt(os.path.join(iris_dir, 'iris.csv'), delimiter=",",
skip_header=1)
y = iris[:,-1]
X = iris[:,:-1]
X = X[y != 2]
y = y[y != 2]
X = sm.add_constant(X, prepend=True)
mod = Logit(y,X)
assert_raises(PerfectSeparationError, mod.fit)
#turn off raise PerfectSeparationError
mod.raise_on_perfect_prediction = False
mod.fit(disp=False) #should not raise
def test_poisson_predict():
#GH: 175, make sure poisson predict works without offset and exposure
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=True)
res = sm.Poisson(data.endog, exog).fit(method='newton', disp=0)
pred1 = res.predict()
pred2 = res.predict(exog)
assert_almost_equal(pred1, pred2)
    # extra options
pred3 = res.predict(exog, offset=0, exposure=1)
assert_almost_equal(pred1, pred3)
pred3 = res.predict(exog, offset=0, exposure=2)
assert_almost_equal(2*pred1, pred3)
pred3 = res.predict(exog, offset=np.log(2), exposure=1)
assert_almost_equal(2*pred1, pred3)
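    # These checks are consistent with the Poisson mean function
    # mu = exposure * exp(np.dot(exog, params) + offset), so exposure=2 and
    # offset=log(2) both double the baseline prediction.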
def test_poisson_newton():
#GH: 24, Newton doesn't work well sometimes
nobs = 10000
np.random.seed(987689)
x = np.random.randn(nobs, 3)
x = sm.add_constant(x, prepend=True)
y_count = np.random.poisson(np.exp(x.sum(1)))
mod = sm.Poisson(y_count, x)
res = mod.fit(start_params=-np.ones(4), method='newton', disp=0)
assert_(not res.mle_retvals['converged'])
def test_issue_339():
# make sure MNLogit summary works for J != K.
data = sm.datasets.anes96.load()
exog = data.exog
# leave out last exog column
exog = exog[:,:-1]
exog = sm.add_constant(exog, prepend=True)
res1 = sm.MNLogit(data.endog, exog).fit(method="newton", disp=0)
# strip the header from the test
smry = "\n".join(res1.summary().as_text().split('\n')[9:])
cur_dir = os.path.dirname(os.path.abspath(__file__))
test_case_file = os.path.join(cur_dir, 'results', 'mn_logit_summary.txt')
test_case = open(test_case_file, 'r').read()
np.testing.assert_(smry == test_case[:-1])
def test_issue_341():
data = sm.datasets.anes96.load()
exog = data.exog
# leave out last exog column
exog = exog[:,:-1]
exog = sm.add_constant(exog, prepend=True)
res1 = sm.MNLogit(data.endog, exog).fit(method="newton", disp=0)
x = exog[0]
np.testing.assert_equal(res1.predict(x).shape, (1,7))
np.testing.assert_equal(res1.predict(x[None]).shape, (1,7))
def test_iscount():
X = np.random.random((50, 10))
X[:,2] = np.random.randint(1, 10, size=50)
X[:,6] = np.random.randint(1, 10, size=50)
X[:,4] = np.random.randint(0, 2, size=50)
X[:,1] = np.random.randint(-10, 10, size=50) # not integers
count_ind = _iscount(X)
assert_equal(count_ind, [2, 6])
def test_isdummy():
X = np.random.random((50, 10))
X[:,2] = np.random.randint(1, 10, size=50)
X[:,6] = np.random.randint(0, 2, size=50)
X[:,4] = np.random.randint(0, 2, size=50)
X[:,1] = np.random.randint(-10, 10, size=50) # not integers
count_ind = _isdummy(X)
assert_equal(count_ind, [4, 6])
if __name__ == "__main__":
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb'],
exit=False)
|
yarikoptic/pystatsmodels
|
statsmodels/discrete/tests/test_discrete.py
|
Python
|
bsd-3-clause
| 49,558
|
import logging
import os
import time
from datetime import datetime, timedelta
from django.conf import settings
from django.db import connections
from django.db.models import Q, F, Avg
import cronjobs
import multidb
from celery.task.sets import TaskSet
import waffle
from olympia import amo
from olympia.amo.celery import task
from olympia.amo.decorators import write
from olympia.amo.utils import chunked, walkfiles
from olympia.addons.models import Addon, AppSupport, FrozenAddon
from olympia.files.models import File
from olympia.lib.es.utils import raise_if_reindex_in_progress
from olympia.stats.models import UpdateCount
log = logging.getLogger('z.cron')
task_log = logging.getLogger('z.task')
@cronjobs.register
def update_addon_average_daily_users():
"""Update add-ons ADU totals."""
if not waffle.switch_is_active('local-statistics-processing'):
return False
raise_if_reindex_in_progress('amo')
cursor = connections[multidb.get_slave()].cursor()
q = """SELECT addon_id, AVG(`count`)
FROM update_counts
WHERE `date` > DATE_SUB(CURDATE(), INTERVAL 7 DAY)
GROUP BY addon_id
ORDER BY addon_id"""
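    # Each returned row pairs an addon_id with its average daily user count
    # over the last 7 days; the rows are chunked into Celery subtasks below.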
cursor.execute(q)
d = cursor.fetchall()
cursor.close()
ts = [_update_addon_average_daily_users.subtask(args=[chunk])
for chunk in chunked(d, 250)]
TaskSet(ts).apply_async()
@task
def _update_addon_average_daily_users(data, **kw):
task_log.info("[%s] Updating add-ons ADU totals." % (len(data)))
if not waffle.switch_is_active('local-statistics-processing'):
return False
for pk, count in data:
try:
addon = Addon.objects.get(pk=pk)
except Addon.DoesNotExist:
            # The processing input comes from metrics, which might be out of
            # date with regard to currently existing add-ons.
m = "Got an ADU update (%s) but the add-on doesn't exist (%s)"
task_log.debug(m % (count, pk))
continue
if (count - addon.total_downloads) > 10000:
# Adjust ADU to equal total downloads so bundled add-ons don't
# skew the results when sorting by users.
task_log.info('Readjusted ADU count for addon %s' % addon.slug)
addon.update(average_daily_users=addon.total_downloads)
else:
addon.update(average_daily_users=count)
@cronjobs.register
def update_addon_download_totals():
"""Update add-on total and average downloads."""
if not waffle.switch_is_active('local-statistics-processing'):
return False
cursor = connections[multidb.get_slave()].cursor()
# We need to use SQL for this until
# http://code.djangoproject.com/ticket/11003 is resolved
q = """SELECT addon_id, AVG(count), SUM(count)
FROM download_counts
USE KEY (`addon_and_count`)
JOIN addons ON download_counts.addon_id=addons.id
WHERE addons.status != %s
GROUP BY addon_id
ORDER BY addon_id"""
cursor.execute(q, [amo.STATUS_DELETED])
d = cursor.fetchall()
cursor.close()
ts = [_update_addon_download_totals.subtask(args=[chunk])
for chunk in chunked(d, 250)]
TaskSet(ts).apply_async()
@task
def _update_addon_download_totals(data, **kw):
task_log.info('[%s] Updating add-ons download+average totals.' %
(len(data)))
if not waffle.switch_is_active('local-statistics-processing'):
return False
for pk, avg, sum in data:
try:
addon = Addon.objects.get(pk=pk)
            # Don't trigger a save unless we have to. The query that sends us
            # data doesn't filter out deleted add-ons, and an add-on's totals
            # may not have changed, so this check avoids a lot of unnecessary
            # save queries.
if (addon.average_daily_downloads != avg or
addon.total_downloads != sum):
addon.update(average_daily_downloads=avg, total_downloads=sum)
except Addon.DoesNotExist:
            # The processing input comes from metrics, which might be out of
            # date with regard to currently existing add-ons.
            m = ("Got new download totals (total=%s,avg=%s) but the add-on "
                 "doesn't exist (%s)" % (sum, avg, pk))
task_log.debug(m)
def _change_last_updated(next):
# We jump through some hoops here to make sure we only change the add-ons
# that really need it, and to invalidate properly.
current = dict(Addon.objects.values_list('id', 'last_updated'))
changes = {}
for addon, last_updated in next.items():
try:
if current[addon] != last_updated:
changes[addon] = last_updated
except KeyError:
pass
if not changes:
return
log.debug('Updating %s add-ons' % len(changes))
# Update + invalidate.
qs = Addon.objects.no_cache().filter(id__in=changes).no_transforms()
for addon in qs:
addon.last_updated = changes[addon.id]
addon.save()
@cronjobs.register
@write
def addon_last_updated():
next = {}
for q in Addon._last_updated_queries().values():
for addon, last_updated in q.values_list('id', 'last_updated'):
next[addon] = last_updated
_change_last_updated(next)
# Get anything that didn't match above.
other = (Addon.objects.no_cache().filter(last_updated__isnull=True)
.values_list('id', 'created'))
_change_last_updated(dict(other))
@cronjobs.register
def update_addon_appsupport():
# Find all the add-ons that need their app support details updated.
newish = (Q(last_updated__gte=F('appsupport__created')) |
Q(appsupport__created__isnull=True))
# Search providers don't list supported apps.
has_app = Q(versions__apps__isnull=False) | Q(type=amo.ADDON_SEARCH)
has_file = Q(versions__files__status__in=amo.VALID_STATUSES)
good = Q(has_app, has_file) | Q(type=amo.ADDON_PERSONA)
ids = (Addon.objects.valid().distinct()
.filter(newish, good).values_list('id', flat=True))
task_log.info('Updating appsupport for %d new-ish addons.' % len(ids))
ts = [_update_appsupport.subtask(args=[chunk])
for chunk in chunked(ids, 20)]
TaskSet(ts).apply_async()
@cronjobs.register
def update_all_appsupport():
from .tasks import update_appsupport
ids = sorted(set(AppSupport.objects.values_list('addon', flat=True)))
task_log.info('Updating appsupport for %s addons.' % len(ids))
for idx, chunk in enumerate(chunked(ids, 100)):
if idx % 10 == 0:
task_log.info('[%s/%s] Updating appsupport.'
% (idx * 100, len(ids)))
update_appsupport(chunk)
@task
def _update_appsupport(ids, **kw):
from .tasks import update_appsupport
    task_log.info('Updating appsupport for %d new-ish addons.' % len(ids))
update_appsupport(ids)
@cronjobs.register
def hide_disabled_files():
# If an add-on or a file is disabled, it should be moved to
# GUARDED_ADDONS_PATH so it's not publicly visible.
q = (Q(version__addon__status=amo.STATUS_DISABLED) |
Q(version__addon__disabled_by_user=True))
ids = (File.objects.filter(q | Q(status=amo.STATUS_DISABLED))
.values_list('id', flat=True))
for chunk in chunked(ids, 300):
qs = File.objects.no_cache().filter(id__in=chunk)
qs = qs.select_related('version')
for f in qs:
f.hide_disabled_file()
@cronjobs.register
def unhide_disabled_files():
    # Files are getting stuck in /guarded-addons for some reason. This job
    # checks that guarded add-ons really are supposed to be disabled, and
    # unhides any files that should not be there.
log = logging.getLogger('z.files.disabled')
q = (Q(version__addon__status=amo.STATUS_DISABLED) |
Q(version__addon__disabled_by_user=True))
files = set(File.objects.filter(q | Q(status=amo.STATUS_DISABLED))
.values_list('version__addon', 'filename'))
for filepath in walkfiles(settings.GUARDED_ADDONS_PATH):
addon, filename = filepath.split('/')[-2:]
if tuple([int(addon), filename]) not in files:
log.warning(u'File that should not be guarded: %s.', filepath)
try:
file_ = (File.objects.select_related('version__addon')
.get(version__addon=addon, filename=filename))
file_.unhide_disabled_file()
if (file_.version.addon.status in amo.MIRROR_STATUSES and
file_.status in amo.MIRROR_STATUSES):
file_.copy_to_mirror()
except File.DoesNotExist:
log.warning(u'File object does not exist for: %s.' % filepath)
except Exception:
log.error(u'Could not unhide file: %s.' % filepath,
exc_info=True)
@cronjobs.register
def deliver_hotness():
"""
Calculate hotness of all add-ons.
a = avg(users this week)
b = avg(users three weeks before this week)
hotness = (a-b) / b if a > 1000 and b > 1 else 0
"""
frozen = set(f.id for f in FrozenAddon.objects.all())
all_ids = list((Addon.objects.exclude(type=amo.ADDON_PERSONA)
.values_list('id', flat=True)))
now = datetime.now()
one_week = now - timedelta(days=7)
four_weeks = now - timedelta(days=28)
for ids in chunked(all_ids, 300):
addons = Addon.objects.no_cache().filter(id__in=ids).no_transforms()
ids = [a.id for a in addons if a.id not in frozen]
qs = (UpdateCount.objects.filter(addon__in=ids)
.values_list('addon').annotate(Avg('count')))
thisweek = dict(qs.filter(date__gte=one_week))
threeweek = dict(qs.filter(date__range=(four_weeks, one_week)))
for addon in addons:
this, three = thisweek.get(addon.id, 0), threeweek.get(addon.id, 0)
if this > 1000 and three > 1:
addon.update(hotness=(this - three) / float(three))
else:
addon.update(hotness=0)
# Let the database catch its breath.
time.sleep(10)
@cronjobs.register
def reindex_addons(index=None, addon_type=None):
from . import tasks
ids = Addon.unfiltered.values_list('id', flat=True)
if addon_type:
ids = ids.filter(type=addon_type)
ts = [tasks.index_addons.subtask(args=[chunk], kwargs=dict(index=index))
for chunk in chunked(sorted(list(ids)), 150)]
TaskSet(ts).apply_async()
@cronjobs.register
def cleanup_image_files():
"""
    Clean up all header and footer image files for themes.
    We use these images to asynchronously generate thumbnails with
    tasks; here we delete images that are older than one day.
"""
log.info('Removing one day old temporary image files for themes.')
for folder in ('persona_footer', 'persona_header'):
root = os.path.join(settings.TMP_PATH, folder)
if not os.path.exists(root):
continue
for path in os.listdir(root):
full_path = os.path.join(root, path)
age = time.time() - os.stat(full_path).st_atime
if age > 60 * 60 * 24: # One day.
log.debug('Removing image file: %s, %dsecs old.' %
(full_path, age))
os.unlink(full_path)
|
andymckay/addons-server
|
src/olympia/addons/cron.py
|
Python
|
bsd-3-clause
| 11,368
|
# Copyright Anne M. Archibald 2008
# Released under the scipy license
from __future__ import division, print_function, absolute_import
from numpy.testing import (assert_equal, assert_array_equal, assert_,
assert_almost_equal, assert_array_almost_equal)
from pytest import raises as assert_raises
import pytest
from platform import python_implementation
import numpy as np
from scipy.spatial import KDTree, Rectangle, distance_matrix, cKDTree
from scipy.spatial.ckdtree import cKDTreeNode
from scipy.spatial import minkowski_distance
import itertools
def distance_box(a, b, p, boxsize):
diff = a - b
diff[diff > 0.5 * boxsize] -= boxsize
diff[diff < -0.5 * boxsize] += boxsize
d = minkowski_distance(diff, 0, p)
return d
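# For example, with a unit box the points 0.1 and 0.9 are only 0.2 apart under
# this minimum-image convention: distance_box(np.array([0.1]), np.array([0.9]),
# 2, 1.0) returns 0.2 rather than 0.8.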
class ConsistencyTests:
def distance(self, a, b, p):
return minkowski_distance(a, b, p)
def test_nearest(self):
x = self.x
d, i = self.kdtree.query(x, 1)
assert_almost_equal(d**2,np.sum((x-self.data[i])**2))
eps = 1e-8
assert_(np.all(np.sum((self.data-x[np.newaxis,:])**2,axis=1) > d**2-eps))
def test_m_nearest(self):
x = self.x
m = self.m
dd, ii = self.kdtree.query(x, m)
d = np.amax(dd)
i = ii[np.argmax(dd)]
assert_almost_equal(d**2,np.sum((x-self.data[i])**2))
eps = 1e-8
assert_equal(np.sum(np.sum((self.data-x[np.newaxis,:])**2,axis=1) < d**2+eps),m)
def test_points_near(self):
x = self.x
d = self.d
dd, ii = self.kdtree.query(x, k=self.kdtree.n, distance_upper_bound=d)
eps = 1e-8
hits = 0
for near_d, near_i in zip(dd,ii):
if near_d == np.inf:
continue
hits += 1
assert_almost_equal(near_d**2,np.sum((x-self.data[near_i])**2))
assert_(near_d < d+eps, "near_d=%g should be less than %g" % (near_d,d))
assert_equal(np.sum(self.distance(self.data,x,2) < d**2+eps),hits)
def test_points_near_l1(self):
x = self.x
d = self.d
dd, ii = self.kdtree.query(x, k=self.kdtree.n, p=1, distance_upper_bound=d)
eps = 1e-8
hits = 0
for near_d, near_i in zip(dd,ii):
if near_d == np.inf:
continue
hits += 1
assert_almost_equal(near_d,self.distance(x,self.data[near_i],1))
assert_(near_d < d+eps, "near_d=%g should be less than %g" % (near_d,d))
assert_equal(np.sum(self.distance(self.data,x,1) < d+eps),hits)
def test_points_near_linf(self):
x = self.x
d = self.d
dd, ii = self.kdtree.query(x, k=self.kdtree.n, p=np.inf, distance_upper_bound=d)
eps = 1e-8
hits = 0
for near_d, near_i in zip(dd,ii):
if near_d == np.inf:
continue
hits += 1
assert_almost_equal(near_d,self.distance(x,self.data[near_i],np.inf))
assert_(near_d < d+eps, "near_d=%g should be less than %g" % (near_d,d))
assert_equal(np.sum(self.distance(self.data,x,np.inf) < d+eps),hits)
def test_approx(self):
x = self.x
k = self.k
eps = 0.1
d_real, i_real = self.kdtree.query(x, k)
d, i = self.kdtree.query(x, k, eps=eps)
assert_(np.all(d <= d_real*(1+eps)))
class Test_random(ConsistencyTests):
def setup_method(self):
self.n = 100
self.m = 4
np.random.seed(1234)
self.data = np.random.randn(self.n, self.m)
self.kdtree = KDTree(self.data,leafsize=2)
self.x = np.random.randn(self.m)
self.d = 0.2
self.k = 10
class Test_random_far(Test_random):
def setup_method(self):
Test_random.setup_method(self)
self.x = np.random.randn(self.m)+10
class Test_small(ConsistencyTests):
def setup_method(self):
self.data = np.array([[0,0,0],
[0,0,1],
[0,1,0],
[0,1,1],
[1,0,0],
[1,0,1],
[1,1,0],
[1,1,1]])
self.kdtree = KDTree(self.data)
self.n = self.kdtree.n
self.m = self.kdtree.m
np.random.seed(1234)
self.x = np.random.randn(3)
self.d = 0.5
self.k = 4
def test_nearest(self):
assert_array_equal(
self.kdtree.query((0,0,0.1), 1),
(0.1,0))
def test_nearest_two(self):
assert_array_equal(
self.kdtree.query((0,0,0.1), 2),
([0.1,0.9],[0,1]))
class Test_small_nonleaf(Test_small):
def setup_method(self):
Test_small.setup_method(self)
self.kdtree = KDTree(self.data,leafsize=1)
class Test_small_compiled(Test_small):
def setup_method(self):
Test_small.setup_method(self)
self.kdtree = cKDTree(self.data)
class Test_small_nonleaf_compiled(Test_small):
def setup_method(self):
Test_small.setup_method(self)
self.kdtree = cKDTree(self.data,leafsize=1)
class Test_random_compiled(Test_random):
def setup_method(self):
Test_random.setup_method(self)
self.kdtree = cKDTree(self.data)
class Test_random_far_compiled(Test_random_far):
def setup_method(self):
Test_random_far.setup_method(self)
self.kdtree = cKDTree(self.data)
class Test_vectorization:
def setup_method(self):
self.data = np.array([[0,0,0],
[0,0,1],
[0,1,0],
[0,1,1],
[1,0,0],
[1,0,1],
[1,1,0],
[1,1,1]])
self.kdtree = KDTree(self.data)
def test_single_query(self):
d, i = self.kdtree.query(np.array([0,0,0]))
assert_(isinstance(d,float))
assert_(np.issubdtype(i, np.signedinteger))
def test_vectorized_query(self):
d, i = self.kdtree.query(np.zeros((2,4,3)))
assert_equal(np.shape(d),(2,4))
assert_equal(np.shape(i),(2,4))
def test_single_query_multiple_neighbors(self):
s = 23
kk = self.kdtree.n+s
d, i = self.kdtree.query(np.array([0,0,0]),k=kk)
assert_equal(np.shape(d),(kk,))
assert_equal(np.shape(i),(kk,))
assert_(np.all(~np.isfinite(d[-s:])))
assert_(np.all(i[-s:] == self.kdtree.n))
def test_vectorized_query_multiple_neighbors(self):
s = 23
kk = self.kdtree.n+s
d, i = self.kdtree.query(np.zeros((2,4,3)),k=kk)
assert_equal(np.shape(d),(2,4,kk))
assert_equal(np.shape(i),(2,4,kk))
assert_(np.all(~np.isfinite(d[:,:,-s:])))
assert_(np.all(i[:,:,-s:] == self.kdtree.n))
def test_single_query_all_neighbors(self):
d, i = self.kdtree.query([0,0,0],k=None,distance_upper_bound=1.1)
assert_(isinstance(d,list))
assert_(isinstance(i,list))
def test_vectorized_query_all_neighbors(self):
d, i = self.kdtree.query(np.zeros((2,4,3)),k=None,distance_upper_bound=1.1)
assert_equal(np.shape(d),(2,4))
assert_equal(np.shape(i),(2,4))
assert_(isinstance(d[0,0],list))
assert_(isinstance(i[0,0],list))
class Test_vectorization_compiled:
def setup_method(self):
self.data = np.array([[0,0,0],
[0,0,1],
[0,1,0],
[0,1,1],
[1,0,0],
[1,0,1],
[1,1,0],
[1,1,1]])
self.kdtree = cKDTree(self.data)
def test_single_query(self):
d, i = self.kdtree.query([0,0,0])
assert_(isinstance(d,float))
assert_(isinstance(i,int))
def test_vectorized_query(self):
d, i = self.kdtree.query(np.zeros((2,4,3)))
assert_equal(np.shape(d),(2,4))
assert_equal(np.shape(i),(2,4))
def test_vectorized_query_noncontiguous_values(self):
np.random.seed(1234)
qs = np.random.randn(3,1000).T
ds, i_s = self.kdtree.query(qs)
for q, d, i in zip(qs,ds,i_s):
assert_equal(self.kdtree.query(q),(d,i))
def test_single_query_multiple_neighbors(self):
s = 23
kk = self.kdtree.n+s
d, i = self.kdtree.query([0,0,0],k=kk)
assert_equal(np.shape(d),(kk,))
assert_equal(np.shape(i),(kk,))
assert_(np.all(~np.isfinite(d[-s:])))
assert_(np.all(i[-s:] == self.kdtree.n))
def test_vectorized_query_multiple_neighbors(self):
s = 23
kk = self.kdtree.n+s
d, i = self.kdtree.query(np.zeros((2,4,3)),k=kk)
assert_equal(np.shape(d),(2,4,kk))
assert_equal(np.shape(i),(2,4,kk))
assert_(np.all(~np.isfinite(d[:,:,-s:])))
assert_(np.all(i[:,:,-s:] == self.kdtree.n))
class ball_consistency:
def distance(self, a, b, p):
return minkowski_distance(a, b, p)
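    # With slack eps, query_ball_point may return points out to d*(1+eps) but
    # must not miss any point closer than d/(1+eps); the two tests below check
    # each direction.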
def test_in_ball(self):
l = self.T.query_ball_point(self.x, self.d, p=self.p, eps=self.eps)
for i in l:
assert_(self.distance(self.data[i],self.x,self.p) <= self.d*(1.+self.eps))
def test_found_all(self):
c = np.ones(self.T.n,dtype=bool)
l = self.T.query_ball_point(self.x, self.d, p=self.p, eps=self.eps)
c[l] = False
assert_(np.all(self.distance(self.data[c],self.x,self.p) >= self.d/(1.+self.eps)))
class Test_random_ball(ball_consistency):
def setup_method(self):
n = 100
m = 4
np.random.seed(1234)
self.data = np.random.randn(n,m)
self.T = KDTree(self.data,leafsize=2)
self.x = np.random.randn(m)
self.p = 2.
self.eps = 0
self.d = 0.2
class Test_random_ball_compiled(ball_consistency):
def setup_method(self):
n = 100
m = 4
np.random.seed(1234)
self.data = np.random.randn(n,m)
self.T = cKDTree(self.data,leafsize=2)
self.x = np.random.randn(m)
self.p = 2.
self.eps = 0
self.d = 0.2
class Test_random_ball_compiled_periodic(ball_consistency):
def distance(self, a, b, p):
return distance_box(a, b, p, 1.0)
def setup_method(self):
n = 10000
m = 4
np.random.seed(1234)
self.data = np.random.uniform(size=(n,m))
self.T = cKDTree(self.data,leafsize=2, boxsize=1)
self.x = np.ones(m) * 0.1
self.p = 2.
self.eps = 0
self.d = 0.2
def test_in_ball_outside(self):
l = self.T.query_ball_point(self.x + 1.0, self.d, p=self.p, eps=self.eps)
for i in l:
assert_(self.distance(self.data[i],self.x,self.p) <= self.d*(1.+self.eps))
l = self.T.query_ball_point(self.x - 1.0, self.d, p=self.p, eps=self.eps)
for i in l:
assert_(self.distance(self.data[i],self.x,self.p) <= self.d*(1.+self.eps))
def test_found_all_outside(self):
c = np.ones(self.T.n,dtype=bool)
l = self.T.query_ball_point(self.x + 1.0, self.d, p=self.p, eps=self.eps)
c[l] = False
assert_(np.all(self.distance(self.data[c],self.x,self.p) >= self.d/(1.+self.eps)))
l = self.T.query_ball_point(self.x - 1.0, self.d, p=self.p, eps=self.eps)
c[l] = False
assert_(np.all(self.distance(self.data[c],self.x,self.p) >= self.d/(1.+self.eps)))
class Test_random_ball_approx(Test_random_ball):
def setup_method(self):
Test_random_ball.setup_method(self)
self.eps = 0.1
class Test_random_ball_approx_compiled(Test_random_ball_compiled):
def setup_method(self):
Test_random_ball_compiled.setup_method(self)
self.eps = 0.1
class Test_random_ball_approx_compiled_periodic(Test_random_ball_compiled_periodic):
def setup_method(self):
Test_random_ball_compiled_periodic.setup_method(self)
self.eps = 0.1
class Test_random_ball_far(Test_random_ball):
def setup_method(self):
Test_random_ball.setup_method(self)
self.d = 2.
class Test_random_ball_far_compiled(Test_random_ball_compiled):
def setup_method(self):
Test_random_ball_compiled.setup_method(self)
self.d = 2.
class Test_random_ball_far_compiled_periodic(Test_random_ball_compiled_periodic):
def setup_method(self):
Test_random_ball_compiled_periodic.setup_method(self)
self.d = 2.
class Test_random_ball_l1(Test_random_ball):
def setup_method(self):
Test_random_ball.setup_method(self)
self.p = 1
class Test_random_ball_l1_compiled(Test_random_ball_compiled):
def setup_method(self):
Test_random_ball_compiled.setup_method(self)
self.p = 1
class Test_random_ball_l1_compiled_periodic(Test_random_ball_compiled_periodic):
def setup_method(self):
Test_random_ball_compiled_periodic.setup_method(self)
self.p = 1
class Test_random_ball_linf(Test_random_ball):
def setup_method(self):
Test_random_ball.setup_method(self)
self.p = np.inf
class Test_random_ball_linf_compiled_periodic(Test_random_ball_compiled_periodic):
def setup_method(self):
Test_random_ball_compiled_periodic.setup_method(self)
self.p = np.inf
def test_random_ball_vectorized():
n = 20
m = 5
T = KDTree(np.random.randn(n,m))
r = T.query_ball_point(np.random.randn(2,3,m),1)
assert_equal(r.shape,(2,3))
assert_(isinstance(r[0,0],list))
def test_random_ball_vectorized_compiled():
n = 20
m = 5
np.random.seed(1234)
T = cKDTree(np.random.randn(n,m))
r = T.query_ball_point(np.random.randn(2,3,m),1)
assert_equal(r.shape,(2,3))
assert_(isinstance(r[0,0],list))
def test_query_ball_point_multithreading():
np.random.seed(0)
n = 5000
k = 2
points = np.random.randn(n,k)
T = cKDTree(points)
l1 = T.query_ball_point(points,0.003,n_jobs=1)
l2 = T.query_ball_point(points,0.003,n_jobs=64)
l3 = T.query_ball_point(points,0.003,n_jobs=-1)
for i in range(n):
if l1[i] or l2[i]:
assert_array_equal(l1[i],l2[i])
for i in range(n):
if l1[i] or l3[i]:
assert_array_equal(l1[i],l3[i])
class two_trees_consistency:
def distance(self, a, b, p):
return minkowski_distance(a, b, p)
def test_all_in_ball(self):
r = self.T1.query_ball_tree(self.T2, self.d, p=self.p, eps=self.eps)
for i, l in enumerate(r):
for j in l:
assert_(self.distance(self.data1[i],self.data2[j],self.p) <= self.d*(1.+self.eps))
def test_found_all(self):
r = self.T1.query_ball_tree(self.T2, self.d, p=self.p, eps=self.eps)
for i, l in enumerate(r):
c = np.ones(self.T2.n,dtype=bool)
c[l] = False
assert_(np.all(self.distance(self.data2[c],self.data1[i],self.p) >= self.d/(1.+self.eps)))
class Test_two_random_trees(two_trees_consistency):
def setup_method(self):
n = 50
m = 4
np.random.seed(1234)
self.data1 = np.random.randn(n,m)
self.T1 = KDTree(self.data1,leafsize=2)
self.data2 = np.random.randn(n,m)
self.T2 = KDTree(self.data2,leafsize=2)
self.p = 2.
self.eps = 0
self.d = 0.2
class Test_two_random_trees_compiled(two_trees_consistency):
def setup_method(self):
n = 50
m = 4
np.random.seed(1234)
self.data1 = np.random.randn(n,m)
self.T1 = cKDTree(self.data1,leafsize=2)
self.data2 = np.random.randn(n,m)
self.T2 = cKDTree(self.data2,leafsize=2)
self.p = 2.
self.eps = 0
self.d = 0.2
class Test_two_random_trees_compiled_periodic(two_trees_consistency):
def distance(self, a, b, p):
return distance_box(a, b, p, 1.0)
def setup_method(self):
n = 50
m = 4
np.random.seed(1234)
self.data1 = np.random.uniform(size=(n,m))
self.T1 = cKDTree(self.data1,leafsize=2, boxsize=1.0)
self.data2 = np.random.uniform(size=(n,m))
self.T2 = cKDTree(self.data2,leafsize=2, boxsize=1.0)
self.p = 2.
self.eps = 0
self.d = 0.2
class Test_two_random_trees_far(Test_two_random_trees):
def setup_method(self):
Test_two_random_trees.setup_method(self)
self.d = 2
class Test_two_random_trees_far_compiled(Test_two_random_trees_compiled):
def setup_method(self):
Test_two_random_trees_compiled.setup_method(self)
self.d = 2
class Test_two_random_trees_far_compiled_periodic(Test_two_random_trees_compiled_periodic):
def setup_method(self):
Test_two_random_trees_compiled_periodic.setup_method(self)
self.d = 2
class Test_two_random_trees_linf(Test_two_random_trees):
def setup_method(self):
Test_two_random_trees.setup_method(self)
self.p = np.inf
class Test_two_random_trees_linf_compiled(Test_two_random_trees_compiled):
def setup_method(self):
Test_two_random_trees_compiled.setup_method(self)
self.p = np.inf
class Test_two_random_trees_linf_compiled_periodic(Test_two_random_trees_compiled_periodic):
def setup_method(self):
Test_two_random_trees_compiled_periodic.setup_method(self)
self.p = np.inf
class Test_rectangle:
def setup_method(self):
self.rect = Rectangle([0,0],[1,1])
def test_min_inside(self):
assert_almost_equal(self.rect.min_distance_point([0.5,0.5]),0)
def test_min_one_side(self):
assert_almost_equal(self.rect.min_distance_point([0.5,1.5]),0.5)
def test_min_two_sides(self):
assert_almost_equal(self.rect.min_distance_point([2,2]),np.sqrt(2))
def test_max_inside(self):
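        # the farthest point of the unit square from its centre is a corner,
        # at distance sqrt(0.5**2 + 0.5**2) == 1/sqrt(2)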
assert_almost_equal(self.rect.max_distance_point([0.5,0.5]),1/np.sqrt(2))
def test_max_one_side(self):
assert_almost_equal(self.rect.max_distance_point([0.5,1.5]),np.hypot(0.5,1.5))
def test_max_two_sides(self):
assert_almost_equal(self.rect.max_distance_point([2,2]),2*np.sqrt(2))
def test_split(self):
less, greater = self.rect.split(0,0.1)
assert_array_equal(less.maxes,[0.1,1])
assert_array_equal(less.mins,[0,0])
assert_array_equal(greater.maxes,[1,1])
assert_array_equal(greater.mins,[0.1,0])
def test_distance_l2():
assert_almost_equal(minkowski_distance([0,0],[1,1],2),np.sqrt(2))
def test_distance_l1():
assert_almost_equal(minkowski_distance([0,0],[1,1],1),2)
def test_distance_linf():
assert_almost_equal(minkowski_distance([0,0],[1,1],np.inf),1)
def test_distance_vectorization():
np.random.seed(1234)
x = np.random.randn(10,1,3)
y = np.random.randn(1,7,3)
assert_equal(minkowski_distance(x,y).shape,(10,7))
class count_neighbors_consistency:
def test_one_radius(self):
r = 0.2
assert_equal(self.T1.count_neighbors(self.T2, r),
np.sum([len(l) for l in self.T1.query_ball_tree(self.T2,r)]))
def test_large_radius(self):
r = 1000
assert_equal(self.T1.count_neighbors(self.T2, r),
np.sum([len(l) for l in self.T1.query_ball_tree(self.T2,r)]))
def test_multiple_radius(self):
rs = np.exp(np.linspace(np.log(0.01),np.log(10),3))
results = self.T1.count_neighbors(self.T2, rs)
assert_(np.all(np.diff(results) >= 0))
for r,result in zip(rs, results):
assert_equal(self.T1.count_neighbors(self.T2, r), result)
class Test_count_neighbors(count_neighbors_consistency):
def setup_method(self):
n = 50
m = 2
np.random.seed(1234)
self.T1 = KDTree(np.random.randn(n,m),leafsize=2)
self.T2 = KDTree(np.random.randn(n,m),leafsize=2)
class Test_count_neighbors_compiled(count_neighbors_consistency):
def setup_method(self):
n = 50
m = 2
np.random.seed(1234)
self.T1 = cKDTree(np.random.randn(n,m),leafsize=2)
self.T2 = cKDTree(np.random.randn(n,m),leafsize=2)
class sparse_distance_matrix_consistency:
def distance(self, a, b, p):
return minkowski_distance(a, b, p)
def test_consistency_with_neighbors(self):
M = self.T1.sparse_distance_matrix(self.T2, self.r)
r = self.T1.query_ball_tree(self.T2, self.r)
for i,l in enumerate(r):
for j in l:
assert_almost_equal(M[i,j],
self.distance(self.T1.data[i], self.T2.data[j], self.p),
decimal=14)
for ((i,j),d) in M.items():
assert_(j in r[i])
def test_zero_distance(self):
# raises an exception for bug 870 (FIXME: Does it?)
self.T1.sparse_distance_matrix(self.T1, self.r)
class Test_sparse_distance_matrix(sparse_distance_matrix_consistency):
def setup_method(self):
n = 50
m = 4
np.random.seed(1234)
data1 = np.random.randn(n,m)
data2 = np.random.randn(n,m)
self.T1 = cKDTree(data1,leafsize=2)
self.T2 = cKDTree(data2,leafsize=2)
self.r = 0.5
self.p = 2
self.data1 = data1
self.data2 = data2
self.n = n
self.m = m
class Test_sparse_distance_matrix_compiled(sparse_distance_matrix_consistency):
def setup_method(self):
n = 50
m = 4
np.random.seed(0)
data1 = np.random.randn(n,m)
data2 = np.random.randn(n,m)
self.T1 = cKDTree(data1,leafsize=2)
self.T2 = cKDTree(data2,leafsize=2)
self.ref_T1 = KDTree(data1, leafsize=2)
self.ref_T2 = KDTree(data2, leafsize=2)
self.r = 0.5
self.n = n
self.m = m
self.data1 = data1
self.data2 = data2
self.p = 2
def test_consistency_with_python(self):
M1 = self.T1.sparse_distance_matrix(self.T2, self.r)
M2 = self.ref_T1.sparse_distance_matrix(self.ref_T2, self.r)
assert_array_almost_equal(M1.todense(), M2.todense(), decimal=14)
def test_against_logic_error_regression(self):
# regression test for gh-5077 logic error
np.random.seed(0)
too_many = np.array(np.random.randn(18, 2), dtype=int)
tree = cKDTree(too_many, balanced_tree=False, compact_nodes=False)
d = tree.sparse_distance_matrix(tree, 3).todense()
assert_array_almost_equal(d, d.T, decimal=14)
def test_ckdtree_return_types(self):
# brute-force reference
ref = np.zeros((self.n,self.n))
for i in range(self.n):
for j in range(self.n):
v = self.data1[i,:] - self.data2[j,:]
ref[i,j] = np.dot(v,v)
ref = np.sqrt(ref)
ref[ref > self.r] = 0.
# test return type 'dict'
dist = np.zeros((self.n,self.n))
r = self.T1.sparse_distance_matrix(self.T2, self.r, output_type='dict')
for i,j in r.keys():
dist[i,j] = r[(i,j)]
assert_array_almost_equal(ref, dist, decimal=14)
# test return type 'ndarray'
dist = np.zeros((self.n,self.n))
r = self.T1.sparse_distance_matrix(self.T2, self.r,
output_type='ndarray')
for k in range(r.shape[0]):
i = r['i'][k]
j = r['j'][k]
v = r['v'][k]
dist[i,j] = v
assert_array_almost_equal(ref, dist, decimal=14)
# test return type 'dok_matrix'
r = self.T1.sparse_distance_matrix(self.T2, self.r,
output_type='dok_matrix')
assert_array_almost_equal(ref, r.todense(), decimal=14)
# test return type 'coo_matrix'
r = self.T1.sparse_distance_matrix(self.T2, self.r,
output_type='coo_matrix')
assert_array_almost_equal(ref, r.todense(), decimal=14)
def test_distance_matrix():
m = 10
n = 11
k = 4
np.random.seed(1234)
xs = np.random.randn(m,k)
ys = np.random.randn(n,k)
ds = distance_matrix(xs,ys)
assert_equal(ds.shape, (m,n))
for i in range(m):
for j in range(n):
assert_almost_equal(minkowski_distance(xs[i],ys[j]),ds[i,j])
def test_distance_matrix_looping():
m = 10
n = 11
k = 4
np.random.seed(1234)
xs = np.random.randn(m,k)
ys = np.random.randn(n,k)
ds = distance_matrix(xs,ys)
dsl = distance_matrix(xs,ys,threshold=1)
assert_equal(ds,dsl)
def check_onetree_query(T,d):
r = T.query_ball_tree(T, d)
s = set()
for i, l in enumerate(r):
for j in l:
if i < j:
s.add((i,j))
assert_(s == T.query_pairs(d))
def test_onetree_query():
np.random.seed(0)
n = 50
k = 4
points = np.random.randn(n,k)
T = KDTree(points)
check_onetree_query(T, 0.1)
points = np.random.randn(3*n,k)
points[:n] *= 0.001
points[n:2*n] += 2
T = KDTree(points)
check_onetree_query(T, 0.1)
check_onetree_query(T, 0.001)
check_onetree_query(T, 0.00001)
check_onetree_query(T, 1e-6)
def test_onetree_query_compiled():
np.random.seed(0)
n = 100
k = 4
points = np.random.randn(n,k)
T = cKDTree(points)
check_onetree_query(T, 0.1)
points = np.random.randn(3*n,k)
points[:n] *= 0.001
points[n:2*n] += 2
T = cKDTree(points)
check_onetree_query(T, 0.1)
check_onetree_query(T, 0.001)
check_onetree_query(T, 0.00001)
check_onetree_query(T, 1e-6)
def test_query_pairs_single_node():
tree = KDTree([[0, 1]])
assert_equal(tree.query_pairs(0.5), set())
def test_query_pairs_single_node_compiled():
tree = cKDTree([[0, 1]])
assert_equal(tree.query_pairs(0.5), set())
def test_ckdtree_query_pairs():
np.random.seed(0)
n = 50
k = 2
r = 0.1
r2 = r**2
points = np.random.randn(n,k)
T = cKDTree(points)
# brute force reference
brute = set()
for i in range(n):
for j in range(i+1,n):
v = points[i,:] - points[j,:]
if np.dot(v,v) <= r2:
brute.add((i,j))
l0 = sorted(brute)
# test default return type
s = T.query_pairs(r)
l1 = sorted(s)
assert_array_equal(l0,l1)
# test return type 'set'
s = T.query_pairs(r, output_type='set')
l1 = sorted(s)
assert_array_equal(l0,l1)
# test return type 'ndarray'
s = set()
arr = T.query_pairs(r, output_type='ndarray')
for i in range(arr.shape[0]):
s.add((int(arr[i,0]),int(arr[i,1])))
l2 = sorted(s)
assert_array_equal(l0,l2)
def test_ball_point_ints():
# Regression test for #1373.
x, y = np.mgrid[0:4, 0:4]
points = list(zip(x.ravel(), y.ravel()))
tree = KDTree(points)
assert_equal(sorted([4, 8, 9, 12]),
sorted(tree.query_ball_point((2, 0), 1)))
points = np.asarray(points, dtype=float)
tree = KDTree(points)
assert_equal(sorted([4, 8, 9, 12]),
sorted(tree.query_ball_point((2, 0), 1)))
def test_kdtree_comparisons():
# Regression test: node comparisons were done wrong in 0.12 w/Py3.
nodes = [KDTree.node() for _ in range(3)]
assert_equal(sorted(nodes), sorted(nodes[::-1]))
def test_ckdtree_build_modes():
# check if different build modes for cKDTree give
# similar query results
np.random.seed(0)
n = 5000
k = 4
points = np.random.randn(n, k)
T1 = cKDTree(points).query(points, k=5)[-1]
T2 = cKDTree(points, compact_nodes=False).query(points, k=5)[-1]
T3 = cKDTree(points, balanced_tree=False).query(points, k=5)[-1]
T4 = cKDTree(points, compact_nodes=False, balanced_tree=False).query(points, k=5)[-1]
assert_array_equal(T1, T2)
assert_array_equal(T1, T3)
assert_array_equal(T1, T4)
def test_ckdtree_pickle():
# test if it is possible to pickle
# a cKDTree
try:
import cPickle as pickle
except ImportError:
import pickle
np.random.seed(0)
n = 50
k = 4
points = np.random.randn(n, k)
T1 = cKDTree(points)
tmp = pickle.dumps(T1)
T2 = pickle.loads(tmp)
T1 = T1.query(points, k=5)[-1]
T2 = T2.query(points, k=5)[-1]
assert_array_equal(T1, T2)
def test_ckdtree_pickle_boxsize():
# test if it is possible to pickle a periodic
# cKDTree
try:
import cPickle as pickle
except ImportError:
import pickle
np.random.seed(0)
n = 50
k = 4
points = np.random.uniform(size=(n, k))
T1 = cKDTree(points, boxsize=1.0)
tmp = pickle.dumps(T1)
T2 = pickle.loads(tmp)
T1 = T1.query(points, k=5)[-1]
T2 = T2.query(points, k=5)[-1]
assert_array_equal(T1, T2)
def test_ckdtree_copy_data():
# check if copy_data=True makes the kd-tree
# impervious to data corruption by modification of
# the data array
np.random.seed(0)
n = 5000
k = 4
points = np.random.randn(n, k)
T = cKDTree(points, copy_data=True)
q = points.copy()
T1 = T.query(q, k=5)[-1]
points[...] = np.random.randn(n, k)
T2 = T.query(q, k=5)[-1]
assert_array_equal(T1, T2)
def test_ckdtree_parallel():
# check if parallel=True also generates correct
# query results
np.random.seed(0)
n = 5000
k = 4
points = np.random.randn(n, k)
T = cKDTree(points)
T1 = T.query(points, k=5, n_jobs=64)[-1]
T2 = T.query(points, k=5, n_jobs=-1)[-1]
T3 = T.query(points, k=5)[-1]
assert_array_equal(T1, T2)
assert_array_equal(T1, T3)
def test_ckdtree_view():
# Check that the nodes can be correctly viewed from Python.
# This test also sanity checks each node in the cKDTree, and
# thus verifies the internal structure of the kd-tree.
np.random.seed(0)
n = 100
k = 4
points = np.random.randn(n, k)
kdtree = cKDTree(points)
# walk the whole kd-tree and sanity check each node
def recurse_tree(n):
assert_(isinstance(n, cKDTreeNode))
if n.split_dim == -1:
assert_(n.lesser is None)
assert_(n.greater is None)
assert_(n.indices.shape[0] <= kdtree.leafsize)
else:
recurse_tree(n.lesser)
recurse_tree(n.greater)
x = n.lesser.data_points[:, n.split_dim]
y = n.greater.data_points[:, n.split_dim]
assert_(x.max() < y.min())
recurse_tree(kdtree.tree)
# check that indices are correctly retrieved
n = kdtree.tree
assert_array_equal(np.sort(n.indices), range(100))
# check that data_points are correctly retrieved
assert_array_equal(kdtree.data[n.indices, :], n.data_points)
# cKDTree is specialized to type double points, so no need to make
# a unit test corresponding to test_ball_point_ints()
def test_ckdtree_list_k():
# check that a cKDTree query with a list of k values agrees with scalar k queries
n = 200
m = 2
klist = [1, 2, 3]
kint = 3
np.random.seed(1234)
data = np.random.uniform(size=(n, m))
kdtree = cKDTree(data, leafsize=1)
# check agreement between arange(1,k+1) and k
dd, ii = kdtree.query(data, klist)
dd1, ii1 = kdtree.query(data, kint)
assert_equal(dd, dd1)
assert_equal(ii, ii1)
# now check skipping one element
klist = np.array([1, 3])
kint = 3
dd, ii = kdtree.query(data, kint)
dd1, ii1 = kdtree.query(data, klist)
assert_equal(dd1, dd[..., klist - 1])
assert_equal(ii1, ii[..., klist - 1])
# check k == 1 special case
# and k == [1] non-special case
dd, ii = kdtree.query(data, 1)
dd1, ii1 = kdtree.query(data, [1])
assert_equal(len(dd.shape), 1)
assert_equal(len(dd1.shape), 2)
assert_equal(dd, np.ravel(dd1))
assert_equal(ii, np.ravel(ii1))
def test_ckdtree_box():
# check ckdtree periodic boundary
n = 2000
m = 3
k = 3
np.random.seed(1234)
data = np.random.uniform(size=(n, m))
kdtree = cKDTree(data, leafsize=1, boxsize=1.0)
# use a non-periodic cKDTree to simulate the periodic box with explicit images
kdtree2 = cKDTree(data, leafsize=1)
for p in [1, 2, 3.0, np.inf]:
dd, ii = kdtree.query(data, k, p=p)
dd1, ii1 = kdtree.query(data + 1.0, k, p=p)
assert_almost_equal(dd, dd1)
assert_equal(ii, ii1)
dd1, ii1 = kdtree.query(data - 1.0, k, p=p)
assert_almost_equal(dd, dd1)
assert_equal(ii, ii1)
dd2, ii2 = simulate_periodic_box(kdtree2, data, k, boxsize=1.0, p=p)
assert_almost_equal(dd, dd2)
assert_equal(ii, ii2)
def test_ckdtree_box_0boxsize():
# check ckdtree periodic boundary that mimics non-periodic
n = 2000
m = 2
k = 3
np.random.seed(1234)
data = np.random.uniform(size=(n, m))
kdtree = cKDTree(data, leafsize=1, boxsize=0.0)
# compare against a plain (non-periodic) cKDTree
kdtree2 = cKDTree(data, leafsize=1)
for p in [1, 2, np.inf]:
dd, ii = kdtree.query(data, k, p=p)
dd1, ii1 = kdtree2.query(data, k, p=p)
assert_almost_equal(dd, dd1)
assert_equal(ii, ii1)
def test_ckdtree_box_upper_bounds():
data = np.linspace(0, 2, 10).reshape(-1, 2)
data[:, 1] += 10
assert_raises(ValueError, cKDTree, data, leafsize=1, boxsize=1.0)
assert_raises(ValueError, cKDTree, data, leafsize=1, boxsize=(0.0, 2.0))
# a boxsize of 0 skips periodicity in that dimension.
cKDTree(data, leafsize=1, boxsize=(2.0, 0.0))
def test_ckdtree_box_lower_bounds():
data = np.linspace(-1, 1, 10)
assert_raises(ValueError, cKDTree, data, leafsize=1, boxsize=1.0)
def simulate_periodic_box(kdtree, data, k, boxsize, p):
dd = []
ii = []
x = np.arange(3 ** data.shape[1])
nn = np.array(np.unravel_index(x, [3] * data.shape[1])).T
nn = nn - 1.0
for n in nn:
image = data + n * 1.0 * boxsize
dd2, ii2 = kdtree.query(image, k, p=p)
dd2 = dd2.reshape(-1, k)
ii2 = ii2.reshape(-1, k)
dd.append(dd2)
ii.append(ii2)
dd = np.concatenate(dd, axis=-1)
ii = np.concatenate(ii, axis=-1)
result = np.empty([len(data), len(nn) * k], dtype=[
('ii', 'i8'),
('dd', 'f8')])
result['ii'][:] = ii
result['dd'][:] = dd
result.sort(order='dd')
return result['dd'][:, :k], result['ii'][:,:k]
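# A minimal illustrative sketch, not part of the upstream test module: how
# simulate_periodic_box enumerates the 3**d periodic images. Each integer in
# 0..3**d-1 is unravelled into base-3 digits and shifted into {-1, 0, +1};
# for d=2 this yields the 9 offsets (-1,-1), (-1,0), ..., (1,1). The helper
# name _enumerate_image_offsets is illustrative only.
def _enumerate_image_offsets(d):
    x = np.arange(3 ** d)
    return np.array(np.unravel_index(x, [3] * d)).T - 1.0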
@pytest.mark.skipif(python_implementation() == 'PyPy',
reason="Fails on PyPy CI runs. See #9507")
def test_ckdtree_memuse():
# unit test adaptation of gh-5630
# NOTE: this will fail when run via valgrind,
# because rss is no longer a reliable memory usage indicator.
try:
import resource
except ImportError:
# the resource module is not available on Windows
return
# Make some data
dx, dy = 0.05, 0.05
y, x = np.mgrid[slice(1, 5 + dy, dy),
slice(1, 5 + dx, dx)]
z = np.sin(x)**10 + np.cos(10 + y*x) * np.cos(x)
z_copy = np.empty_like(z)
z_copy[:] = z
# Place FILLVAL in z_copy at a random number of random locations
FILLVAL = 99.
mask = np.random.randint(0, z.size, np.random.randint(50) + 5)
z_copy.flat[mask] = FILLVAL
igood = np.vstack(np.nonzero(x != FILLVAL)).T
ibad = np.vstack(np.nonzero(x == FILLVAL)).T
mem_use = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
# burn-in
for i in range(10):
tree = cKDTree(igood)
# count memleaks while constructing and querying cKDTree
num_leaks = 0
for i in range(100):
mem_use = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
tree = cKDTree(igood)
dist, iquery = tree.query(ibad, k=4, p=2)
new_mem_use = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
if new_mem_use > mem_use:
num_leaks += 1
# ideally zero leaks, but errors might accidentally happen
# outside cKDTree
assert_(num_leaks < 10)
def test_ckdtree_weights():
data = np.linspace(0, 1, 4).reshape(-1, 1)
tree1 = cKDTree(data, leafsize=1)
weights = np.ones(len(data), dtype='f4')
nw = tree1._build_weights(weights)
assert_array_equal(nw, [4, 2, 1, 1, 2, 1, 1])
assert_raises(ValueError, tree1._build_weights, weights[:-1])
for i in range(10):
# since weights are uniform, these shall agree:
c1 = tree1.count_neighbors(tree1, np.linspace(0, 10, i))
c2 = tree1.count_neighbors(tree1, np.linspace(0, 10, i),
weights=(weights, weights))
c3 = tree1.count_neighbors(tree1, np.linspace(0, 10, i),
weights=(weights, None))
c4 = tree1.count_neighbors(tree1, np.linspace(0, 10, i),
weights=(None, weights))
c5 = tree1.count_neighbors(tree1, np.linspace(0, 10, i),
weights=weights)
assert_array_equal(c1, c2)
assert_array_equal(c1, c3)
assert_array_equal(c1, c4)
for i in range(len(data)):
# this tests removal of one data point by setting weight to 0
w1 = weights.copy()
w1[i] = 0
data2 = data[w1 != 0]
w2 = weights[w1 != 0]
tree2 = cKDTree(data2)
c1 = tree1.count_neighbors(tree1, np.linspace(0, 10, 100),
weights=(w1, w1))
# "c2 is correct"
c2 = tree2.count_neighbors(tree2, np.linspace(0, 10, 100))
assert_array_equal(c1, c2)
# passing a single weights array for two different trees must raise
# ValueError rather than crash
assert_raises(ValueError, tree1.count_neighbors,
tree2, np.linspace(0, 10, 100), weights=w1)
def test_ckdtree_count_neighbors_multiple_r():
n = 2000
m = 2
np.random.seed(1234)
data = np.random.normal(size=(n, m))
kdtree = cKDTree(data, leafsize=1)
r0 = [0, 0.01, 0.01, 0.02, 0.05]
i0 = np.arange(len(r0))
n0 = kdtree.count_neighbors(kdtree, r0)
nnc = kdtree.count_neighbors(kdtree, r0, cumulative=False)
assert_equal(n0, nnc.cumsum())
for i, r in zip(itertools.permutations(i0),
itertools.permutations(r0)):
# permute n0 by i and it shall agree
n = kdtree.count_neighbors(kdtree, r)
assert_array_equal(n, n0[list(i)])
def test_len0_arrays():
# make sure len-0 arrays are handled correctly
# in range queries (gh-5639)
np.random.seed(1234)
X = np.random.rand(10,2)
Y = np.random.rand(10,2)
tree = cKDTree(X)
# query_ball_point (single)
d,i = tree.query([.5, .5], k=1)
z = tree.query_ball_point([.5, .5], 0.1*d)
assert_array_equal(z, [])
# query_ball_point (multiple)
d,i = tree.query(Y, k=1)
mind = d.min()
z = tree.query_ball_point(Y, 0.1*mind)
y = np.empty(shape=(10,), dtype=object)
y.fill([])
assert_array_equal(y, z)
# query_ball_tree
other = cKDTree(Y)
y = tree.query_ball_tree(other, 0.1*mind)
assert_array_equal(10*[[]], y)
# count_neighbors
y = tree.count_neighbors(other, 0.1*mind)
assert_(y == 0)
# sparse_distance_matrix
y = tree.sparse_distance_matrix(other, 0.1*mind, output_type='dok_matrix')
assert_array_equal(y == np.zeros((10,10)), True)
y = tree.sparse_distance_matrix(other, 0.1*mind, output_type='coo_matrix')
assert_array_equal(y == np.zeros((10,10)), True)
y = tree.sparse_distance_matrix(other, 0.1*mind, output_type='dict')
assert_equal(y, {})
y = tree.sparse_distance_matrix(other,0.1*mind, output_type='ndarray')
_dtype = [('i',np.intp), ('j',np.intp), ('v',np.float64)]
res_dtype = np.dtype(_dtype, align=True)
z = np.empty(shape=(0,), dtype=res_dtype)
assert_array_equal(y, z)
# query_pairs
d,i = tree.query(X, k=2)
mind = d[:,-1].min()
y = tree.query_pairs(0.1*mind, output_type='set')
assert_equal(y, set())
y = tree.query_pairs(0.1*mind, output_type='ndarray')
z = np.empty(shape=(0,2), dtype=np.intp)
assert_array_equal(y, z)
def test_ckdtree_duplicated_inputs():
# check ckdtree with duplicated inputs
n = 1024
for m in range(1, 8):
data = np.concatenate([
np.ones((n // 2, m)) * 1,
np.ones((n // 2, m)) * 2], axis=0)
# the tree shall not contain more than 3 nodes:
# the root plus a left leaf (all 1s) and a right leaf (all 2s)
kdtree = cKDTree(data, leafsize=1)
assert_equal(kdtree.size, 3)
kdtree = cKDTree(data)
assert_equal(kdtree.size, 3)
# if compact_nodes is disabled, the number of nodes is
# n (one per leaf) + (m - 1) * 2 (splits per dimension) + 1 (the root)
kdtree = cKDTree(data, compact_nodes=False, leafsize=1)
assert_equal(kdtree.size, n + m * 2 - 1)
def test_ckdtree_noncumulative_nondecreasing():
# count_neighbors with cumulative=False requires the radii to be
# sorted in nondecreasing order, otherwise ValueError is raised
kdtree = cKDTree([[0]], leafsize=1)
assert_raises(ValueError, kdtree.count_neighbors,
kdtree, [0.1, 0], cumulative=False)
def test_short_knn():
# The test case is based on github: #6425 by @SteveDoyle2
xyz = np.array([
[0., 0., 0.],
[1.01, 0., 0.],
[0., 1., 0.],
[0., 1.01, 0.],
[1., 0., 0.],
[1., 1., 0.],],
dtype='float64')
ckdt = cKDTree(xyz)
deq, ieq = ckdt.query(xyz, k=4, distance_upper_bound=0.2)
assert_array_almost_equal(deq,
[[0., np.inf, np.inf, np.inf],
[0., 0.01, np.inf, np.inf],
[0., 0.01, np.inf, np.inf],
[0., 0.01, np.inf, np.inf],
[0., 0.01, np.inf, np.inf],
[0., np.inf, np.inf, np.inf]])
class Test_sorted_query_ball_point(object):
def setup_method(self):
np.random.seed(1234)
self.x = np.random.randn(100, 1)
self.ckdt = cKDTree(self.x)
def test_return_sorted_True(self):
idxs_list = self.ckdt.query_ball_point(self.x, 1., return_sorted=True)
for idxs in idxs_list:
assert_array_equal(idxs, sorted(idxs))
def test_return_sorted_None(self):
"""Previous behavior was to sort the returned indices if there were
multiple points per query but not sort them if there was a single point
per query."""
idxs_list = self.ckdt.query_ball_point(self.x, 1.)
for idxs in idxs_list:
assert_array_equal(idxs, sorted(idxs))
idxs_list_single = [self.ckdt.query_ball_point(xi, 1.) for xi in self.x]
idxs_list_False = self.ckdt.query_ball_point(self.x, 1., return_sorted=False)
for idxs0, idxs1 in zip(idxs_list_False, idxs_list_single):
assert_array_equal(idxs0, idxs1)
|
Eric89GXL/scipy
|
scipy/spatial/tests/test_kdtree.py
|
Python
|
bsd-3-clause
| 42,617
|
# Copyright (c) 2009-2012 Denis Bilenko. See LICENSE for details.
"""Locking primitives"""
from gevent.hub import getcurrent
from gevent._semaphore import Semaphore
__all__ = ['Semaphore', 'DummySemaphore', 'BoundedSemaphore', 'RLock']
class DummySemaphore(object):
# XXX what is this used for?
"""A Semaphore initialized with "infinite" initial value. Neither of its methods ever block."""
def __str__(self):
return '<%s>' % self.__class__.__name__
def locked(self):
return False
def release(self):
pass
def rawlink(self, callback):
# XXX should still work and notify?
pass
def unlink(self, callback):
pass
def wait(self, timeout=None):
pass
def acquire(self, blocking=True, timeout=None):
pass
def __enter__(self):
pass
def __exit__(self, typ, val, tb):
pass
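# A minimal illustrative sketch, not part of the upstream module: DummySemaphore can
# stand in anywhere a real Semaphore is expected; acquire/release/wait never block.
# The helper name _dummy_semaphore_example is illustrative only.
def _dummy_semaphore_example():
    sem = DummySemaphore()
    with sem:               # __enter__/__exit__ are no-ops
        assert not sem.locked()
    sem.acquire()
    sem.release()
    sem.wait(timeout=1)     # returns immediately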
class BoundedSemaphore(Semaphore):
"""A bounded semaphore checks to make sure its current value doesn't exceed its initial value.
If it does, ``ValueError`` is raised. In most situations semaphores are used to guard resources
with limited capacity. If the semaphore is released too many times it's a sign of a bug.
If not given, *value* defaults to 1."""
def __init__(self, value=1):
Semaphore.__init__(self, value)
self._initial_value = value
def release(self):
if self.counter >= self._initial_value:
raise ValueError("Semaphore released too many times")
return Semaphore.release(self)
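# A minimal illustrative sketch, not part of the upstream module: releasing a
# BoundedSemaphore more often than it was acquired raises ValueError, which usually
# signals an unbalanced acquire/release pair. The helper name is illustrative only.
def _bounded_semaphore_example():
    sem = BoundedSemaphore(1)
    sem.acquire()
    sem.release()           # back at the initial value
    try:
        sem.release()       # one release too many
    except ValueError:
        pass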
class RLock(object):
def __init__(self):
self._block = Semaphore(1)
self._owner = None
self._count = 0
def __repr__(self):
return "<%s at 0x%x _block=%s _count=%r _owner=%r)>" % (
self.__class__.__name__,
id(self),
self._block,
self._count,
self._owner)
def acquire(self, blocking=1):
me = getcurrent()
if self._owner is me:
self._count = self._count + 1
return 1
rc = self._block.acquire(blocking)
if rc:
self._owner = me
self._count = 1
return rc
def __enter__(self):
return self.acquire()
def release(self):
if self._owner is not getcurrent():
raise RuntimeError("cannot release un-aquired lock")
self._count = count = self._count - 1
if not count:
self._owner = None
self._block.release()
def __exit__(self, typ, value, tb):
self.release()
# Internal methods used by condition variables
def _acquire_restore(self, count_owner):
count, owner = count_owner
self._block.acquire()
self._count = count
self._owner = owner
def _release_save(self):
count = self._count
self._count = 0
owner = self._owner
self._owner = None
self._block.release()
return (count, owner)
def _is_owned(self):
return self._owner is getcurrent()
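# A minimal illustrative sketch, not part of the upstream module: an RLock may be
# re-acquired by the greenlet that owns it and only releases the underlying
# Semaphore once every acquire has been matched. The helper name is illustrative only.
def _rlock_example():
    lock = RLock()
    with lock:              # takes the underlying Semaphore
        with lock:          # re-entrant acquire just bumps the internal count
            assert lock._is_owned()
    assert not lock._is_owned()  # fully released again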
|
ubuntuvim/GoAgent
|
local/gevent-1.0rc2/gevent/lock.py
|
Python
|
mit
| 3,077
|
#
# Transmission Line Simulator
#
# Author(s): Jiacong Xu
# Created: Jun-27-2017
#
|
flyingbanana1024102/transmission-line-simulator
|
src/views/__init__.py
|
Python
|
mit
| 83
|
from allauth.account.models import EmailAddress
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class KakaoAccount(ProviderAccount):
@property
def properties(self):
return self.account.extra_data['properties']
def get_avatar_url(self):
return self.properties['profile_image']
def to_str(self):
dflt = super(KakaoAccount, self).to_str()
return self.properties['nickname'] or dflt
class KakaoProvider(OAuth2Provider):
id = 'kakao'
name = 'Kakao'
account_class = KakaoAccount
def extract_uid(self, data):
return str(data['id'])
def extract_common_fields(self, data):
email = data.get("kaccount_email")
return dict(email=email)
def extract_email_addresses(self, data):
ret = []
email = data.get("kaccount_email")
verified = data.get("kaccount_email_verified")
# data["kaccount_email_verified"] imply the email address is
# verified
ret.append(EmailAddress(email=email,
verified=verified,
primary=True))
return ret
provider_classes = [KakaoProvider]
|
joshowen/django-allauth
|
allauth/socialaccount/providers/kakao/provider.py
|
Python
|
mit
| 1,264
|
#! /usr/env/python
"""
Python implementation of RadialModelGrid, a grid class used to create and
manage structured Voronoi-Delaunay grids for 2D numerical models.
Do NOT add new documentation here. Grid documentation is now built in a semi-
automated fashion. To modify the text seen on the web, edit the files
`docs/text_for_[gridfile].py.txt`.
"""
import numpy
from six.moves import range
from .voronoi import VoronoiDelaunayGrid
from landlab.utils.decorators import deprecated
class RadialModelGrid(VoronoiDelaunayGrid):
"""Grid of concentric circles.
This inherited class implements a circular grid in which grid nodes are
placed at regular radial and semi-regular arc-wise intervals. That is,
if the radial spacing between *shells* is *dr*, the nodes are placed around
the circular shell at regular intervals that get as close as possible to
*dr*. The points are then arranged in a Delaunay triangulation with Voronoi
cells. Within each ring, nodes are numbered according to Landlab
convention, from the first node counterclockwise of east. Numbering
begins at the centermost node and works outwards through the rings.
Parameters
----------
num_shells : int
Number of rings in the grid.
dr : float, optional
Radial interval for rings.
origin_x : float, optional
x-coordinate of origin node.
origin_y : float, optional
y-coordinate of origin node.
Returns
-------
RadialModelGrid
A newly-created grid.
Examples
--------
A grid with just one ring will have a node at the origin surrounded
by six other nodes.
>>> from landlab import RadialModelGrid
>>> omg = RadialModelGrid(num_shells=1, dr=1., origin_x=0., origin_y=0.)
>>> omg.number_of_nodes
7
>>> omg.number_of_cells
1
A second ring adds 13 more nodes, for a total of 20.
>>> omg = RadialModelGrid(2)
>>> omg.number_of_nodes
20
"""
def __init__(self, num_shells=0, dr=1.0, origin_x=0.0, origin_y=0.0,
**kwds):
"""Create a circular grid.
Create a circular grid in which grid nodes are placed at regular
radial and semi-regular arc-wise intervals. That is, if the radial
spacing between *shells* is *dr*, the nodes are placed around the
circular shell at regular intervals that get as close as possible to
*dr*. The points are then arranged in a Delaunay triangulation with
Voronoi cells.
Parameters
----------
num_shells : int
Number of rings in the grid.
dr : float, optional
Radial interval for rings.
origin_x : float, optional
x-coordinate of origin node.
origin_y : float, optional
y-coordinate of origin node.
Returns
-------
RadialModelGrid
A newly-created grid.
Examples
--------
A grid with just one ring will have a node at the origin surrounded
by six other nodes.
>>> from landlab import RadialModelGrid
>>> omg = RadialModelGrid(num_shells=1, dr=1., origin_x=0.,
... origin_y=0.)
>>> omg.number_of_nodes
7
>>> omg.number_of_cells
1
A second ring adds 13 more nodes, for a total of 20.
>>> omg = RadialModelGrid(2)
>>> omg.number_of_nodes
20
"""
# Set number of nodes, and initialize if caller has given dimensions
self._origin_x = origin_x
self._origin_y = origin_y
if num_shells > 0:
self._initialize(num_shells, dr, origin_x, origin_y)
super(RadialModelGrid, self).__init__(**kwds)
@classmethod
def from_dict(cls, params):
"""
LLCATS: GINF
"""
num_shells = params['num_shells']
dr = params.get('dr', 1.)
origin = params.get('origin', (0., 0.))
return cls(num_shells=num_shells, dr=dr, origin_x=origin[0],
origin_y=origin[1])
def _initialize(self, num_shells, dr, origin_x=0.0, origin_y=0.0):
[pts, npts] = self._create_radial_points(num_shells, dr)
self._n_shells = int(num_shells)
self._dr = dr
super(RadialModelGrid, self)._initialize(pts[:, 0], pts[:, 1])
def _create_radial_points(self, num_shells, dr, origin_x=0.0,
origin_y=0.0):
"""Create a set of points on concentric circles.
Creates and returns a set of (x,y) points placed in a series of
concentric circles around the origin.
"""
shells = numpy.arange(0, num_shells) + 1
twopi = 2 * numpy.pi
# number of points in each shell
n_pts_in_shell = numpy.round(twopi * shells)
dtheta = twopi / n_pts_in_shell
npts = int(sum(n_pts_in_shell) + 1)
pts = numpy.zeros((npts, 2))
r = shells * dr
startpt = 1
for i in numpy.arange(0, num_shells):
theta = (dtheta[i] * numpy.arange(0, n_pts_in_shell[i]) +
dtheta[i] / (i + 1))
ycoord = r[i] * numpy.sin(theta)
if numpy.isclose(ycoord[-1], 0.):
# this modification is necessary to force the first ring to
# follow our new CCW from E numbering convention (DEJH, Nov15)
ycoord[-1] = 0.
pts[startpt:(startpt + int(n_pts_in_shell[i])),
0] = numpy.roll(r[i] * numpy.cos(theta), 1)
pts[startpt:(startpt + int(n_pts_in_shell[i])),
1] = numpy.roll(ycoord, 1)
else:
pts[startpt:(startpt + int(n_pts_in_shell[i])),
0] = r[i] * numpy.cos(theta)
pts[startpt:(startpt + int(n_pts_in_shell[i])),
1] = ycoord
startpt += int(n_pts_in_shell[i])
pts[:, 0] += origin_x
pts[:, 1] += origin_y
return pts, npts
@property
def number_of_shells(self):
"""Number of node shells in grid.
Returns
-------
int
The number of node shells in the radial grid (not counting the
center node).
LLCATS: GINF
"""
return self._n_shells
@property
@deprecated(use='spacing_of_shells', version=1.0)
def shell_spacing(self):
"""Fixed distance between shells.
LLCATS: DEPR GINF MEAS
"""
return self._dr
@property
def spacing_of_shells(self):
"""Fixed distance between shells.
LLCATS: GINF MEAS
"""
return self._dr
@property
def number_of_nodes_in_shell(self):
"""Number of nodes in each shell.
Returns
-------
int
Number of nodes in each shell, excluding the center node.
LLCATS: GINF NINF
"""
try:
return self._nnodes_inshell
except AttributeError:
n_pts_in_shell = numpy.round(2. * numpy.pi * (
numpy.arange(self.number_of_shells, dtype=float) + 1.))
self._nnodes_inshell = n_pts_in_shell.astype(int)
return self._nnodes_inshell
@property
def radius_at_node(self):
"""Distance for center node to each node.
Returns
-------
ndarray of float
The distance of each node from the center node.
>>> mg = RadialModelGrid(num_shells=2)
>>> mg.radius_at_node
array([ 2., 2., 2., 2., 2., 1., 1., 2., 0., 1., 1., 2., 2.,
1., 1., 2., 2., 2., 2., 2.])
LLCATS: NINF MEAS
"""
try:
return self._node_radii
except AttributeError:
self._node_radii = numpy.sqrt(numpy.square(self.node_x -
self._origin_x) +
numpy.square(self.node_y -
self._origin_y))
return self._node_radii
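# A minimal illustrative sketch, not part of the upstream module: each ring k carries
# round(2*pi*k) nodes, so a grid with num_shells rings has 1 + sum(round(2*pi*k))
# nodes, matching the doctests above (1 shell -> 7 nodes, 2 shells -> 20 nodes).
# The helper name _expected_number_of_nodes is illustrative only.
def _expected_number_of_nodes(num_shells):
    per_shell = numpy.round(2. * numpy.pi * (numpy.arange(num_shells) + 1.))
    return int(per_shell.sum()) + 1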
|
csherwood-usgs/landlab
|
landlab/grid/radial.py
|
Python
|
mit
| 8,078
|
# -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import collections
import logging
import datetime
from io import BytesIO
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header
from .packages.urllib3.filepost import encode_multipart_formdata
from .packages.urllib3.util import parse_url
from .exceptions import HTTPError, RequestException, MissingSchema, InvalidURL
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len)
from .compat import (
cookielib, urlparse, urlunparse, urlsplit, urlencode, str, bytes, StringIO,
is_py2, chardet, json, builtin_str, basestring)
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
log = logging.getLogger(__name__)
class RequestEncodingMixin(object):
@property
def path_url(self):
"""Build the path URL to use."""
url = []
p = urlsplit(self.url)
path = p.path
if not path:
path = '/'
url.append(path)
query = p.query
if query:
url.append('?')
url.append(query)
return ''.join(url)
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if isinstance(data, (str, bytes)):
return data
elif hasattr(data, 'read'):
return data
elif hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urlencode(result, doseq=True)
else:
return data
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if (not files) or isinstance(data, str):
return None
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, '__iter__'):
val = [val]
for v in val:
if v is not None:
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
for (k, v) in files:
# support for explicit filename
ft = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
else:
fn, fp, ft = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, str):
fp = StringIO(fp)
if isinstance(fp, bytes):
fp = BytesIO(fp)
if ft:
new_v = (fn, fp.read(), ft)
else:
new_v = (fn, fp.read())
new_fields.append((k, new_v))
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type
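# A minimal illustrative sketch, not part of the upstream module: _encode_params
# accepts strings, file-like objects, dicts and lists of 2-tuples; only a list of
# 2-tuples preserves parameter order. The helper name is illustrative only.
def _encode_params_example():
    pairs = [('q', 'kdtree'), ('page', '2')]
    assert RequestEncodingMixin._encode_params(pairs) == 'q=kdtree&page=2'
    # a dict is also accepted, but key order is then arbitrary
    assert 'q=kdtree' in RequestEncodingMixin._encode_params({'q': 'kdtree'})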
class RequestHooksMixin(object):
def register_hook(self, event, hook):
"""Properly register a hook."""
if isinstance(hook, collections.Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, collections.Callable))
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
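# A minimal illustrative sketch, not part of the upstream module, assuming
# 'response' is among the default hook events in this version: registering and
# deregistering a callback through the hooks mixin, here on a PreparedRequest
# (defined below). The callback name _log_response is illustrative only.
def _hooks_example():
    p = PreparedRequest()
    def _log_response(r, **kwargs):
        return r
    p.register_hook('response', _log_response)
    assert p.deregister_hook('response', _log_response) is True
    assert p.deregister_hook('response', _log_response) is False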
class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach to the request. If a dictionary is provided, form-encoding will take place.
:param params: dictionary of URL parameters to append to the URL.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(self,
method=None,
url=None,
headers=None,
files=None,
data=dict(),
params=dict(),
auth=None,
cookies=None,
hooks=None):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.params = params
self.auth = auth
self.cookies = cookies
self.hooks = hooks
def __repr__(self):
return '<Request [%s]>' % (self.method)
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare_method(self.method)
p.prepare_url(self.url, self.params)
p.prepare_headers(self.headers)
p.prepare_cookies(self.cookies)
p.prepare_body(self.data, self.files)
p.prepare_auth(self.auth, self.url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
p.prepare_hooks(self.hooks)
return p
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Generated from either a :class:`Request <Request>` object or manually.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> r = req.prepare()
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = self.method.upper()
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
try:
url = unicode(url)
except NameError:
# We're on Python 3.
url = str(url)
except UnicodeDecodeError:
pass
# Support for unicode domain names and paths.
scheme, auth, host, port, path, query, fragment = parse_url(url)
if not scheme:
raise MissingSchema("Invalid URL %r: No schema supplied" % url)
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# Only want to apply IDNA to the hostname
try:
host = host.encode('idna').decode('utf-8')
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
if headers:
headers = dict((name.encode('ascii'), value) for name, value in headers.items())
self.headers = CaseInsensitiveDict(headers)
else:
self.headers = CaseInsensitiveDict()
def prepare_body(self, data, files):
"""Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
length = None
is_stream = False
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, basestring),
not isinstance(data, list),
not isinstance(data, dict)
])
try:
length = super_len(data)
except (TypeError, AttributeError):
length = False
if is_stream:
body = data
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length:
self.headers['Content-Length'] = str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
# Check if file, fo, generator, iterator.
# If not, run through normal process.
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data:
body = self._encode_params(data)
if isinstance(data, str) or isinstance(data, builtin_str) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if (content_type) and (not 'content-type' in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
def prepare_content_length(self, body, force=False):
# Skip this if it's already set, you crazy crazy person.
if 'Content-Length' in self.headers:
if not force:
return
if hasattr(body, 'seek') and hasattr(body, 'tell'):
body.seek(0, 2)
self.headers['Content-Length'] = str(body.tell())
body.seek(0, 0)
elif body is not None:
l = super_len(body)
if l:
self.headers['Content-Length'] = str(l)
elif self.method not in ('GET', 'HEAD'):
self.headers['Content-Length'] = '0'
def prepare_auth(self, auth, url=''):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body, force=True)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data."""
if isinstance(cookies, cookielib.CookieJar):
cookies = cookies
else:
cookies = cookiejar_from_dict(cookies)
if 'cookie' not in self.headers:
cookie_header = get_cookie_header(cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
for event in hooks:
self.register_hook(event, hooks[event])
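# A minimal illustrative sketch, not part of the upstream module, assuming the
# default quoting rules leave this simple URL untouched: prepare_url() normalises
# the URL and folds the params mapping into the query string, which is what
# Request.prepare() relies on. The helper name is illustrative only.
def _prepare_url_example():
    p = PreparedRequest()
    p.prepare_url('http://httpbin.org/get', {'q': 'kdtree'})
    assert p.url == 'http://httpbin.org/get?q=kdtree'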
class Response(object):
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
def __init__(self):
super(Response, self).__init__()
self._content = False
self._content_consumed = False
#: Integer Code of responded HTTP Status.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
#: Requires that ``stream=True`` on the request.
# This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta)
self.elapsed = datetime.timedelta(0)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __nonzero__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
try:
self.raise_for_status()
except RequestException:
return False
return True
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the lovely Charade library
(Thanks, Ian!)."""
return chardet.detect(self.content)['encoding']
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
"""
if self._content_consumed:
# simulate reading small chunks of the content
return iter_slices(self._content, chunk_size)
def generate():
while 1:
chunk = self.raw.read(chunk_size, decode_content=True)
if not chunk:
break
yield chunk
self._content_consumed = True
gen = generate()
if decode_unicode:
gen = stream_decode_response_unicode(gen, self)
return gen
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
"""
pending = None
for chunk in self.iter_content(chunk_size=chunk_size,
decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
@property
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
try:
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0:
self._content = None
else:
self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()
except AttributeError:
self._content = None
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
@property
def text(self):
"""Content of the response, in unicode.
If Response.encoding is None and the chardet module is available, the encoding
will be guessed.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return str('')
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# So we try blindly encoding.
content = str(self.content, errors='replace')
return content
def json(self, **kwargs):
"""Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
"""
if not self.encoding and len(self.content) > 3:
# No encoding set. JSON RFC 4627 section 3 states we should expect
# UTF-8, -16 or -32. Detect which one to use; If the detection or
# decoding fails, fall back to `self.text` (using chardet to make
# a best guess).
encoding = guess_json_utf(self.content)
if encoding is not None:
return json.loads(self.content.decode(encoding), **kwargs)
return json.loads(self.text or self.content, **kwargs)
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers.get('link')
# l = MultiDict()
l = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get('rel') or link.get('url')
l[key] = link
return l
def raise_for_status(self):
"""Raises stored :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if 400 <= self.status_code < 500:
http_error_msg = '%s Client Error: %s' % (self.status_code, self.reason)
elif 500 <= self.status_code < 600:
http_error_msg = '%s Server Error: %s' % (self.status_code, self.reason)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
def close(self):
return self.raw.release_conn()
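# A minimal illustrative sketch, not part of the upstream module, poking at private
# attributes purely for illustration: once the body has been consumed, iter_content()
# simply re-slices the cached bytes, so a Response can safely be iterated again.
# The helper name is illustrative only.
def _iter_cached_content_example():
    r = Response()
    r._content = b'abcdef'
    r._content_consumed = True
    assert list(r.iter_content(chunk_size=4)) == [b'abcd', b'ef']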
|
lucashmorais/x-Bench
|
mozmill-env/python/Lib/site-packages/requests/models.py
|
Python
|
mit
| 22,091
|
##########################################################################
# Copyright (c) 2009, 2013, ETH Zurich.
# All rights reserved.
#
# This file is distributed under the terms in the attached LICENSE file.
# If you do not find this file, copies can be found by writing to:
# ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
##########################################################################
import getpass
import siteconfig
LOADGEN_HOSTS = []
class UW(siteconfig.BaseSite):
# site-specific configuration variables for UW
WEBSERVER_NFS_HOST = 'triangle'
# NFS_SERVER_HOST = 'tomme1.in.barrelfish.org'
WEBSERVER_NFS_PATH = '/home/netos/notexist'
WEBSERVER_LOCAL_PATH = WEBSERVER_NFS_PATH
HTTPERF_PATH = 'httperf.notexist'
HTTPERF_MAXCLIENTS = len(LOADGEN_HOSTS * 2) # max number of load generators
IPBENCH_PATH = 'ipbench.notexist'
IPBENCHD_PATH = 'ipbenchd.notexist'
SSH_ARGS='-x -o StrictHostKeyChecking=no -o ControlPath=none'
def __init__(self):
self._loadgen_hosts = LOADGEN_HOSTS
def get_load_generator(self):
# take the first host, but put it on the back in case we
# need more clients than available hosts (ie. rotate the list)
host = self._loadgen_hosts.pop(0)
self._loadgen_hosts.append(host)
return getpass.getuser(), host
siteconfig.site = UW()
# also cause the UW machines to be loaded/initialised
import machines.uw
|
utsav2601/cmpe295A
|
tools/harness/siteconfig/uw.py
|
Python
|
mit
| 1,471
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 Will Bond <will@wbond.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
from . import platform
try:
# Python 2
str_cls = unicode
bytes_cls = str
except NameError:
# Python 3
str_cls = str
bytes_cls = bytes
# This is used by Linux when the locale seems to be improperly set. UTF-8 tends
# to be the encoding used by all distros, so this is a good fallback.
fs_fallback_encoding = 'utf-8'
fs_encoding = sys.getfilesystemencoding() or fs_fallback_encoding
def encode(path):
if isinstance(path, str_cls):
try:
path = path.encode(fs_encoding, 'strict')
except UnicodeEncodeError:
if not platform.is_linux():
raise
path = path.encode(fs_fallback_encoding, 'strict')
return path
def decode(path):
if isinstance(path, bytes_cls):
try:
path = path.decode(fs_encoding, 'strict')
except UnicodeDecodeError:
if not platform.is_linux():
raise
path = path.decode(fs_fallback_encoding, 'strict')
return path
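# A minimal illustrative sketch, not part of the upstream module, assuming a UTF-8
# capable filesystem encoding: encode()/decode() round-trip a text path through
# bytes and back. The helper name is illustrative only.
def _roundtrip_example():
    original = str_cls(u'caf\xe9/path.py')
    as_bytes = encode(original)
    assert isinstance(as_bytes, bytes_cls)
    assert decode(as_bytes) == original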
|
croneter/PlexKodiConnect
|
resources/lib/watchdog/utils/unicode_paths.py
|
Python
|
gpl-2.0
| 2,171
|
##
# Copyright 2012-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Various test utility functions.
@author: Kenneth Hoste (Ghent University)
"""
import copy
import fileinput
import os
import re
import shutil
import sys
import tempfile
from vsc.utils import fancylogger
from vsc.utils.patterns import Singleton
from vsc.utils.testing import EnhancedTestCase as _EnhancedTestCase
import easybuild.tools.build_log as eb_build_log
import easybuild.tools.options as eboptions
import easybuild.tools.toolchain.utilities as tc_utils
import easybuild.tools.module_naming_scheme.toolchain as mns_toolchain
from easybuild.framework.easyconfig import easyconfig
from easybuild.framework.easyblock import EasyBlock
from easybuild.main import main
from easybuild.tools import config
from easybuild.tools.config import module_classes
from easybuild.tools.configobj import ConfigObj
from easybuild.tools.environment import modify_env
from easybuild.tools.filetools import mkdir, read_file
from easybuild.tools.module_naming_scheme import GENERAL_CLASS
from easybuild.tools.modules import modules_tool
from easybuild.tools.options import CONFIG_ENV_VAR_PREFIX, EasyBuildOptions, set_tmpdir
# make sure tests are robust against any non-default configuration settings;
# involves ignoring any existing configuration files that are picked up, and cleaning the environment
# this is tackled here rather than in suite.py, to make sure this is also done when test modules are ran separately
# clean up environment from unwanted $EASYBUILD_X env vars
for key in os.environ.keys():
if key.startswith('%s_' % CONFIG_ENV_VAR_PREFIX):
del os.environ[key]
# ignore any existing configuration files
go = EasyBuildOptions(go_useconfigfiles=False)
os.environ['EASYBUILD_IGNORECONFIGFILES'] = ','.join(go.options.configfiles)
# redefine $TEST_EASYBUILD_X env vars as $EASYBUILD_X
test_env_var_prefix = 'TEST_EASYBUILD_'
for key in os.environ.keys():
if key.startswith(test_env_var_prefix):
val = os.environ[key]
del os.environ[key]
newkey = '%s_%s' % (CONFIG_ENV_VAR_PREFIX, key[len(test_env_var_prefix):])
os.environ[newkey] = val
class EnhancedTestCase(_EnhancedTestCase):
"""Enhanced test case, provides extra functionality (e.g. an assertErrorRegex method)."""
def setUp(self):
"""Set up testcase."""
super(EnhancedTestCase, self).setUp()
# keep track of log handlers
log = fancylogger.getLogger(fname=False)
self.orig_log_handlers = log.handlers[:]
log.info("setting up test %s" % self.id())
self.orig_tmpdir = tempfile.gettempdir()
# use a subdirectory for this test (which we can clean up easily after the test completes)
self.test_prefix = set_tmpdir()
self.log = fancylogger.getLogger(self.__class__.__name__, fname=False)
fd, self.logfile = tempfile.mkstemp(suffix='.log', prefix='eb-test-')
os.close(fd)
self.cwd = os.getcwd()
# keep track of original environment to restore
self.orig_environ = copy.deepcopy(os.environ)
# keep track of original environment/Python search path to restore
self.orig_sys_path = sys.path[:]
testdir = os.path.dirname(os.path.abspath(__file__))
self.test_sourcepath = os.path.join(testdir, 'sandbox', 'sources')
os.environ['EASYBUILD_SOURCEPATH'] = self.test_sourcepath
os.environ['EASYBUILD_PREFIX'] = self.test_prefix
self.test_buildpath = tempfile.mkdtemp()
os.environ['EASYBUILD_BUILDPATH'] = self.test_buildpath
self.test_installpath = tempfile.mkdtemp()
os.environ['EASYBUILD_INSTALLPATH'] = self.test_installpath
# make sure that the tests only pick up easyconfigs provided with the tests
os.environ['EASYBUILD_ROBOT_PATHS'] = os.path.join(testdir, 'easyconfigs')
# make sure no deprecated behaviour is being triggered (unless intended by the test)
# trip *all* log.deprecated statements by setting deprecation version ridiculously high
self.orig_current_version = eb_build_log.CURRENT_VERSION
os.environ['EASYBUILD_DEPRECATED'] = '10000000'
init_config()
# remove any entries in Python search path that seem to provide easyblocks
for path in sys.path[:]:
if os.path.exists(os.path.join(path, 'easybuild', 'easyblocks', '__init__.py')):
sys.path.remove(path)
# add test easyblocks to Python search path and (re)import and reload easybuild modules
import easybuild
sys.path.append(os.path.join(testdir, 'sandbox'))
reload(easybuild)
import easybuild.easyblocks
reload(easybuild.easyblocks)
import easybuild.easyblocks.generic
reload(easybuild.easyblocks.generic)
reload(easybuild.tools.module_naming_scheme) # required to run options unit tests stand-alone
modtool = modules_tool()
# purge out any loaded modules with original $MODULEPATH before running each test
modtool.purge()
self.reset_modulepath([os.path.join(testdir, 'modules')])
def tearDown(self):
"""Clean up after running testcase."""
super(EnhancedTestCase, self).tearDown()
self.log.info("Cleaning up for test %s", self.id())
# go back to where we were before
os.chdir(self.cwd)
# restore original environment
modify_env(os.environ, self.orig_environ, verbose=False)
# restore original Python search path
sys.path = self.orig_sys_path
# remove any log handlers that were added (so that log files can be effectively removed)
log = fancylogger.getLogger(fname=False)
new_log_handlers = [h for h in log.handlers if h not in self.orig_log_handlers]
for log_handler in new_log_handlers:
log_handler.close()
log.removeHandler(log_handler)
# cleanup test tmp dir
try:
shutil.rmtree(self.test_prefix)
except (OSError, IOError):
pass
# restore original 'parent' tmpdir
for var in ['TMPDIR', 'TEMP', 'TMP']:
os.environ[var] = self.orig_tmpdir
# reset to make sure tempfile picks up new temporary directory to use
tempfile.tempdir = None
def reset_modulepath(self, modpaths):
"""Reset $MODULEPATH with specified paths."""
modtool = modules_tool()
for modpath in os.environ.get('MODULEPATH', '').split(os.pathsep):
modtool.remove_module_path(modpath)
# make very sure $MODULEPATH is totally empty
# some paths may be left behind, e.g. when they contain environment variables
# example: "module unuse Modules/$MODULE_VERSION/modulefiles" may not yield the desired result
os.environ['MODULEPATH'] = ''
for modpath in modpaths:
modtool.add_module_path(modpath)
def eb_main(self, args, do_build=False, return_error=False, logfile=None, verbose=False, raise_error=False,
reset_env=True, raise_systemexit=False, testing=True):
"""Helper method to call EasyBuild main function."""
cleanup()
myerr = False
if logfile is None:
logfile = self.logfile
# clear log file
if logfile:
f = open(logfile, 'w')
f.write('')
f.close()
env_before = copy.deepcopy(os.environ)
try:
main(args=args, logfile=logfile, do_build=do_build, testing=testing)
except SystemExit:
if raise_systemexit:
raise
except Exception, err:
myerr = err
if verbose:
print "err: %s" % err
if logfile and os.path.exists(logfile):
logtxt = read_file(logfile)
else:
logtxt = None
os.chdir(self.cwd)
# make sure config is reinitialized
init_config()
# restore environment to what it was before running main,
# changes may have been made by eb_main (e.g. $TMPDIR & co)
if reset_env:
modify_env(os.environ, env_before)
tempfile.tempdir = None
if myerr and raise_error:
raise myerr
if return_error:
return logtxt, myerr
else:
return logtxt
def setup_hierarchical_modules(self):
"""Setup hierarchical modules to run tests on."""
mod_prefix = os.path.join(self.test_installpath, 'modules', 'all')
# simply copy module files under 'Core' and 'Compiler' to test install path
# EasyBuild is responsible for making sure that the toolchain can be loaded using the short module name
mkdir(mod_prefix, parents=True)
for mod_subdir in ['Core', 'Compiler', 'MPI']:
src_mod_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'modules', mod_subdir)
shutil.copytree(src_mod_path, os.path.join(mod_prefix, mod_subdir))
# make sure only modules in a hierarchical scheme are available, mixing modules installed with
# a flat scheme like EasyBuildMNS and a hierarchical one like HierarchicalMNS doesn't work
self.reset_modulepath([mod_prefix, os.path.join(mod_prefix, 'Core')])
# tweak use statements in modules to ensure correct paths
mpi_pref = os.path.join(mod_prefix, 'MPI', 'GCC', '4.7.2', 'OpenMPI', '1.6.4')
for modfile in [
os.path.join(mod_prefix, 'Core', 'GCC', '4.7.2'),
os.path.join(mod_prefix, 'Core', 'GCC', '4.8.3'),
os.path.join(mod_prefix, 'Core', 'icc', '2013.5.192-GCC-4.8.3'),
os.path.join(mod_prefix, 'Core', 'ifort', '2013.5.192-GCC-4.8.3'),
os.path.join(mod_prefix, 'Compiler', 'GCC', '4.7.2', 'OpenMPI', '1.6.4'),
os.path.join(mod_prefix, 'Compiler', 'intel', '2013.5.192-GCC-4.8.3', 'impi', '4.1.3.049'),
os.path.join(mpi_pref, 'FFTW', '3.3.3'),
os.path.join(mpi_pref, 'OpenBLAS', '0.2.6-LAPACK-3.4.2'),
os.path.join(mpi_pref, 'ScaLAPACK', '2.0.2-OpenBLAS-0.2.6-LAPACK-3.4.2'),
]:
for line in fileinput.input(modfile, inplace=1):
line = re.sub(r"(module\s*use\s*)/tmp/modules/all",
r"\1%s/modules/all" % self.test_installpath,
line)
sys.stdout.write(line)
def setup_categorized_hmns_modules(self):
"""Setup categorized hierarchical modules to run tests on."""
mod_prefix = os.path.join(self.test_installpath, 'modules', 'all')
# simply copy module files under 'CategorizedHMNS/{Core,Compiler,MPI}' to test install path
# EasyBuild is responsible for making sure that the toolchain can be loaded using the short module name
mkdir(mod_prefix, parents=True)
for mod_subdir in ['Core', 'Compiler', 'MPI']:
src_mod_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'modules', 'CategorizedHMNS', mod_subdir)
shutil.copytree(src_mod_path, os.path.join(mod_prefix, mod_subdir))
# create empty module file directory to make C/Tcl modules happy
mpi_pref = os.path.join(mod_prefix, 'MPI', 'GCC', '4.7.2', 'OpenMPI', '1.6.4')
mkdir(os.path.join(mpi_pref, 'base'))
# make sure only modules in the CategorizedHMNS are available
self.reset_modulepath([os.path.join(mod_prefix, 'Core', 'compiler'),
os.path.join(mod_prefix, 'Core', 'toolchain')])
# tweak use statements in modules to ensure correct paths
for modfile in [
os.path.join(mod_prefix, 'Core', 'compiler', 'GCC', '4.7.2'),
os.path.join(mod_prefix, 'Compiler', 'GCC', '4.7.2', 'mpi', 'OpenMPI', '1.6.4'),
]:
for line in fileinput.input(modfile, inplace=1):
line = re.sub(r"(module\s*use\s*)/tmp/modules/all",
r"\1%s/modules/all" % self.test_installpath,
line)
sys.stdout.write(line)
def cleanup():
"""Perform cleanup of singletons and caches."""
# clear Singleton instances, to start afresh
Singleton._instances.clear()
# empty caches
tc_utils._initial_toolchain_instances.clear()
easyconfig._easyconfigs_cache.clear()
easyconfig._easyconfig_files_cache.clear()
mns_toolchain._toolchain_details_cache.clear()
# reset to make sure tempfile picks up new temporary directory to use
tempfile.tempdir = None
def init_config(args=None, build_options=None):
"""(re)initialize configuration"""
cleanup()
# initialize configuration so config.get_modules_tool function works
eb_go = eboptions.parse_options(args=args)
config.init(eb_go.options, eb_go.get_options_by_section('config'))
# initialize build options
if build_options is None:
build_options = {
'extended_dry_run': False,
'external_modules_metadata': ConfigObj(),
'valid_module_classes': module_classes(),
'valid_stops': [x[0] for x in EasyBlock.get_steps()],
}
if 'suffix_modules_path' not in build_options:
build_options.update({'suffix_modules_path': GENERAL_CLASS})
config.init_build_options(build_options=build_options)
return eb_go.options
def find_full_path(base_path, trim=(lambda x: x)):
"""
Determine full path for given base path by looking in sys.path and PYTHONPATH.
trim: a function that takes a path and returns a trimmed version of that path
"""
full_path = None
pythonpath = os.getenv('PYTHONPATH')
if pythonpath:
pythonpath = pythonpath.split(':')
else:
pythonpath = []
for path in sys.path + pythonpath:
tmp_path = os.path.join(trim(path), base_path)
if os.path.exists(tmp_path):
full_path = tmp_path
break
return full_path
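# Illustrative sketch, not part of the original module: find_full_path() returns the
# first match of base_path under any entry of sys.path/$PYTHONPATH, optionally trimming
# each search path first. The relative path and trim function below are only examples.
def _find_full_path_demo():
    """Locate the test 'easyconfigs' directory relative to the Python search paths."""
    return find_full_path(os.path.join('test', 'framework', 'easyconfigs'),
                          trim=lambda p: p.rstrip(os.sep))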
|
nesi/easybuild-framework
|
test/framework/utilities.py
|
Python
|
gpl-2.0
| 15,056
|
#! /usr/bin/env python
"""Token constants (from "token.h")."""
# This file is automatically generated; please don't muck it up!
#
# To update the symbols in this file, 'cd' to the top directory of
# the python source tree after building the interpreter and run:
#
# python Lib/token.py
#--start constants--
ENDMARKER = 0
NAME = 1
NUMBER = 2
STRING = 3
NEWLINE = 4
INDENT = 5
DEDENT = 6
LPAR = 7
RPAR = 8
LSQB = 9
RSQB = 10
COLON = 11
COMMA = 12
SEMI = 13
PLUS = 14
MINUS = 15
STAR = 16
SLASH = 17
VBAR = 18
AMPER = 19
LESS = 20
GREATER = 21
EQUAL = 22
DOT = 23
PERCENT = 24
BACKQUOTE = 25
LBRACE = 26
RBRACE = 27
EQEQUAL = 28
NOTEQUAL = 29
LESSEQUAL = 30
GREATEREQUAL = 31
TILDE = 32
CIRCUMFLEX = 33
LEFTSHIFT = 34
RIGHTSHIFT = 35
DOUBLESTAR = 36
PLUSEQUAL = 37
MINEQUAL = 38
STAREQUAL = 39
SLASHEQUAL = 40
PERCENTEQUAL = 41
AMPEREQUAL = 42
VBAREQUAL = 43
CIRCUMFLEXEQUAL = 44
LEFTSHIFTEQUAL = 45
RIGHTSHIFTEQUAL = 46
DOUBLESTAREQUAL = 47
OP = 48
ERRORTOKEN = 49
N_TOKENS = 50
NT_OFFSET = 256
#--end constants--
tok_name = {}
for _name, _value in globals().items():
if type(_value) is type(0):
tok_name[_value] = _name
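# tok_name maps each numeric token code back to its name,
# e.g. tok_name[NAME] == 'NAME' and tok_name[ENDMARKER] == 'ENDMARKER'.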
def ISTERMINAL(x):
return x < NT_OFFSET
def ISNONTERMINAL(x):
return x >= NT_OFFSET
def ISEOF(x):
return x == ENDMARKER
def main():
import re
import string
import sys
args = sys.argv[1:]
inFileName = args and args[0] or "Include/token.h"
outFileName = "Lib/token.py"
if len(args) > 1:
outFileName = args[1]
try:
fp = open(inFileName)
except IOError, err:
sys.stdout.write("I/O error: %s\n" % str(err))
sys.exit(1)
lines = string.splitfields(fp.read(), "\n")
fp.close()
prog = re.compile(
"#define[ \t][ \t]*([A-Z][A-Z_]*)[ \t][ \t]*([0-9][0-9]*)",
re.IGNORECASE)
tokens = {}
for line in lines:
match = prog.match(line)
if match:
name, val = match.group(1, 2)
val = string.atoi(val)
tokens[val] = name # reverse so we can sort them...
keys = tokens.keys()
keys.sort()
# load the output skeleton from the target:
try:
fp = open(outFileName)
except IOError, err:
sys.stderr.write("I/O error: %s\n" % str(err))
sys.exit(2)
format = string.splitfields(fp.read(), "\n")
fp.close()
try:
start = format.index("#--start constants--") + 1
end = format.index("#--end constants--")
except ValueError:
sys.stderr.write("target does not contain format markers")
sys.exit(3)
lines = []
for val in keys:
lines.append("%s = %d" % (tokens[val], val))
format[start:end] = lines
try:
fp = open(outFileName, 'w')
except IOError, err:
sys.stderr.write("I/O error: %s\n" % str(err))
sys.exit(4)
fp.write(string.joinfields(format, "\n"))
fp.close()
if __name__ == "__main__":
main()
|
sensysnetworks/uClinux
|
user/python/Lib/token.py
|
Python
|
gpl-2.0
| 2,941
|
#This converter script was created by windhamwong.
#It converts non-ASCII characters, written as three-digit decimal escapes in decompiled lua files generated by unluac, back into the correct characters.
import re, sys, getopt
input_filepath = ''
output_filepath = ''
if (len(sys.argv) < 5):
print 'luaConverter.py -i <inputfile> -o <outputfile>'
sys.exit(2)
try:
opts, args = getopt.getopt(sys.argv[1:],"i:o:")
except getopt.GetoptError:
print 'luaConverter.py -i <inputfile> -o <outputfile>'
sys.exit(2)
except:
print "Unexpected error:", sys.exc_info()[0]
raise
for opt, arg in opts:
if opt == '-h':
print 'luaConverter.py -i <inputfile> -o <outputfile>'
sys.exit()
elif opt == "-i":
input_filepath = arg
print 'Input file: ', input_filepath
elif opt == "-o":
output_filepath = arg
print 'Output file: ', output_filepath
try:
input_file = open(input_filepath)
input_lines = input_file.readlines()
input_file.close()
output_file = open(output_filepath, 'w')
for single_line in input_lines:
single_line = re.sub(r'\\(\d{3})', lambda match: chr(int(match.group(1))), single_line)
output_file.write(single_line)
output_file.close()
except:
print "Unexpected error:", sys.exc_info()[0]
raise
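# Illustrative sketch, not part of the original script: the substitution above rewrites
# three-digit decimal escapes into their characters; the made-up input below,
# "print(\072\101\108\108\111)", becomes "print(Hello)".
def _convert_demo(text=r"print(\072\101\108\108\111)"):
    return re.sub(r'\\(\d{3})', lambda match: chr(int(match.group(1))), text)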
|
AndreasWilliams/BotGravindo
|
plugins/needs-review/luaStringConverter/trunk/luaStringConverter.py
|
Python
|
gpl-2.0
| 1,268
|
# -*- coding: utf-8 -*-
#
# balancedneuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
Balanced neuron example
-----------------------
This script simulates a neuron driven by an excitatory and an
inhibitory population of neurons firing Poisson spike trains. The aim
is to find a firing rate for the inhibitory population that will make
the neuron fire at the same rate as the excitatory population.
Optimization is performed using the bisection method from Scipy,
simulating the network repeatedly.
This example is also shown in the article Eppler et al. (2009)
**PyNEST: A convenient interface to the NEST simulator**,
*Front. Neuroinform.* http://dx.doi.org/10.3389/neuro.11.012.2008
'''
# First, we import all necessary modules for simulation, analysis and
# plotting. Additionally, we set the verbosity to suppress info
# messages
from scipy.optimize import bisect
import nest
import nest.voltage_trace
nest.set_verbosity("M_WARNING")
nest.ResetKernel()
# Second, the simulation parameters are assigned to variables.
t_sim = 25000.0 # how long we simulate
n_ex = 16000 # size of the excitatory population
n_in = 4000 # size of the inhibitory population
r_ex = 5.0 # mean rate of the excitatory population
r_in = 20.5 # initial rate of the inhibitory population
epsc = 45.0 # peak amplitude of excitatory synaptic currents
ipsc = -45.0 # peak amplitude of inhibitory synaptic currents
d = 1.0 # synaptic delay
lower = 15.0 # lower bound of the search interval
upper = 25.0 # upper bound of the search interval
prec = 0.01 # how close the excitatory and target rates need to be
# Third, the nodes are created using `Create()`. We store the returned
# handles in variables for later reference.
neuron = nest.Create("iaf_neuron")
noise = nest.Create("poisson_generator", 2)
voltmeter = nest.Create("voltmeter")
spikedetector = nest.Create("spike_detector")
# Fourth, the excitatory Poisson generator (`noise[0]`) and the
# voltmeter are configured using `SetStatus()`, which expects a list
# of node handles and a list of parameter dictionaries. The rate of
# the inhibitory Poisson generator is set later. Note that we need not
# set parameters for the neuron and the spike detector, since they
# have satisfactory defaults.
nest.SetStatus(noise, [{"rate": n_ex * r_ex}, {"rate": n_in * r_in}])
nest.SetStatus(voltmeter, {"withgid": True, "withtime": True})
# Fifth, the neuron is connected to the spike detector and the
# voltmeter, as are the two Poisson generators to the neuron. The
# command `Connect()` has different variants. Plain `Connect()` just
# takes the handles of pre- and post-synaptic nodes and uses the
# default values for weight and delay. `ConvergentConnect()` takes
# four arguments: lists of pre- and post-synaptic nodes and lists of
# weights and delays. Note that the connection direction for the
# voltmeter is reversed compared to the spike detector, because it
# observes the neuron instead of receiving events from it. Thus,
# `Connect()` reflects the direction of signal flow in the simulation
# kernel rather than the physical process of inserting an electrode
# into the neuron. The latter semantics is presently not available in
# NEST.
nest.Connect(neuron, spikedetector)
nest.Connect(voltmeter, neuron)
nest.ConvergentConnect(noise, neuron, [epsc, ipsc], 1.0)
# To determine the optimal rate of the neurons in the inhibitory
# population, the network is simulated several times for different
# values of the inhibitory rate while measuring the rate of the target
# neuron. This is done until the rate of the target neuron matches the
# rate of the neurons in the excitatory population with a certain
# accuracy. The algorithm is implemented in two steps:
# First, the function `output_rate()` is defined to measure the firing
# rate of the target neuron for a given rate of the inhibitory
# neurons.
def output_rate(guess):
print("Inhibitory rate estimate: %5.2f Hz" % guess)
rate = float(abs(n_in * guess))
nest.SetStatus([noise[1]], "rate", rate)
nest.SetStatus(spikedetector, "n_events", 0)
nest.Simulate(t_sim)
out = nest.GetStatus(spikedetector, "n_events")[0] * 1000.0 / t_sim
print(" -> Neuron rate: %6.2f Hz (goal: %4.2f Hz)" % (out, r_ex))
return out
# The function takes the firing rate of the inhibitory neurons as an
# argument. It scales the rate with the size of the inhibitory
# population and configures the inhibitory Poisson generator
# (`noise[1]`) accordingly. Then, the spike-counter of the spike
# detector is reset to zero. The network is simulated using
# `Simulate()`, which takes the desired simulation time in
# milliseconds and advances the network state by this amount of
# time. During simulation, the spike detector counts the spikes of the
# target neuron and the total number is read out at the end of the
# simulation period. The return value of `output_rate()` is the firing
# rate of the target neuron in Hz.
# Second, the scipy function `bisect()` is used to determine the
# optimal firing rate of the neurons of the inhibitory population.
in_rate = bisect(lambda x: output_rate(x) - r_ex, lower, upper, xtol=prec)
print("Optimal rate for the inhibitory population: %.2f Hz" % in_rate)
# The function `bisect()` takes four arguments: first a function whose
# zero crossing is to be determined. Here, the firing rate of the
# target neuron should equal the firing rate of the neurons of the
# excitatory population. Thus we define an anonymous function (using
# `lambda`) that returns the difference between the actual rate of the
# target neuron and the rate of the excitatory Poisson generator,
# given a rate for the inhibitory neurons. The next two arguments are
# the lower and upper bound of the interval in which to search for the
# zero crossing. The fourth argument of `bisect()` is `xtol`, the desired
# absolute precision of the zero crossing.
# Finally, we plot the target neuron's membrane potential as a
# function of time.
nest.voltage_trace.from_device(voltmeter)
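# Illustrative sketch, not part of the original example: `bisect()` applied to a plain
# function, to show the role of the bracketing interval and of the `xtol` tolerance.
# The function and interval below are made up for this demo.
def _bisect_demo():
    # the zero crossing of x**2 - 2 on [1.0, 2.0] is sqrt(2) ~ 1.414
    return bisect(lambda x: x ** 2 - 2.0, 1.0, 2.0, xtol=0.01)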
|
INM-6/nest-git-migration
|
pynest/examples/balancedneuron.py
|
Python
|
gpl-2.0
| 6,690
|
# Add root folder to python paths
# This must be done on every test in order to pass in Travis
import os
import sys
script_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.realpath(os.path.join(script_dir, '..', '..', '..')))
from gui.aboutData import versionString, licenses, developers, credits, description
def test_aboutData():
"""
Simple test to validate all about data exists
"""
assert len(versionString) > 0
assert len(licenses) > 0
assert len(developers) > 0
assert len(credits) > 0
assert len(description) > 0
|
blitzmann/Pyfa
|
tests/test_modules/test_gui/test_aboutData.py
|
Python
|
gpl-3.0
| 604
|
from geemusic import app
import os
if __name__ == '__main__':
port = int(os.environ.get("PORT", 4000))
app.run(host='0.0.0.0', port=port, debug=True)
|
PostsDesert/geemusic
|
server.py
|
Python
|
gpl-3.0
| 159
|
# coding=utf-8
# Author: Dustyn Gibson <miigotu@gmail.com>
# URL: https://sickrage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals
import json
import re
from sickbeard import logger, tvcache
from sickrage.helper.common import convert_size, try_int
from sickrage.providers.torrent.TorrentProvider import TorrentProvider
class NcoreProvider(TorrentProvider): # pylint: disable=too-many-instance-attributes
def __init__(self):
TorrentProvider.__init__(self, "ncore.cc")
self.username = None
self.password = None
self.minseed = None
self.minleech = None
categories = [
'xvidser_hun', 'xvidser',
'dvd_hun', 'dvd',
'dvd9_hun', 'dvd9',
'hd_hun', 'hd'
]
categories = '&'.join(['kivalasztott_tipus[]=' + x for x in categories])
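# categories is now a query-string fragment like
# 'kivalasztott_tipus[]=xvidser_hun&kivalasztott_tipus[]=xvidser&...',
# which is substituted into the search URL below via {cats}.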
self.url = 'https://ncore.cc/'
self.urls = {
'login': 'https://ncore.cc/login.php',
'search': ('https://ncore.cc/torrents.php?{cats}&mire=%s&miben=name'
'&tipus=kivalasztottak_kozott&submit.x=0&submit.y=0&submit=Ok'
'&tags=&searchedfrompotato=true&jsons=true').format(cats=categories),
}
self.cache = tvcache.TVCache(self)
def login(self):
login_params = {
'nev': self.username,
'pass': self.password,
'submitted': '1',
}
response = self.get_url(self.urls["login"], post_data=login_params, returns="text")
if not response:
logger.log("Unable to connect to provider", logger.WARNING)
return False
if re.search('images/warning.png', response):
logger.log("Invalid username or password. Check your settings", logger.WARNING)
return False
return True
def search(self, search_strings, age=0, ep_obj=None): # pylint: disable=too-many-branches, too-many-locals, too-many-statements
results = []
if not self.login():
return results
for mode in search_strings:
items = []
logger.log("Search Mode: {0}".format(mode), logger.DEBUG)
for search_string in search_strings[mode]:
if mode != "RSS":
logger.log("Search string: {0}".format(search_string.decode("utf-8")), logger.DEBUG)
url = self.urls['search'] % (search_string)
data = self.get_url(url, returns="text")
try:
parsed_json = json.loads(data)
except ValueError as e:
continue
if not isinstance(parsed_json, dict):
logger.log("No data returned from provider", logger.DEBUG)
continue
torrent_results = parsed_json['total_results']
if not torrent_results:
logger.log("Data returned from provider does not contain any torrents", logger.DEBUG)
continue
logger.log('Number of torrents found on nCore = ' + str(torrent_results), logger.INFO)
for item in parsed_json['results']:
try:
title = item.pop("release_name")
download_url = item.pop("download_url")
if not all([title, download_url]):
continue
seeders = item.pop("seeders")
leechers = item.pop("leechers")
if seeders < self.minseed or leechers < self.minleech:
if mode != "RSS":
logger.log("Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
continue
torrent_size = item.pop("size", -1)
size = convert_size(torrent_size) or -1
if mode != "RSS":
logger.log("Found result: {0} with {1} seeders and {2} leechers with a file size {3}".format(title, seeders, leechers, size), logger.DEBUG)
result = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
items.append(result)
except StandardError:
continue
# For each search mode sort all the items by seeders
items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
results += items
return results
provider = NcoreProvider()
|
b0ttl3z/SickRage
|
sickbeard/providers/ncore.py
|
Python
|
gpl-3.0
| 5,358
|
# This file is part of JujuPy, a library for driving the Juju CLI.
# Copyright 2013-2017 Canonical Ltd.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the Lesser GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranties of MERCHANTABILITY,
# SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR PURPOSE. See the Lesser
# GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import json
import os
import pexpect
import subprocess
from contextlib import contextmanager
from datetime import datetime
from jujupy.exceptions import (
CannotConnectEnv,
NoActiveControllers,
NoActiveModel,
SoftDeadlineExceeded,
)
from jujupy.utility import (
get_timeout_path,
get_timeout_prefix,
pause,
quote,
scoped_environ,
)
from jujupy.wait_condition import (
CommandTime,
)
__metaclass__ = type
log = logging.getLogger("jujupy.backend")
JUJU_DEV_FEATURE_FLAGS = 'JUJU_DEV_FEATURE_FLAGS'
class JujuBackend:
"""A Juju backend referring to a specific juju 2 binary.
Uses -m to specify models, uses JUJU_DATA to specify home directory.
"""
_model_flag = '-m'
def __init__(self, full_path, version, feature_flags, debug,
soft_deadline=None):
self._version = version
self._full_path = full_path
self.feature_flags = feature_flags
self.debug = debug
self._timeout_path = get_timeout_path()
self.juju_timings = []
self.soft_deadline = soft_deadline
self._ignore_soft_deadline = False
# List of ModelClients, keep track of models added so we can remove
# only those added during a test run (i.e. when using an existing
# controller.)
self._added_models = []
def _now(self):
return datetime.utcnow()
@contextmanager
def _check_timeouts(self):
# If an exception occurred, we don't want to replace it with
# SoftDeadlineExceeded.
yield
if self.soft_deadline is None or self._ignore_soft_deadline:
return
if self._now() > self.soft_deadline:
raise SoftDeadlineExceeded()
@contextmanager
def ignore_soft_deadline(self):
"""Ignore the client deadline. For cleanup code."""
old_val = self._ignore_soft_deadline
self._ignore_soft_deadline = True
try:
yield
finally:
self._ignore_soft_deadline = old_val
def clone(self, full_path, version, debug, feature_flags):
if version is None:
version = self.version
if full_path is None:
full_path = self.full_path
if debug is None:
debug = self.debug
result = self.__class__(full_path, version, feature_flags, debug,
self.soft_deadline)
# Each clone shares a reference to juju_timings allowing us to collect
# all commands run during a test.
result.juju_timings = self.juju_timings
# Each clone shares a reference to _added_models to ensure we track any
# added models regardless of the ModelClient that adds them.
result._added_models = self._added_models
return result
def track_model(self, client):
# Keep a reference to `client` for the lifetime of this backend (or
# until it's untracked).
self._added_models.append(client)
def untrack_model(self, client):
"""Remove `client` from tracking. Silently fails if not present."""
# No longer need to track this client for whatever reason.
try:
self._added_models.remove(client)
except ValueError:
log.debug(
'Attempted to remove client "{}" that was not tracked.'.format(
client.env.environment))
pass
@property
def version(self):
return self._version
@property
def full_path(self):
return self._full_path
@property
def juju_name(self):
return os.path.basename(self._full_path)
@property
def added_models(self):
# Return a copy of the list so any modifications don't trip callees up.
return list(self._added_models)
def _get_attr_tuple(self):
return (self._version, self._full_path, self.feature_flags,
self.debug, self.juju_timings)
def __eq__(self, other):
if type(self) != type(other):
return False
return self._get_attr_tuple() == other._get_attr_tuple()
def shell_environ(self, used_feature_flags, juju_home):
"""Generate a suitable shell environment.
Juju's directory must be in the PATH to support plugins.
"""
env = dict(os.environ)
if self.full_path is not None:
env['PATH'] = '{}{}{}'.format(os.path.dirname(self.full_path),
os.pathsep, env['PATH'])
flags = self.feature_flags.intersection(used_feature_flags)
feature_flag_string = env.get(JUJU_DEV_FEATURE_FLAGS, '')
if feature_flag_string != '':
flags.update(feature_flag_string.split(','))
if flags:
env[JUJU_DEV_FEATURE_FLAGS] = ','.join(sorted(flags))
env['JUJU_DATA'] = juju_home
return env
def full_args(self, command, args, model, timeout):
if model is not None:
e_arg = (self._model_flag, model)
else:
e_arg = ()
if timeout is None:
prefix = ()
else:
prefix = get_timeout_prefix(timeout, self._timeout_path)
logging = '--debug' if self.debug else '--show-log'
# If args is a string, make it a tuple. This makes writing commands
# with one argument a bit nicer.
if isinstance(args, str):
args = (args,)
# we split the command here so that the caller can control where the -m
# model flag goes. Everything in the command string is put before the
# -m flag.
command = command.split()
return (prefix + (self.juju_name, logging,) + tuple(command) + e_arg +
args)
def juju(self, command, args, used_feature_flags,
juju_home, model=None, check=True, timeout=None, extra_env=None,
suppress_err=False):
"""Run a command under juju for the current environment.
:return: Tuple rval, CommandTime rval being the commands exit code and
a CommandTime object used for storing command timing data.
"""
args = self.full_args(command, args, model, timeout)
log.info(' '.join(args))
env = self.shell_environ(used_feature_flags, juju_home)
if extra_env is not None:
env.update(extra_env)
if check:
call_func = subprocess.check_call
else:
call_func = subprocess.call
# Mutate os.environ instead of supplying env parameter so Windows can
# search env['PATH']
stderr = subprocess.PIPE if suppress_err else None
# Keep track of commands and how long they take.
command_time = CommandTime(command, args, env)
with scoped_environ(env):
log.debug('Running juju with env: {}'.format(env))
with self._check_timeouts():
rval = call_func(args, stderr=stderr)
self.juju_timings.append(command_time)
return rval, command_time
def expect(self, command, args, used_feature_flags, juju_home, model=None,
timeout=None, extra_env=None):
args = self.full_args(command, args, model, timeout)
log.info(' '.join(args))
env = self.shell_environ(used_feature_flags, juju_home)
if extra_env is not None:
env.update(extra_env)
# pexpect.spawn expects a string. This is better than trying to extract
# command + args from the returned tuple (as there could be an initial
# timing command tacked on).
command_string = ' '.join(quote(a) for a in args)
with scoped_environ(env):
log.debug('starting client interaction: {}'.format(command_string))
return pexpect.spawn(command_string, encoding='UTF-8', timeout=60)
@contextmanager
def juju_async(self, command, args, used_feature_flags,
juju_home, model=None, timeout=None):
full_args = self.full_args(command, args, model, timeout)
log.info(' '.join(args))
env = self.shell_environ(used_feature_flags, juju_home)
# Mutate os.environ instead of supplying env parameter so Windows can
# search env['PATH']
with scoped_environ(env):
with self._check_timeouts():
proc = subprocess.Popen(full_args)
yield proc
retcode = proc.wait()
if retcode != 0:
raise subprocess.CalledProcessError(retcode, full_args)
def get_juju_output(self, command, args, used_feature_flags, juju_home,
model=None, timeout=None, user_name=None,
merge_stderr=False):
args = self.full_args(command, args, model, timeout)
env = self.shell_environ(used_feature_flags, juju_home)
log.debug(args)
# Mutate os.environ instead of supplying env parameter so
# Windows can search env['PATH']
with scoped_environ(env):
proc = subprocess.Popen(
args, stdout=subprocess.PIPE, stdin=subprocess.PIPE,
stderr=subprocess.STDOUT if merge_stderr else subprocess.PIPE)
with self._check_timeouts():
sub_output, sub_error = proc.communicate()
log.debug(sub_output)
if proc.returncode != 0:
log.debug(sub_error)
e = subprocess.CalledProcessError(
proc.returncode, args, sub_output)
e.stderr = sub_error
if sub_error and (
b'Unable to connect to environment' in sub_error or
b'MissingOrIncorrectVersionHeader' in sub_error or
b'307: Temporary Redirect' in sub_error):
raise CannotConnectEnv(e)
raise e
return sub_output.decode('utf-8')
def get_active_model(self, juju_data_dir):
"""Determine the active model in a juju data dir."""
try:
current = json.loads(self.get_juju_output(
'models', ('--format', 'json'), set(),
juju_data_dir, model=None).decode('ascii'))
except subprocess.CalledProcessError:
raise NoActiveControllers(
'No active controller for {}'.format(juju_data_dir))
try:
return current['current-model']
except KeyError:
raise NoActiveModel('No active model for {}'.format(juju_data_dir))
def get_active_controller(self, juju_data_dir):
"""Determine the active controller in a juju data dir."""
try:
current = json.loads(self.get_juju_output(
'controllers', ('--format', 'json'), set(),
juju_data_dir, model=None))
except subprocess.CalledProcessError:
raise NoActiveControllers(
'No active controller for {}'.format(juju_data_dir))
try:
return current['current-controller']
except KeyError:
raise NoActiveControllers(
'No active controller for {}'.format(juju_data_dir))
def get_active_user(self, juju_data_dir, controller):
"""Determine the active user for a controller."""
try:
current = json.loads(self.get_juju_output(
'controllers', ('--format', 'json'), set(),
juju_data_dir, model=None))
except subprocess.CalledProcessError:
raise NoActiveControllers(
'No active controller for {}'.format(juju_data_dir))
return current['controllers'][controller]['user']
def pause(self, seconds):
pause(seconds)
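# Illustrative sketch, not part of the original module: how full_args() composes a
# command line. The binary path, version and model name below are made up.
def _full_args_demo():
    backend = JujuBackend('/usr/bin/juju', '2.9.0', set(), debug=False)
    # -> ('juju', '--show-log', 'deploy', '-m', 'default', 'mysql')
    return backend.full_args('deploy', ('mysql',), model='default', timeout=None)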
|
freyes/juju
|
acceptancetests/jujupy/backend.py
|
Python
|
agpl-3.0
| 12,440
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 08 16:45:12 2015
@author: thomas.douenne
"""
from __future__ import division
import pandas as pd
import numpy as np
from pandas import concat
from openfisca_france_indirect_taxation.examples.utils_example import get_input_data_frame
from openfisca_france_indirect_taxation.almost_ideal_demand_system.aids_price_index_builder import \
df_indice_prix_produit
# Now that we have our price indexes, we construct a dataframe with the rest of the information
data_frame_for_reg = None
for year in [2000, 2005, 2011]:
aggregates_data_frame = get_input_data_frame(2011)
aggregates_data_frame['depenses_tot'] = 0
for i in range(1, 13):
aggregates_data_frame['depenses_tot'] += aggregates_data_frame['coicop12_{}'.format(i)]
produits = [column for column in aggregates_data_frame.columns if column.isdigit()]
data = aggregates_data_frame[produits + ['vag']].copy()
data.index.name = 'ident_men'
data.reset_index(inplace = True)
df = pd.melt(data, id_vars = ['vag', 'ident_men'], value_vars=produits,
value_name = 'depense_bien', var_name = 'bien')
df_indice_prix_produit = df_indice_prix_produit[['indice_prix_produit'] + ['prix'] + ['temps'] + ['mois']]
df['vag'] = df['vag'].astype(str)
df['indice_prix_produit'] = df['vag'] + '_' + df['bien']
df['indice_prix_produit'] = df['indice_prix_produit'].str.replace('_0', '')
df['indice_prix_produit'] = df['indice_prix_produit'].str.replace('_', '')
df['coicop_12_numero'] = df['bien'].str[:2]
df = df[['ident_men'] + ['coicop_12_numero'] + ['indice_prix_produit'] + ['depense_bien'] + ['vag']]
df = pd.merge(df, df_indice_prix_produit, on = 'indice_prix_produit')
df_temps = df[['vag'] + ['temps'] + ['mois']]
df_temps['mois'] = df_temps['mois'].astype(float)
df_temps['mois2'] = df_temps['mois'] ** 2
df_temps = df_temps.drop_duplicates(cols='vag', take_last=True)
df_temps = df_temps.astype(float)
# Construct the price index by coicop:
df['coicop_12_numero'] = df['coicop_12_numero'].astype(int) # Goal : transform 1.0 into 1 to merge with same id.
df = df.astype(str)
df['id'] = df['coicop_12_numero'] + '_' + df['ident_men']
df_depense_coicop = None
for i in range(1, 13):
if df_depense_coicop is not None:
df_depense_coicop = concat([df_depense_coicop, aggregates_data_frame['coicop12_{}'.format(i)]], axis = 1)
else:
df_depense_coicop = aggregates_data_frame['coicop12_{}'.format(i)]
list_coicop12 = [column for column in df_depense_coicop.columns]
df_depense_coicop.index.name = 'ident_men'
df_depense_coicop.reset_index(inplace = True)
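# melt to long format: one row per (household, coicop12_i) pair, with the expenditure in the default 'value' column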
df_depense_coicop = pd.melt(df_depense_coicop, id_vars = ['ident_men'], value_vars = list_coicop12)
df_depense_coicop.rename(columns = {'value': 'depense_par_coicop'}, inplace = True)
df_depense_coicop.rename(columns = {'variable': 'numero_coicop'}, inplace = True)
df_depense_coicop['numero_coicop'] = df_depense_coicop['numero_coicop'].str.split('coicop12_').str[1]
df_depense_coicop = df_depense_coicop.astype(str)
df_depense_coicop['id'] = df_depense_coicop['numero_coicop'] + '_' + df_depense_coicop['ident_men']
df_to_merge = df_depense_coicop[['id'] + ['depense_par_coicop']]
df = pd.merge(df, df_to_merge, on = 'id')
df[['prix'] + ['depense_bien'] + ['depense_par_coicop']] = (
df[['prix'] + ['depense_bien'] + ['depense_par_coicop']].astype(float)
)
df['part_bien_coicop'] = df['depense_bien'] / df['depense_par_coicop']
df.fillna(0, inplace=True)
df['indice_prix_pondere'] = df['part_bien_coicop'] * df['prix']
df.sort(['id'])
grouped = df['indice_prix_pondere'].groupby(df['id'])
grouped = grouped.aggregate(np.sum)
grouped.index.name = 'id'
grouped = grouped.reset_index()
# Import information about households, including niveau_vie_decile
# (To do: Obviously there are mistakes in its computation, check why).
df_info_menage = aggregates_data_frame[['ocde10'] + ['depenses_tot'] + ['vag'] + ['typmen'] + ['revtot'] +
['poste_coicop_2201'] + ['poste_coicop_2202'] + ['poste_coicop_2203']]
df_info_menage['fumeur'] = 0
df_info_menage[['poste_coicop_2201'] + ['poste_coicop_2202'] + ['poste_coicop_2203']] = \
df_info_menage[['poste_coicop_2201'] + ['poste_coicop_2202'] + ['poste_coicop_2203']].astype(float)
df_info_menage['consommation_tabac'] = (
df_info_menage['poste_coicop_2201'] + df_info_menage['poste_coicop_2202'] + df_info_menage['poste_coicop_2203']
)
df_info_menage['fumeur'] = 1 * (df_info_menage['consommation_tabac'] > 0)
df_info_menage.drop(['consommation_tabac', 'poste_coicop_2201', 'poste_coicop_2202', 'poste_coicop_2203'],
inplace = True, axis = 1)
df_info_menage.index.name = 'ident_men'
df_info_menage.reset_index(inplace = True)
df_info_menage['ident_men'] = df_info_menage['ident_men'].astype(str)
data_frame = pd.merge(df_depense_coicop, df_info_menage, on = 'ident_men')
data_frame = pd.merge(data_frame, grouped, on = 'id')
data_frame[['depenses_tot'] + ['depense_par_coicop']] = (
data_frame[['depenses_tot'] + ['depense_par_coicop']].astype(float)
)
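# wi is the budget share of coicop category i: expenditure on the category divided by
# total household expenditure (the w_i of the almost ideal demand system).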
data_frame['wi'] = data_frame['depense_par_coicop'] / data_frame['depenses_tot']
data_frame = data_frame.astype(str)
# By construction, those who don't consume in coicop_i have a price index of 0 for this coicop.
# We replace it with the price index of the whole coicop at the same vag.
data_frame['indice_prix_produit'] = data_frame['vag'] + data_frame['numero_coicop'] + '000'
df_indice_prix_produit['prix'] = df_indice_prix_produit['prix'].astype(float)
df_indice_prix_produit['prix_coicop'] = df_indice_prix_produit['prix']
df_indice_prix_produit_to_merge = df_indice_prix_produit[['indice_prix_produit'] + ['prix_coicop']]
data_frame = pd.merge(data_frame, df_indice_prix_produit_to_merge, on = 'indice_prix_produit')
data_frame['indice_prix_pondere'] = data_frame['indice_prix_pondere'].astype(float)
data_frame.loc[data_frame['indice_prix_pondere'] == 0, 'indice_prix_pondere'] = \
data_frame.loc[data_frame['indice_prix_pondere'] == 0, 'prix_coicop']
data_frame = data_frame.drop(['prix_coicop', 'indice_prix_produit'], axis = 1)
# Reshape the dataframe to have the price index of each coicop as a variable
data_frame_prix = data_frame[['numero_coicop'] + ['ident_men'] + ['indice_prix_pondere']]
data_frame_prix.index.name = 'ident_men'
data_frame_prix = pd.pivot_table(data_frame_prix, index='ident_men', columns='numero_coicop',
values='indice_prix_pondere')
data_frame_prix.reset_index(inplace = True)
data_frame = pd.merge(data_frame, data_frame_prix, on = 'ident_men')
for i in range(1, 13):
data_frame.rename(columns = {'{}'.format(i): 'p{}'.format(i)}, inplace = True)
del data_frame['id']
data_frame = data_frame.astype(float)
data_frame['depenses_par_uc'] = data_frame['depenses_tot'] / data_frame['ocde10']
data_frame = pd.merge(data_frame, df_temps, on = 'vag')
data_frame['numero_coicop'] = data_frame['numero_coicop'].astype(int)
data_frame['numero_coicop'] = data_frame['numero_coicop'].astype(str)
data_frame2 = pd.pivot_table(data_frame, index = 'ident_men', columns = 'numero_coicop',
values = 'wi')
for i in range(1, 13):
data_frame2.rename(columns = {'{}'.format(i): 'w{}'.format(i)}, inplace = True)
data_frame2.index.name = 'ident_men'
data_frame2 = data_frame2.reset_index()
data_frame = pd.merge(data_frame, data_frame2, on = 'ident_men')
data_frame = data_frame.drop_duplicates(cols = 'ident_men', take_last = True)
data_frame.drop(
['depense_par_coicop', 'depenses_tot', 'indice_prix_pondere', 'wi', 'numero_coicop'],
inplace = True, axis = 1
)
data_frame.to_csv('data_frame_r_{}_by_coicop.csv'.format(year), sep = ',')
if data_frame_for_reg is not None:
data_frame_for_reg = pd.concat([data_frame_for_reg, data_frame])
else:
data_frame_for_reg = data_frame
data_frame_for_reg.to_csv('data_frame_for_stata_by_coicop.csv', sep = ',')
data_frame_for_reg['somme_wi'] = 0
for i in range(1, 13):
data_frame_for_reg['somme_wi'] += data_frame_for_reg['w{}'.format(i)]
assert (data_frame_for_reg['somme_wi'] == 1).any(), 'The expenditure shares do not sum to 1'
|
thomasdouenne/openfisca-france-indirect-taxation
|
openfisca_france_indirect_taxation/almost_ideal_demand_system/aids_dataframe_builder_coicop.py
|
Python
|
agpl-3.0
| 8,551
|
"""
Models for Bookmarks.
"""
import logging
import six
from django.contrib.auth.models import User
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from jsonfield.fields import JSONField
from model_utils.models import TimeStampedModel
from opaque_keys.edx.django.models import CourseKeyField, UsageKeyField
from opaque_keys.edx.keys import UsageKey
from xmodule.modulestore import search
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError, NoPathToItem
from . import PathItem
log = logging.getLogger(__name__)
def prepare_path_for_serialization(path):
"""
Return the data from a list of PathItems ready for serialization to json.
"""
return [(six.text_type(path_item.usage_key), path_item.display_name) for path_item in path]
def parse_path_data(path_data):
"""
Return a list of PathItems constructed from parsing path_data.
"""
path = []
for item in path_data:
usage_key = UsageKey.from_string(item[0])
usage_key = usage_key.replace(course_key=modulestore().fill_in_run(usage_key.course_key))
path.append(PathItem(usage_key, item[1]))
return path
@python_2_unicode_compatible
class Bookmark(TimeStampedModel):
"""
Bookmarks model.
.. no_pii:
"""
user = models.ForeignKey(User, db_index=True, on_delete=models.CASCADE)
course_key = CourseKeyField(max_length=255, db_index=True)
usage_key = UsageKeyField(max_length=255, db_index=True)
_path = JSONField(db_column='path', help_text='Path in course tree to the block')
xblock_cache = models.ForeignKey('bookmarks.XBlockCache', on_delete=models.CASCADE)
class Meta(object):
"""
Bookmark metadata.
"""
unique_together = ('user', 'usage_key')
def __str__(self):
return self.resource_id
@classmethod
def create(cls, data):
"""
Create a Bookmark object.
Arguments:
data (dict): The data to create the object with.
Returns:
A Bookmark object.
Raises:
ItemNotFoundError: If no block exists for the usage_key.
"""
data = dict(data)
usage_key = data.pop('usage_key')
with modulestore().bulk_operations(usage_key.course_key):
block = modulestore().get_item(usage_key)
xblock_cache = XBlockCache.create({
'usage_key': usage_key,
'display_name': block.display_name_with_default,
})
data['_path'] = prepare_path_for_serialization(Bookmark.updated_path(usage_key, xblock_cache))
data['course_key'] = usage_key.course_key
data['xblock_cache'] = xblock_cache
user = data.pop('user')
# Sometimes this ends up in data, but newer versions of Django will fail on having unknown keys in defaults
data.pop('display_name', None)
bookmark, created = cls.objects.get_or_create(usage_key=usage_key, user=user, defaults=data)
return bookmark, created
@property
def resource_id(self):
"""
Return the resource id: {username,usage_id}.
"""
return u"{0},{1}".format(self.user.username, self.usage_key)
@property
def display_name(self):
"""
Return the display_name from self.xblock_cache.
Returns:
String.
"""
return self.xblock_cache.display_name # pylint: disable=no-member
@property
def path(self):
"""
Return the path to the bookmark's block after checking self.xblock_cache.
Returns:
List of dicts.
"""
if self.modified < self.xblock_cache.modified: # pylint: disable=no-member
path = Bookmark.updated_path(self.usage_key, self.xblock_cache)
self._path = prepare_path_for_serialization(path)
self.save() # Always save so that self.modified is updated.
return path
return parse_path_data(self._path)
@staticmethod
def updated_path(usage_key, xblock_cache):
"""
Return the update-to-date path.
xblock_cache.paths is the list of all possible paths to a block
constructed by doing a DFS of the tree. However, in case of DAGS,
which section jump_to_id() takes the user to depends on the
modulestore. If xblock_cache.paths has only one item, we can
just use it. Otherwise, we use path_to_location() to get the path
jump_to_id() will take the user to.
"""
if xblock_cache.paths and len(xblock_cache.paths) == 1:
return xblock_cache.paths[0]
return Bookmark.get_path(usage_key)
@staticmethod
def get_path(usage_key):
"""
Returns data for the path to the block in the course graph.
Note: In case of multiple paths to the block from the course
root, this function returns a path arbitrarily but consistently,
depending on the modulestore. In the future, we may want to
extend it to check which of the paths, the user has access to
and return its data.
Arguments:
block (XBlock): The block whose path is required.
Returns:
list of PathItems
"""
with modulestore().bulk_operations(usage_key.course_key):
try:
path = search.path_to_location(modulestore(), usage_key, full_path=True)
except ItemNotFoundError:
log.error(u'Block with usage_key: %s not found.', usage_key)
return []
except NoPathToItem:
log.error(u'No path to block with usage_key: %s.', usage_key)
return []
path_data = []
for ancestor_usage_key in path:
if ancestor_usage_key != usage_key and ancestor_usage_key.block_type != 'course':
try:
block = modulestore().get_item(ancestor_usage_key)
except ItemNotFoundError:
return [] # No valid path can be found.
path_data.append(
PathItem(usage_key=block.location, display_name=block.display_name_with_default)
)
return path_data
@python_2_unicode_compatible
class XBlockCache(TimeStampedModel):
"""
XBlockCache model to store info about xblocks.
.. no_pii:
"""
course_key = CourseKeyField(max_length=255, db_index=True)
usage_key = UsageKeyField(max_length=255, db_index=True, unique=True)
display_name = models.CharField(max_length=255, default='')
_paths = JSONField(
db_column='paths', default=[], help_text='All paths in course tree to the corresponding block.'
)
def __str__(self):
return six.text_type(self.usage_key)
@property
def paths(self):
"""
Return paths.
Returns:
list of list of PathItems.
"""
return [parse_path_data(path) for path in self._paths] if self._paths else self._paths
@paths.setter
def paths(self, value):
"""
Set paths.
Arguments:
value (list of list of PathItems): The list of paths to cache.
"""
self._paths = [prepare_path_for_serialization(path) for path in value] if value else value
@classmethod
def create(cls, data):
"""
Create an XBlockCache object.
Arguments:
data (dict): The data to create the object with.
Returns:
An XBlockCache object.
"""
data = dict(data)
usage_key = data.pop('usage_key')
usage_key = usage_key.replace(course_key=modulestore().fill_in_run(usage_key.course_key))
data['course_key'] = usage_key.course_key
xblock_cache, created = cls.objects.get_or_create(usage_key=usage_key, defaults=data)
if not created:
new_display_name = data.get('display_name', xblock_cache.display_name)
if xblock_cache.display_name != new_display_name:
xblock_cache.display_name = new_display_name
xblock_cache.save()
return xblock_cache
|
cpennington/edx-platform
|
openedx/core/djangoapps/bookmarks/models.py
|
Python
|
agpl-3.0
| 8,283
|
#!/usr/bin/env python
import os
import sys
# Horrible boilerplate - there must be a better way :)
sys.path.append(
os.path.abspath(
os.path.dirname(__file__) + '../../..'
)
)
import simplejson
from pprint import pprint
from django.utils.text import slugify
from django_date_extensions.fields import ApproximateDateField, ApproximateDate
from pombola.core import models
party_kind = models.OrganisationKind.objects.get(slug="party")
parties = simplejson.loads(sys.stdin.read())
for party in parties:
pprint(party)
try:
org = models.Organisation.objects.get(
slug = slugify(party['Acronym'])
)
except models.Organisation.DoesNotExist:
org = models.Organisation(
slug = slugify(party['Acronym'])
)
org.kind = party_kind
org.original_id = party['PartyID']
org.name = party['Name']
if party.get('FoundationYear'):
org.started = ApproximateDate(year=int(party['FoundationYear']))
org.save()
|
patricmutwiri/pombola
|
pombola/core/kenya_import_scripts/import_parties_from_json.py
|
Python
|
agpl-3.0
| 1,011
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from osv import fields, osv
from tools.misc import DEFAULT_SERVER_DATETIME_FORMAT
from tools.float_utils import float_compare
import decimal_precision as dp
from tools.translate import _
class stock_partial_picking_line(osv.TransientModel):
def _tracking(self, cursor, user, ids, name, arg, context=None):
res = {}
for tracklot in self.browse(cursor, user, ids, context=context):
tracking = False
if (tracklot.move_id.picking_id.type == 'in' and tracklot.product_id.track_incoming == True) or \
(tracklot.move_id.picking_id.type == 'out' and tracklot.product_id.track_outgoing == True):
tracking = True
res[tracklot.id] = tracking
return res
_name = "stock.partial.picking.line"
_rec_name = 'product_id'
_columns = {
'product_id' : fields.many2one('product.product', string="Product", required=True, ondelete='CASCADE'),
'quantity' : fields.float("Quantity", digits_compute=dp.get_precision('Product UoM'), required=True),
'product_uom': fields.many2one('product.uom', 'Unit of Measure', required=True, ondelete='CASCADE'),
'prodlot_id' : fields.many2one('stock.production.lot', 'Production Lot', ondelete='CASCADE'),
'location_id': fields.many2one('stock.location', 'Location', required=True, ondelete='CASCADE', domain = [('usage','<>','view')]),
'location_dest_id': fields.many2one('stock.location', 'Dest. Location', required=True, ondelete='CASCADE',domain = [('usage','<>','view')]),
'move_id' : fields.many2one('stock.move', "Move", ondelete='CASCADE'),
'wizard_id' : fields.many2one('stock.partial.picking', string="Wizard", ondelete='CASCADE'),
'update_cost': fields.boolean('Need cost update'),
'cost' : fields.float("Cost", help="Unit Cost for this product line"),
'currency' : fields.many2one('res.currency', string="Currency", help="Currency in which Unit cost is expressed", ondelete='CASCADE'),
'tracking': fields.function(_tracking, string='Tracking', type='boolean'),
}
class stock_partial_picking(osv.osv_memory):
_name = "stock.partial.picking"
_description = "Partial Picking Processing Wizard"
def _hide_tracking(self, cursor, user, ids, name, arg, context=None):
res = {}
for wizard in self.browse(cursor, user, ids, context=context):
res[wizard.id] = any([not(x.tracking) for x in wizard.move_ids])
return res
_columns = {
'date': fields.datetime('Date', required=True),
'move_ids' : fields.one2many('stock.partial.picking.line', 'wizard_id', 'Product Moves'),
'picking_id': fields.many2one('stock.picking', 'Picking', required=True, ondelete='CASCADE'),
'hide_tracking': fields.function(_hide_tracking, string='Tracking', type='boolean', help='This field is for internal purpose. It is used to decide if the column prodlot has to be shown on the move_ids field or not'),
}
def default_get(self, cr, uid, fields, context=None):
if context is None: context = {}
res = super(stock_partial_picking, self).default_get(cr, uid, fields, context=context)
picking_ids = context.get('active_ids', [])
if not picking_ids or (not context.get('active_model') == 'stock.picking') \
or len(picking_ids) != 1:
# Partial Picking Processing may only be done for one picking at a time
return res
picking_id, = picking_ids
if 'picking_id' in fields:
res.update(picking_id=picking_id)
if 'move_ids' in fields:
picking = self.pool.get('stock.picking').browse(cr, uid, picking_id, context=context)
moves = [self._partial_move_for(cr, uid, m) for m in picking.move_lines if m.state not in ('done','cancel')]
res.update(move_ids=moves)
if 'date' in fields:
res.update(date=time.strftime(DEFAULT_SERVER_DATETIME_FORMAT))
return res
def _product_cost_for_average_update(self, cr, uid, move):
"""Returns product cost and currency ID for the given move, suited for re-computing
the average product cost.
:return: map of the form::
{'cost': 123.34,
'currency': 42}
"""
# Currently, the cost on the product form is supposed to be expressed in the currency
# of the company owning the product. If not set, we fall back to the picking's company,
# which should work in simple cases.
product_currency_id = move.product_id.company_id.currency_id and move.product_id.company_id.currency_id.id
picking_currency_id = move.picking_id.company_id.currency_id and move.picking_id.company_id.currency_id.id
return {'cost': move.product_id.standard_price,
'currency': product_currency_id or picking_currency_id or False}
def _partial_move_for(self, cr, uid, move):
partial_move = {
'product_id' : move.product_id.id,
'quantity' : move.state in ('assigned','draft') and move.product_qty or 0,
'product_uom' : move.product_uom.id,
'prodlot_id' : move.prodlot_id.id,
'move_id' : move.id,
'location_id' : move.location_id.id,
'location_dest_id' : move.location_dest_id.id,
}
if move.picking_id.type == 'in' and move.product_id.cost_method == 'average':
partial_move.update(update_cost=True, **self._product_cost_for_average_update(cr, uid, move))
return partial_move
def do_partial(self, cr, uid, ids, context=None):
assert len(ids) == 1, 'Partial picking processing may only be done one at a time'
stock_picking = self.pool.get('stock.picking')
stock_move = self.pool.get('stock.move')
uom_obj = self.pool.get('product.uom')
partial = self.browse(cr, uid, ids[0], context=context)
partial_data = {
'delivery_date' : partial.date
}
picking_type = partial.picking_id.type
for wizard_line in partial.move_ids:
line_uom = wizard_line.product_uom
move_id = wizard_line.move_id.id
# Quantity must be positive
if wizard_line.quantity < 0:
raise osv.except_osv(_('Warning!'), _('Please provide Proper Quantity !'))
# Compute the quantity for the respective wizard_line in the line uom (this just does the rounding if necessary)
qty_in_line_uom = uom_obj._compute_qty(cr, uid, line_uom.id, wizard_line.quantity, line_uom.id)
if line_uom.factor and line_uom.factor <> 0:
if float_compare(qty_in_line_uom, wizard_line.quantity, precision_rounding=line_uom.rounding) != 0:
raise osv.except_osv(_('Warning'), _('The uom rounding does not allow you to ship "%s %s", only roundings of "%s %s" is accepted by the uom.') % (wizard_line.quantity, line_uom.name, line_uom.rounding, line_uom.name))
if move_id:
# Check the quantity against the uom rounding, e.g.:
#picking: 1kg, uom kg rounding = 0.01 (rounding to 10g),
#partial delivery: 253g
#=> result= refused, as the qty left on picking would be 0.747kg and only 0.75 is accepted by the uom.
initial_uom = wizard_line.move_id.product_uom
# Compute the quantity for the respective wizard_line in the initial uom
qty_in_initial_uom = uom_obj._compute_qty(cr, uid, line_uom.id, wizard_line.quantity, initial_uom.id)
without_rounding_qty = (wizard_line.quantity / line_uom.factor) * initial_uom.factor
if float_compare(qty_in_initial_uom, without_rounding_qty, precision_rounding=initial_uom.rounding) != 0:
raise osv.except_osv(_('Warning'), _('The rounding of the initial uom does not allow you to ship "%s %s", as it would let a quantity of "%s %s" to ship and only roundings of "%s %s" is accepted by the uom.') % (wizard_line.quantity, line_uom.name, wizard_line.move_id.product_qty - without_rounding_qty, initial_uom.name, initial_uom.rounding, initial_uom.name))
else:
seq_obj_name = 'stock.picking.' + picking_type
move_id = stock_move.create(cr,uid,{'name' : self.pool.get('ir.sequence').get(cr, uid, seq_obj_name),
'product_id': wizard_line.product_id.id,
'product_qty': wizard_line.quantity,
'product_uom': wizard_line.product_uom.id,
'prodlot_id': wizard_line.prodlot_id.id,
'location_id' : wizard_line.location_id.id,
'location_dest_id' : wizard_line.location_dest_id.id,
'picking_id': partial.picking_id.id
},context=context)
stock_move.action_confirm(cr, uid, [move_id], context)
partial_data['move%s' % (move_id)] = {
'product_id': wizard_line.product_id.id,
'product_qty': wizard_line.quantity,
'product_uom': wizard_line.product_uom.id,
'prodlot_id': wizard_line.prodlot_id.id,
}
if (picking_type == 'in') and (wizard_line.product_id.cost_method == 'average'):
partial_data['move%s' % (wizard_line.move_id.id)].update(product_price=wizard_line.cost,
product_currency=wizard_line.currency.id)
stock_picking.do_partial(cr, uid, [partial.picking_id.id], partial_data, context=context)
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
chengdh/openerp-ktv
|
openerp/addons/stock/wizard/stock_partial_picking.py
|
Python
|
agpl-3.0
| 11,019
|
# Amara, universalsubtitles.org
#
# Copyright (C) 2015 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along
# with this program. If not, see http://www.gnu.org/licenses/agpl-3.0.html.
"""Implement pagination.
This module has a bunch of code to overrides the default paginate_queryset()
method to use offset/limit based pagination instead of page based pagination.
This will get much simpler once we switch to django-rest-framework 3.1 which
has built-in support for this.
"""
import urlparse
from django.http import Http404, QueryDict
from rest_framework import pagination
from rest_framework import serializers
class MetaSerializer(serializers.Serializer):
previous = serializers.SerializerMethodField()
next = serializers.SerializerMethodField()
offset = serializers.IntegerField(read_only=True)
limit = serializers.IntegerField(read_only=True)
total_count = serializers.IntegerField(read_only=True)
def get_next(self, page):
if page.has_next():
return self._make_link(page.next_offset(), page.limit)
else:
return None
def get_previous(self, page):
if page.has_previous():
return self._make_link(page.previous_offset(), page.limit)
else:
return None
def _make_link(self, offset, limit):
request = self.context.get('request')
url = request and request.build_absolute_uri() or ''
(scheme, netloc, path, query, fragment) = urlparse.urlsplit(url)
query_dict = QueryDict(query).copy()
query_dict['offset'] = offset
query_dict['limit'] = limit
query = query_dict.urlencode()
return urlparse.urlunsplit((scheme, netloc, path, query, fragment))
class AmaraPaginationSerializer(pagination.BasePaginationSerializer):
meta = MetaSerializer(source='*')
results_field = 'objects'
class AmaraPage(object):
def __init__(self, queryset, offset, limit):
self.object_list = queryset[offset:offset+limit]
self.total_count = queryset.count()
self.offset = offset
self.limit = limit
def has_next(self):
return self.offset + self.limit < self.total_count
def next_offset(self):
return self.offset + self.limit
def has_previous(self):
return self.offset > 0
def previous_offset(self):
return max(self.offset - self.limit, 0)
class AmaraPaginationMixin(object):
paginate_by_param = 'limit'
max_paginate_by = 100
def paginate_queryset(self, queryset):
limit = self.get_paginate_by()
if not limit:
return None
offset = self.request.query_params.get('offset', 0)
try:
offset = int(offset)
except ValueError:
offset = 0
return AmaraPage(queryset, offset, limit)
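# --- Illustrative sketch (added by the editor, not part of the original module) ---
# A minimal demonstration of how AmaraPage slices a queryset-like object and
# reports next/previous offsets. "FakeQuerySet" is a stand-in invented here so
# the example does not need a configured Django database; treat it as a sketch,
# not as project code.
if __name__ == '__main__':
    class FakeQuerySet(object):
        """Queryset stand-in: supports slicing and count(), nothing else."""
        def __init__(self, items):
            self.items = items
        def __getitem__(self, key):
            return self.items[key]
        def count(self):
            return len(self.items)

    page = AmaraPage(FakeQuerySet(range(25)), offset=10, limit=10)
    print(page.total_count)       # 25
    print(page.has_previous())    # True  -> previous_offset() == 0
    print(page.has_next())        # True  -> next_offset() == 20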
| ofer43211/unisubs | apps/api/pagination.py | Python | agpl-3.0 | 3,393 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Piranha(CMakePackage):
"""Piranha is a computer-algebra library for the symbolic manipulation of
sparse multivariate polynomials and other closely-related symbolic objects
(such as Poisson series)."""
homepage = "https://bluescarni.github.io/piranha/sphinx/"
url = "https://github.com/bluescarni/piranha/archive/v0.5.tar.gz"
git = "https://github.com/bluescarni/piranha.git"
version('develop', branch='master')
version('0.5', sha256='34a89bda8208ff48cfb116efa7d53c09e8a9b3838af4bb96ba2e19e4930b3a58')
variant('python', default=True,
description='Build the Python bindings')
# Build dependencies
depends_on('cmake@3.2.0:', type='build')
extends('python', when='+python')
depends_on('python@2.6:', type='build', when='+python')
# Other dependencies
depends_on('boost+iostreams+regex+serialization',
when='~python')
depends_on('boost+iostreams+regex+serialization+python',
when='+python')
depends_on('bzip2')
depends_on('gmp') # mpir is a drop-in replacement for this
depends_on('mpfr') # Could also be built against mpir
def cmake_args(self):
return [
self.define_from_variant('BUILD_PYRANHA', 'python'),
'-DBUILD_TESTS:BOOL=ON',
]
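# Illustrative note (added by the editor, not part of the original package):
# define_from_variant('BUILD_PYRANHA', 'python') is expected to expand to
# '-DBUILD_PYRANHA:BOOL=ON' for the default '+python' build and ':BOOL=OFF'
# under '~python', alongside the unconditional '-DBUILD_TESTS:BOOL=ON'.
# Hypothetical command lines:
#
#   spack install piranha            # with the Python bindings (+python)
#   spack install piranha ~python    # library only, boost built without +python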
| LLNL/spack | var/spack/repos/builtin/packages/piranha/package.py | Python | lgpl-2.1 | 1,547 |
import urllib
from xml.dom import minidom
import xml.dom.ext
import StringIO
import random
import string
import socket
import struct
import time
import uuid
import threading
import thread
BUFFER_SIZE = 0xffff
APP_MAX_DELAY = 500 # milliseconds
DP_MAX_TIMEOUT = 5000 # 5 seconds
MULTICAST_PORT = 3702
MULTICAST_IPV4_ADDRESS = "239.255.255.250"
UNICAST_UDP_REPEAT=2
UNICAST_UDP_MIN_DELAY=50
UNICAST_UDP_MAX_DELAY=250
UNICAST_UDP_UPPER_DELAY=500
MULTICAST_UDP_REPEAT=4
MULTICAST_UDP_MIN_DELAY=50
MULTICAST_UDP_MAX_DELAY=250
MULTICAST_UDP_UPPER_DELAY=500
NS_A = "http://schemas.xmlsoap.org/ws/2004/08/addressing"
NS_D = "http://schemas.xmlsoap.org/ws/2005/04/discovery"
NS_S = "http://www.w3.org/2003/05/soap-envelope"
ACTION_HELLO = "http://schemas.xmlsoap.org/ws/2005/04/discovery/Hello"
ACTION_BYE = "http://schemas.xmlsoap.org/ws/2005/04/discovery/Bye"
ACTION_PROBE = "http://schemas.xmlsoap.org/ws/2005/04/discovery/Probe"
ACTION_PROBE_MATCH = "http://schemas.xmlsoap.org/ws/2005/04/discovery/ProbeMatches"
ACTION_RESOLVE = "http://schemas.xmlsoap.org/ws/2005/04/discovery/Resolve"
ACTION_RESOLVE_MATCH = "http://schemas.xmlsoap.org/ws/2005/04/discovery/ResolveMatches"
ADDRESS_ALL = "urn:schemas-xmlsoap-org:ws:2005:04:discovery"
ADDRESS_UNKNOWN = "http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous"
MATCH_BY_LDAP = "http://schemas.xmlsoap.org/ws/2005/04/discovery/ldap"
MATCH_BY_URI = "http://schemas.xmlsoap.org/ws/2005/04/discovery/rfc2396"
MATCH_BY_UUID = "http://schemas.xmlsoap.org/ws/2005/04/discovery/uuid"
MATCH_BY_STRCMP = "http://schemas.xmlsoap.org/ws/2005/04/discovery/strcmp0"
class URI:
def __init__(self, uri):
uri = urllib.unquote(uri)
i1 = uri.find(":")
i2 = uri.find("@")
self.__scheme = uri[:i1]
if i2 != -1:
self.__authority = uri[i1 + 1: i2]
self.__path = uri[i2 + 1:]
else:
self.__authority = ""
self.__path = uri[i1 + 1:]
def getScheme(self):
return self.__scheme
def getAuthority(self):
return self.__authority
def getPath(self):
return self.__path
def getPathExQueryFragment(self):
i = self.__path.find("?")
path = self.getPath()
if i != -1:
return path[:self.__path.find("?")]
else:
return path
class QName:
def __init__(self, namespace, localname):
self.__namespace = namespace
self.__localname = localname
def getNamespace(self):
return self.__namespace
def getLocalname(self):
return self.__localname
def getFullname(self):
return self.getNamespace() + ":" + self.getLocalname()
def __repr__(self):
return self.getFullname()
class Scope:
def __init__(self, value, matchBy=None):
self.__matchBy = matchBy
self.__value = value
def getMatchBy(self):
return self.__matchBy
def getValue(self):
return self.__value
def __repr__(self):
if self.getMatchBy() == None or len(self.getMatchBy()) == 0:
return self.getValue()
else:
return self.getMatchBy() + ":" + self.getValue()
class ProbeResolveMatch:
def __init__(self, epr, types, scopes, xAddrs, metadataVersion):
self.__epr = epr
self.__types = types
self.__scopes = scopes
self.__xAddrs = xAddrs
self.__metadataVersion = metadataVersion
def getEPR(self):
return self.__epr
def getTypes(self):
return self.__types
def getScopes(self):
return self.__scopes
def getXAddrs(self):
return self.__xAddrs
def getMetadataVersion(self):
return self.__metadataVersion
def __repr__(self):
return "EPR: %s\nTypes: %s\nScopes: %s\nXAddrs: %s\nMetadata Version: %s" % \
(self.getEPR(), self.getTypes(), self.getScopes(),
self.getXAddrs(), self.getMetadataVersion())
class SoapEnvelope:
def __init__(self):
self.__action = ""
self.__messageId = ""
self.__relatesTo = ""
self.__relationshipType = None
self.__to = ""
self.__replyTo = ""
self.__instanceId = ""
self.__sequenceId = ""
self.__messageNumber = ""
self.__epr = ""
self.__types = []
self.__scopes = []
self.__xAddrs = []
self.__metadataVersion = ""
self.__probeResolveMatches = []
def getAction(self):
return self.__action
def setAction(self, action):
self.__action = action
def getMessageId(self):
return self.__messageId
def setMessageId(self, messageId):
self.__messageId = messageId
def getRelatesTo(self):
return self.__relatesTo
def setRelatesTo(self, relatesTo):
self.__relatesTo = relatesTo
def getRelationshipType(self):
return self.__relationshipType
def setRelationshipType(self, relationshipType):
self.__relationshipType = relationshipType
def getTo(self):
return self.__to
def setTo(self, to):
self.__to = to
def getReplyTo(self):
return self.__replyTo
def setReplyTo(self, replyTo):
self.__replyTo = replyTo
def getInstanceId(self):
return self.__instanceId
def setInstanceId(self, instanceId):
self.__instanceId = instanceId
def getSequenceId(self):
return self.__sequenceId
def setSequenceId(self, sequenceId):
self.__sequenceId = sequenceId
def getEPR(self):
return self.__epr
def setEPR(self, epr):
self.__epr = epr
def getMessageNumber(self):
return self.__messageNumber
def setMessageNumber(self, messageNumber):
self.__messageNumber = messageNumber
def getTypes(self):
return self.__types
def setTypes(self, types):
self.__types = types
def getScopes(self):
return self.__scopes
def setScopes(self, scopes):
self.__scopes = scopes
def getXAddrs(self):
return self.__xAddrs
def setXAddrs(self, xAddrs):
self.__xAddrs = xAddrs
def getMetadataVersion(self):
return self.__metadataVersion
def setMetadataVersion(self, metadataVersion):
self.__metadataVersion = metadataVersion
def getProbeResolveMatches(self):
return self.__probeResolveMatches
def setProbeResolveMatches(self, probeResolveMatches):
self.__probeResolveMatches = probeResolveMatches
def matchScope(src, target, matchBy):
if matchBy == "" or matchBy == None or matchBy == MATCH_BY_LDAP or matchBy == MATCH_BY_URI or matchBy == MATCH_BY_UUID:
src = URI(src)
target = URI(target)
if src.getScheme().lower() != target.getScheme().lower():
return False
if src.getAuthority().lower() != target.getAuthority().lower():
return False
srcPath = src.getPathExQueryFragment()
targetPath = target.getPathExQueryFragment()
if srcPath == targetPath:
return True
elif targetPath.startswith(srcPath):
n = len(srcPath)
if targetPath[n - 1] == srcPath[n - 1] == '/':
return True
if targetPath[n] == '/':
return True
return False
else:
return False
elif matchBy == MATCH_BY_STRCMP:
return src == target
else:
return False
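# Illustrative examples (added by the editor): with the default (URI / RFC 2396)
# matching above, comparison is a case-insensitive scheme/authority check plus a
# path-segment prefix test, e.g.:
#
#   matchScope("http://host/a",  "http://host/a/b", None)   -> True  (segment prefix)
#   matchScope("http://host/a",  "http://host/a",   None)   -> True  (identical path)
#   matchScope("http://host/ab", "http://host/abc", None)   -> False (not a whole segment)
#   matchScope("abc", "abcd", MATCH_BY_STRCMP)               -> False (exact string compare)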
def matchType(type1, type2):
return type1.getFullname() == type2.getFullname()
def getNamespaceValue(node, prefix):
while node != None:
if node.nodeType == minidom.Node.ELEMENT_NODE:
attr = node.getAttributeNode("xmlns:" + prefix)
if attr != None:
return attr.nodeValue
node = node.parentNode
return ""
def getDefaultNamespace(node):
while node != None:
if node.nodeType == minidom.Node.ELEMENT_NODE:
attr = node.getAttributeNode("xmlns")
if attr != None:
return attr.nodeValue
node = node.parentNode
return ""
def getQNameFromValue(value, node):
vals = value.split(":")
ns = ""
if len(vals) == 1:
localName = vals[0]
ns = getDefaultNamespace(node)
else:
localName = vals[1]
ns = getNamespaceValue(node, vals[0])
return QName(ns, localName)
def getTypes(typeNode):
ret = []
if len(typeNode.childNodes) > 0:
items = typeNode.childNodes[0].data.split(" ")
for item in items:
item = item.strip()
if len(item) == 0:
continue
ret.append(getQNameFromValue(item, typeNode))
return ret
def getScopes(scopeNode):
ret = []
matchBy = scopeNode.getAttribute("MatchBy")
if len(scopeNode.childNodes) > 0:
items = scopeNode.childNodes[0].data.split(" ")
for item in items:
item = item.strip()
if len(item) == 0:
continue
ret.append(Scope(item, matchBy))
return ret
def getXAddrs(xAddrsNode):
ret = []
if len(xAddrsNode.childNodes) > 0:
items = xAddrsNode.childNodes[0].data.split(" ")
for item in items:
item = item.strip()
if len(item) == 0:
continue
ret.append(item)
return ret
def createSkelSoapMessage(soapAction):
doc = minidom.Document()
envEl = doc.createElementNS(NS_S, "s:Envelope")
doc.appendChild(envEl)
headerEl = doc.createElementNS(NS_S, "s:Header")
envEl.appendChild(headerEl)
addElementWithText(doc, headerEl, "a:Action", NS_A, soapAction)
bodyEl = doc.createElementNS(NS_S, "s:Body")
envEl.appendChild(bodyEl)
return doc
def addElementWithText(doc, parent, name, ns, value):
el = doc.createElementNS(ns, name)
text = doc.createTextNode(value)
el.appendChild(text)
parent.appendChild(el)
def getDocAsString(doc):
outStr = ""
stream = StringIO.StringIO(outStr)
xml.dom.ext.PrettyPrint(doc, stream)
return stream.getvalue()
def getBodyEl(doc):
return doc.getElementsByTagNameNS(NS_S, "Body")[0]
def getHeaderEl(doc):
return doc.getElementsByTagNameNS(NS_S, "Header")[0]
def getEnvEl(doc):
return doc.getElementsByTagNameNS(NS_S, "Envelope")[0]
def getRandomStr():
return "".join([random.choice(string.letters) for x in xrange(10)])
def addNSAttrToEl(el, ns, prefix):
el.setAttribute("xmlns:" + prefix, ns)
def addTypes(doc, node, types):
if types is not None and len(types) > 0:
envEl = getEnvEl(doc)
typeList = []
prefixMap = {}
for type in types:
ns = type.getNamespace()
localname = type.getLocalname()
if prefixMap.get(ns) == None:
prefix = getRandomStr()
prefixMap[ns] = prefix
else:
prefix = prefixMap.get(ns)
addNSAttrToEl(envEl, ns, prefix)
typeList.append(prefix + ":" + localname)
addElementWithText(doc, node, "d:Types", NS_D, " ".join(typeList))
def addScopes(doc, node, scopes):
if scopes is not None and len(scopes) > 0:
addElementWithText(doc, node, "d:Scopes", NS_D, " ".join([x.getValue() for x in scopes]))
if scopes[0].getMatchBy() is not None and len(scopes[0].getMatchBy()) > 0:
node.getElementsByTagNameNS(NS_D, "Scopes")[0].setAttribute("MatchBy", scopes[0].getMatchBy())
def addXAddrs(doc, node, xAddrs):
if xAddrs is not None and len(xAddrs) > 0:
addElementWithText(doc, node, "d:XAddrs", NS_D, " ".join([x for x in xAddrs]))
def addEPR(doc, node, epr):
eprEl = doc.createElementNS(NS_A, "a:EndpointReference")
addElementWithText(doc, eprEl, "a:Address", NS_A, epr)
node.appendChild(eprEl)
def createMulticastOutSocket():
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 1)
sock.setblocking(0)
return sock
def createMulticastInSocket():
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('', MULTICAST_PORT))
mreq = struct.pack("4sl", socket.inet_aton(MULTICAST_IPV4_ADDRESS), socket.INADDR_ANY)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
sock.setblocking(0)
return sock
def createUnicastOutSocket():
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return sock
def readMessage(sock):
try:
data, addr = sock.recvfrom(BUFFER_SIZE)
except socket.error, e:
return None
else:
return (data, addr)
def parseProbeMessage(dom):
env = SoapEnvelope()
env.setAction(ACTION_PROBE)
env.setMessageId(dom.getElementsByTagNameNS(NS_A, "MessageID")[0].firstChild.data.strip())
replyToNodes = dom.getElementsByTagNameNS(NS_A, "ReplyTo")
if len(replyToNodes) > 0:
env.setReplyTo(replyToNodes[0].firstChild.data.strip())
env.setTo(dom.getElementsByTagNameNS(NS_A, "To")[0].firstChild.data.strip())
typeNodes = dom.getElementsByTagNameNS(NS_D, "Types")
if len(typeNodes) > 0:
env.getTypes().extend(getTypes(typeNodes[0]))
scopeNodes = dom.getElementsByTagNameNS(NS_D, "Scopes")
if len(scopeNodes) > 0:
env.getScopes().extend(getScopes(scopeNodes[0]))
return env
def parseProbeMatchMessage(dom):
env = SoapEnvelope()
env.setAction(ACTION_PROBE_MATCH)
env.setMessageId(dom.getElementsByTagNameNS(NS_A, "MessageID")[0].firstChild.data.strip())
env.setRelatesTo(dom.getElementsByTagNameNS(NS_A, "RelatesTo")[0].firstChild.data.strip())
env.setTo(dom.getElementsByTagNameNS(NS_A, "To")[0].firstChild.data.strip())
appSeqNode = dom.getElementsByTagNameNS(NS_D, "AppSequence")[0]
env.setInstanceId(appSeqNode.getAttribute("InstanceId"))
env.setSequenceId(appSeqNode.getAttribute("SequenceId"))
env.setMessageNumber(appSeqNode.getAttribute("MessageNumber"))
pmNodes = dom.getElementsByTagNameNS(NS_D, "ProbeMatch")
for node in pmNodes:
epr = node.getElementsByTagNameNS(NS_A, "Address")[0].firstChild.data.strip()
types = None
typeNodes = node.getElementsByTagNameNS(NS_D, "Types")
if len(typeNodes) > 0:
types = getTypes(typeNodes[0])
scopes = None
scopeNodes = node.getElementsByTagNameNS(NS_D, "Scopes")
if len(scopeNodes) > 0:
scopes = getScopes(scopeNodes[0])
xAddrs = None
xAddrNodes = node.getElementsByTagNameNS(NS_D, "XAddrs")
if len(xAddrNodes) > 0:
xAddrs = getXAddrs(xAddrNodes[0])
mdv = node.getElementsByTagNameNS(NS_D, "MetadataVersion")[0].firstChild.data.strip()
env.getProbeResolveMatches().append(ProbeResolveMatch(epr, types, scopes, xAddrs, mdv))
return env
def parseResolveMessage(dom):
env = SoapEnvelope()
env.setAction(ACTION_RESOLVE)
env.setMessageId(dom.getElementsByTagNameNS(NS_A, "MessageID")[0].firstChild.data.strip())
replyToNodes = dom.getElementsByTagNameNS(NS_A, "ReplyTo")
if len(replyToNodes) > 0:
env.setReplyTo(replyToNodes[0].firstChild.data.strip())
env.setTo(dom.getElementsByTagNameNS(NS_A, "To")[0].firstChild.data.strip())
env.setEPR(dom.getElementsByTagNameNS(NS_A, "Address")[0].firstChild.data.strip())
return env
def parseResolveMatchMessage(dom):
env = SoapEnvelope()
env.setAction(ACTION_RESOLVE_MATCH)
env.setMessageId(dom.getElementsByTagNameNS(NS_A, "MessageID")[0].firstChild.data.strip())
env.setRelatesTo(dom.getElementsByTagNameNS(NS_A, "RelatesTo")[0].firstChild.data.strip())
env.setTo(dom.getElementsByTagNameNS(NS_A, "To")[0].firstChild.data.strip())
appSeqNode = dom.getElementsByTagNameNS(NS_D, "AppSequence")[0]
env.setInstanceId(appSeqNode.getAttribute("InstanceId"))
env.setSequenceId(appSeqNode.getAttribute("SequenceId"))
env.setMessageNumber(appSeqNode.getAttribute("MessageNumber"))
nodes = dom.getElementsByTagNameNS(NS_D, "ResolveMatch")
if len(nodes) > 0:
node = nodes[0]
epr = node.getElementsByTagNameNS(NS_A, "Address")[0].firstChild.data.strip()
typeNodes = node.getElementsByTagNameNS(NS_D, "Types")
types = []
if len(typeNodes) > 0:
types = getTypes(typeNodes[0])
scopeNodes = node.getElementsByTagNameNS(NS_D, "Scopes")
scopes = []
if len(scopeNodes) > 0:
scopes = getScopes(scopeNodes[0])
xAddrs = getXAddrs(node.getElementsByTagNameNS(NS_D, "XAddrs")[0])
mdv = node.getElementsByTagNameNS(NS_D, "MetadataVersion")[0].firstChild.data.strip()
env.getProbeResolveMatches().append(ProbeResolveMatch(epr, types, scopes, xAddrs, mdv))
return env
def parseHelloMessage(dom):
env = SoapEnvelope()
env.setAction(ACTION_HELLO)
env.setMessageId(dom.getElementsByTagNameNS(NS_A, "MessageID")[0].firstChild.data.strip())
env.setTo(dom.getElementsByTagNameNS(NS_A, "To")[0].firstChild.data.strip())
appSeqNode = dom.getElementsByTagNameNS(NS_D, "AppSequence")[0]
env.setInstanceId(appSeqNode.getAttribute("InstanceId"))
env.setSequenceId(appSeqNode.getAttribute("SequenceId"))
env.setMessageNumber(appSeqNode.getAttribute("MessageNumber"))
relatesToNodes = dom.getElementsByTagNameNS(NS_A, "RelatesTo")
if len(relatesToNodes) > 0:
env.setRelatesTo(relatesToNodes[0].firstChild.data.strip())
env.setRelationshipType(getQNameFromValue( \
relatesToNodes[0].getAttribute("RelationshipType"), relatesToNodes[0]))
env.setEPR(dom.getElementsByTagNameNS(NS_A, "Address")[0].firstChild.data.strip())
typeNodes = dom.getElementsByTagNameNS(NS_D, "Types")
if len(typeNodes) > 0:
env.setTypes(getTypes(typeNodes[0]))
scopeNodes = dom.getElementsByTagNameNS(NS_D, "Scopes")
if len(scopeNodes) > 0:
env.setScopes(getScopes(scopeNodes[0]))
xNodes = dom.getElementsByTagNameNS(NS_D, "XAddrs")
if len(xNodes) > 0:
env.setXAddrs(getXAddrs(xNodes[0]))
env.setMetadataVersion(dom.getElementsByTagNameNS(NS_D, "MetadataVersion")[0].firstChild.data.strip())
return env
def parseByeMessage(dom):
env = SoapEnvelope()
env.setAction(ACTION_BYE)
env.setMessageId(dom.getElementsByTagNameNS(NS_A, "MessageID")[0].firstChild.data.strip())
env.setTo(dom.getElementsByTagNameNS(NS_A, "To")[0].firstChild.data.strip())
appSeqNode = dom.getElementsByTagNameNS(NS_D, "AppSequence")[0]
env.setInstanceId(appSeqNode.getAttribute("InstanceId"))
env.setSequenceId(appSeqNode.getAttribute("SequenceId"))
env.setMessageNumber(appSeqNode.getAttribute("MessageNumber"))
env.setEPR(dom.getElementsByTagNameNS(NS_A, "Address")[0].firstChild.data.strip())
return env
def parseEnvelope(data):
dom = minidom.parseString(data)
soapAction = dom.getElementsByTagNameNS(NS_A, "Action")[0].firstChild.data.strip()
if soapAction == ACTION_PROBE:
return parseProbeMessage(dom)
elif soapAction == ACTION_PROBE_MATCH:
return parseProbeMatchMessage(dom)
elif soapAction == ACTION_RESOLVE:
return parseResolveMessage(dom)
elif soapAction == ACTION_RESOLVE_MATCH:
return parseResolveMatchMessage(dom)
elif soapAction == ACTION_BYE:
return parseByeMessage(dom)
elif soapAction == ACTION_HELLO:
return parseHelloMessage(dom)
def sendMessage(sock, addr, port, data):
sock.sendto(data, (addr, port))
def createMessage(env):
if env.getAction() == ACTION_PROBE:
return createProbeMessage(env)
if env.getAction() == ACTION_PROBE_MATCH:
return createProbeMatchMessage(env)
if env.getAction() == ACTION_RESOLVE:
return createResolveMessage(env)
if env.getAction() == ACTION_RESOLVE_MATCH:
return createResolveMatchMessage(env)
if env.getAction() == ACTION_HELLO:
return createHelloMessage(env)
if env.getAction() == ACTION_BYE:
return createByeMessage(env)
def createProbeMessage(env):
doc = createSkelSoapMessage(ACTION_PROBE)
bodyEl = getBodyEl(doc)
headerEl = getHeaderEl(doc)
addElementWithText(doc, headerEl, "a:MessageID", NS_A, env.getMessageId())
addElementWithText(doc, headerEl, "a:To", NS_A, env.getTo())
if len(env.getReplyTo()) > 0:
addElementWithText(doc, headerEl, "a:ReplyTo", NS_A, env.getReplyTo())
probeEl = doc.createElementNS(NS_D, "d:Probe")
bodyEl.appendChild(probeEl)
addTypes(doc, probeEl, env.getTypes())
addScopes(doc, probeEl, env.getScopes())
return getDocAsString(doc)
def createProbeMatchMessage(env):
doc = createSkelSoapMessage(ACTION_PROBE_MATCH)
bodyEl = getBodyEl(doc)
headerEl = getHeaderEl(doc)
addElementWithText(doc, headerEl, "a:MessageID", NS_A, env.getMessageId())
addElementWithText(doc, headerEl, "a:RelatesTo", NS_A, env.getRelatesTo())
addElementWithText(doc, headerEl, "a:To", NS_A, env.getTo())
appSeqEl = doc.createElementNS(NS_D, "d:AppSequence")
appSeqEl.setAttribute("InstanceId", env.getInstanceId())
appSeqEl.setAttribute("MessageNumber", env.getMessageNumber())
headerEl.appendChild(appSeqEl)
probeMatchesEl = doc.createElementNS(NS_D, "d:ProbeMatches")
probeMatches = env.getProbeResolveMatches()
for probeMatch in probeMatches:
probeMatchEl = doc.createElementNS(NS_D, "d:ProbeMatch")
addEPR(doc, probeMatchEl, probeMatch.getEPR())
addTypes(doc, probeMatchEl, probeMatch.getTypes())
addScopes(doc, probeMatchEl, probeMatch.getScopes())
addXAddrs(doc, probeMatchEl, probeMatch.getXAddrs())
addElementWithText(doc, probeMatchEl, "d:MetadataVersion", NS_D, probeMatch.getMetadataVersion())
probeMatchesEl.appendChild(probeMatchEl)
bodyEl.appendChild(probeMatchesEl)
return getDocAsString(doc)
def createResolveMessage(env):
doc = createSkelSoapMessage(ACTION_RESOLVE)
bodyEl = getBodyEl(doc)
headerEl = getHeaderEl(doc)
addElementWithText(doc, headerEl, "a:MessageID", NS_A, env.getMessageId())
addElementWithText(doc, headerEl, "a:To", NS_A, env.getTo())
if len(env.getReplyTo()) > 0:
addElementWithText(doc, headerEl, "a:ReplyTo", NS_A, env.getReplyTo())
resolveEl = doc.createElementNS(NS_D, "d:Resolve")
addEPR(doc, resolveEl, env.getEPR())
bodyEl.appendChild(resolveEl)
return getDocAsString(doc)
def createResolveMatchMessage(env):
doc = createSkelSoapMessage(ACTION_RESOLVE_MATCH)
bodyEl = getBodyEl(doc)
headerEl = getHeaderEl(doc)
addElementWithText(doc, headerEl, "a:MessageID", NS_A, env.getMessageId())
addElementWithText(doc, headerEl, "a:RelatesTo", NS_A, env.getRelatesTo())
addElementWithText(doc, headerEl, "a:To", NS_A, env.getTo())
appSeqEl = doc.createElementNS(NS_D, "d:AppSequence")
appSeqEl.setAttribute("InstanceId", env.getInstanceId())
appSeqEl.setAttribute("MessageNumber", env.getMessageNumber())
headerEl.appendChild(appSeqEl)
resolveMatchesEl = doc.createElementNS(NS_D, "d:ResolveMatches")
if len(env.getProbeResolveMatches()) > 0:
resolveMatch = env.getProbeResolveMatches()[0]
resolveMatchEl = doc.createElementNS(NS_D, "d:ResolveMatch")
addEPR(doc, resolveMatchEl, resolveMatch.getEPR())
addTypes(doc, resolveMatchEl, resolveMatch.getTypes())
addScopes(doc, resolveMatchEl, resolveMatch.getScopes())
addXAddrs(doc, resolveMatchEl, resolveMatch.getXAddrs())
addElementWithText(doc, resolveMatchEl, "d:MetadataVersion", NS_D, resolveMatch.getMetadataVersion())
resolveMatchesEl.appendChild(resolveMatchEl)
bodyEl.appendChild(resolveMatchesEl)
return getDocAsString(doc)
def createHelloMessage(env):
doc = createSkelSoapMessage(ACTION_HELLO)
bodyEl = getBodyEl(doc)
headerEl = getHeaderEl(doc)
addElementWithText(doc, headerEl, "a:MessageID", NS_A, env.getMessageId())
if len(env.getRelatesTo()) > 0:
addElementWithText(doc, headerEl, "a:RelatesTo", NS_A, env.getRelatesTo())
relatesToEl = headerEl.getElementsByTagNameNS(NS_A, "RelatesTo")[0]
relatesToEl.setAttribute("RelationshipType", "d:Suppression")
addElementWithText(doc, headerEl, "a:To", NS_A, env.getTo())
appSeqEl = doc.createElementNS(NS_D, "d:AppSequence")
appSeqEl.setAttribute("InstanceId", env.getInstanceId())
appSeqEl.setAttribute("MessageNumber", env.getMessageNumber())
headerEl.appendChild(appSeqEl)
helloEl = doc.createElementNS(NS_D, "d:Hello")
addEPR(doc, helloEl, env.getEPR())
addTypes(doc, helloEl, env.getTypes())
addScopes(doc, helloEl, env.getScopes())
addXAddrs(doc, helloEl, env.getXAddrs())
addElementWithText(doc, helloEl, "d:MetadataVersion", NS_D, env.getMetadataVersion())
bodyEl.appendChild(helloEl)
return getDocAsString(doc)
def createByeMessage(env):
doc = createSkelSoapMessage(ACTION_BYE)
bodyEl = getBodyEl(doc)
headerEl = getHeaderEl(doc)
addElementWithText(doc, headerEl, "a:MessageID", NS_A, env.getMessageId())
addElementWithText(doc, headerEl, "a:To", NS_A, env.getTo())
appSeqEl = doc.createElementNS(NS_D, "d:AppSequence")
appSeqEl.setAttribute("InstanceId", env.getInstanceId())
appSeqEl.setAttribute("MessageNumber", env.getMessageNumber())
headerEl.appendChild(appSeqEl)
byeEl = doc.createElementNS(NS_D, "d:Bye")
addEPR(doc, byeEl, env.getEPR())
bodyEl.appendChild(byeEl)
return getDocAsString(doc)
def extractSoapUdpAddressFromURI(uri):
val = uri.getPathExQueryFragment().split(":")
part1 = val[0][2:]
part2 = None
if val[1].count('/') > 0:
part2 = int(val[1][:val[1].index('/')])
else:
part2 = int(val[1])
addr = [part1, part2]
return addr
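# Illustrative example (added by the editor): for a discovery-proxy address such
# as "soap.udp://239.255.255.250:3702", URI() parses scheme 'soap.udp' and path
# '//239.255.255.250:3702', and extractSoapUdpAddressFromURI() then returns
# ['239.255.255.250', 3702] (host string plus integer port).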
class MessageReceiverThread(threading.Thread):
def __init__(self, sock, midMap, iidMap, observer):
self.__sock = sock
self.__midMap = midMap
self.__iidMap = iidMap
self.__observer = observer
self.__stop = False
threading.Thread.__init__(self)
self.setDaemon(True)
def run(self):
while not self.__stop:
val = readMessage(self.__sock)
if val is None:
time.sleep(0.01)
continue
(data, addr) = val
env = parseEnvelope(data)
mid = env.getMessageId()
if self.__midMap.has_key(mid):
continue
else:
self.__midMap[mid] = 0
iid = env.getInstanceId()
mid = env.getMessageId()
if iid > 0:
mnum = env.getMessageNumber()
key = addr[0] + ":" + str(addr[1]) + ":" + str(iid)
if mid is not None and len(mid) > 0:
key = key + ":" + mid
if not self.__iidMap.has_key(key):
self.__iidMap[key] = iid
else:
tmnum = self.__iidMap[key]
if mnum > tmnum:
self.__iidMap[key] = mnum
else:
continue
self.__observer.envReceived(env, addr)
def stop(self):
self.__stop = True
class MessageSenderThread(threading.Thread):
def __init__(self, sock, midMap, udpRepeat, udpMinDelay, udpMaxDelay, udpUpperDelay):
self.__sock = sock
self.__midMap = midMap
self.__udpRepeat = udpRepeat
self.__udpMinDelay = udpMinDelay
self.__udpMaxDelay = udpMaxDelay
self.__udpUpperDelay = udpUpperDelay
self.__stop = False
self.__queue = []
threading.Thread.__init__(self)
self.setDaemon(True)
def getUdpRepeat(self):
return self.__udpRepeat
def setUdpRepeat(self, udpRepeat):
self.__udpRepeat = udpRepeat
def getUdpMinDelay(self):
return self.__udpMinDelay
def setUdpMinDelay(self, udpMinDelay):
self.__udpMinDelay = udpMinDelay
def getUdpMaxDelay(self):
return self.__udpMaxDelay
def setUdpMaxDelay(self, udpMaxDelay):
self.__udpMaxDelay = udpMaxDelay
def getUdpUpperDelay(self):
return self.__udpUpperDelay
def setUdpUpperDelay(self, udpUpperDelay):
self.__udpUpperDelay = udpUpperDelay
def addMessage(self, env, addr, port, initialDelay=0):
msg = Message(env, addr, port, self.__udpRepeat, \
self.__udpMinDelay, self.__udpMaxDelay, self.__udpUpperDelay, initialDelay)
self.__queue.append(msg)
self.__midMap[env.getMessageId()] = 0
def stop(self):
self.__stop = True
def run(self):
while not self.__stop or len(self.__queue) > 0:
if len(self.__queue) == 0:
time.sleep(0.1)
continue
msg = self.__queue.pop(0)
if msg.canSend():
data = createMessage(msg.getEnv())
sendMessage(self.__sock, msg.getAddr(), msg.getPort(), data)
msg.refresh()
if not (msg.isFinished()):
self.__queue.append(msg)
else:
self.__queue.append(msg)
time.sleep(0.01)
class Message:
def __init__(self, env, addr, port, udpRepeat, udpMinDelay, udpMaxDelay, udpUpperDelay, initialDelay=0):
self.__env = env
self.__addr = addr
self.__port = port
self.__udpRepeat = udpRepeat
self.__udpUpperDelay = udpUpperDelay
self.__t = (udpMinDelay + ((udpMaxDelay - udpMinDelay) * random.random())) / 2
self.__nextTime = int(time.time() * 1000) + initialDelay
def getEnv(self):
return self.__env
def getAddr(self):
return self.__addr
def getPort(self):
return self.__port
def isFinished(self):
return self.__udpRepeat <= 0
def canSend(self):
ct = int(time.time() * 1000)
return self.__nextTime < ct
def refresh(self):
self.__t = self.__t * 2
if self.__t > self.__udpUpperDelay:
self.__t = self.__udpUpperDelay
self.__nextTime = int(time.time() * 1000) + self.__t
self.__udpRepeat = self.__udpRepeat - 1
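# Illustrative note (added by the editor): the scheduling above implements a
# simple SOAP-over-UDP style retransmission. The first send happens after the
# optional initialDelay; after every send, refresh() pushes nextTime out by t
# milliseconds, doubles t (capped at udpUpperDelay) and decrements the repeat
# counter. With the multicast defaults (repeat=4, min=50, max=250, upper=500)
# the initial t is drawn uniformly from 25-125 ms, so a message goes out up to
# four times with roughly t, 2t and 4t gaps between transmissions.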
class Service:
def __init__(self, types, scopes, xAddrs, epr, instanceId):
self.__types = types
self.__scopes = scopes
self.__xAddrs = xAddrs
self.__epr = epr
self.__instanceId = instanceId
self.__messageNumber = 0
self.__metadataVersion = 1
def getTypes(self):
return self.__types
def setTypes(self, types):
self.__types = types
def getScopes(self):
return self.__scopes
def setScopes(self, scopes):
self.__scopes = scopes
def getXAddrs(self):
return self.__xAddrs
def setXAddrs(self, xAddrs):
self.__xAddrs = xAddrs
def getEPR(self):
return self.__epr
def setEPR(self, epr):
self.__epr = epr
def getInstanceId(self):
return self.__instanceId
def setInstanceId(self, instanceId):
self.__instanceId = instanceId
def getMessageNumber(self):
return self.__messageNumber
def setMessageNumber(self, messageNumber):
self.__messageNumber = messageNumber
def getMetadataVersion(self):
return self.__metadataVersion
def setMetadataVersion(self, metadataVersion):
self.__metadataVersion = metadataVersion
def incrementMessageNumber(self):
self.__messageNumber = self.__messageNumber + 1
class WSDiscovery:
def __init__(self):
self.__sockMultiOut = None
self.__sockMultiIn = None
self.__sockUniOut = None
self.__multicastSenderThread = None
self.__multicastReceiverThread = None
self.__unicastSenderThread = None
self.__unicastReceiverThread = None
self.__serverStarted = False
self.__remoteServices = {}
self.__localServices = {}
self.__dpActive = False
self.__dpAddr = None
self.__dpEPR = None
def __addRemoteService(self, types, scopes, xAddrs, epr):
service = Service(types, scopes, xAddrs, epr, 0)
self.__remoteServices[epr] = service
def __removeRemoteService(self, epr):
if self.__remoteServices.has_key(epr):
del self.__remoteServices[epr]
def handleEnv(self, env, addr):
if (env.getAction() == ACTION_PROBE_MATCH):
for match in env.getProbeResolveMatches():
self.__addRemoteService(match.getTypes(), match.getScopes(), match.getXAddrs(), match.getEPR())
if match.getXAddrs() is None or len(match.getXAddrs()) == 0:
self.__sendResolve(match.getEPR())
elif env.getAction() == ACTION_RESOLVE_MATCH:
for match in env.getProbeResolveMatches():
self.__addRemoteService(match.getTypes(), match.getScopes(), match.getXAddrs(), match.getEPR())
elif env.getAction() == ACTION_PROBE:
services = self.__filterServices(self.__localServices.values(), env.getTypes(), env.getScopes())
self.__sendProbeMatch(services, env.getMessageId(), addr)
elif env.getAction() == ACTION_RESOLVE:
if self.__localServices.has_key(env.getEPR()):
service = self.__localServices[env.getEPR()]
self.__sendResolveMatch(service, env.getMessageId(), addr)
elif env.getAction() == ACTION_HELLO:
#check if it is from a discovery proxy
rt = env.getRelationshipType()
if rt is not None and rt.getLocalname() == "Suppression" and rt.getNamespace() == NS_D:
xAddr = env.getXAddrs()[0]
#only support 'soap.udp'
if xAddr.startswith("soap.udp:"):
self.__dpActive = True
self.__dpAddr = extractSoapUdpAddressFromURI(URI(xAddr))
self.__dpEPR = env.getEPR()
self.__addRemoteService(env.getTypes(), env.getScopes(), env.getXAddrs(), env.getEPR())
elif env.getAction() == ACTION_BYE:
#if the bye is from discovery proxy... revert back to multicasting
if self.__dpActive and self.__dpEPR == env.getEPR():
self.__dpActive = False
self.__dpAddr = None
self.__dpEPR = None
self.__removeRemoteService(env.getEPR())
def envReceived(self, env, addr):
thread.start_new_thread(self.handleEnv, (env, addr))
def __sendResolveMatch(self, service, relatesTo, addr):
service.incrementMessageNumber()
env = SoapEnvelope()
env.setAction(ACTION_RESOLVE_MATCH)
env.setTo(ADDRESS_UNKNOWN)
env.setMessageId(uuid.uuid4().get_urn())
env.setInstanceId(str(service.getInstanceId()))
env.setMessageNumber(str(service.getMessageNumber()))
env.setRelatesTo(relatesTo)
env.getProbeResolveMatches().append(ProbeResolveMatch(service.getEPR(), \
service.getTypes(), service.getScopes(), \
service.getXAddrs(), str(service.getMetadataVersion())))
self.__unicastSenderThread.addMessage(env, addr[0], addr[1])
def __sendProbeMatch(self, services, relatesTo, addr):
env = SoapEnvelope()
env.setAction(ACTION_PROBE_MATCH)
env.setTo(ADDRESS_UNKNOWN)
env.setMessageId(uuid.uuid4().get_urn())
random.seed((int)(time.time() * 1000000))
env.setInstanceId(str(random.randint(1, 0xFFFFFFF)))
env.setMessageNumber("1")
env.setRelatesTo(relatesTo)
for service in services:
env.getProbeResolveMatches().append(ProbeResolveMatch(service.getEPR(), \
service.getTypes(), service.getScopes(), \
service.getXAddrs(), str(service.getMetadataVersion())))
self.__unicastSenderThread.addMessage(env, addr[0], addr[1], random.randint(0, APP_MAX_DELAY))
def __sendProbe(self, types=None, scopes=None):
env = SoapEnvelope()
env.setAction(ACTION_PROBE)
env.setTo(ADDRESS_ALL)
env.setMessageId(uuid.uuid4().get_urn())
env.setTypes(types)
env.setScopes(scopes)
if self.__dpActive:
self.__unicastSenderThread.addMessage(env, self.__dpAddr[0], self.__dpAddr[1])
else:
self.__multicastSenderThread.addMessage(env, MULTICAST_IPV4_ADDRESS, MULTICAST_PORT)
def __sendResolve(self, epr):
env = SoapEnvelope()
env.setAction(ACTION_RESOLVE)
env.setTo(ADDRESS_ALL)
env.setMessageId(uuid.uuid4().get_urn())
env.setEPR(epr)
if self.__dpActive:
self.__unicastSenderThread.addMessage(env, self.__dpAddr[0], self.__dpAddr[1])
else:
self.__multicastSenderThread.addMessage(env, MULTICAST_IPV4_ADDRESS, MULTICAST_PORT)
def __sendHello(self, service):
service.incrementMessageNumber()
env = SoapEnvelope()
env.setAction(ACTION_HELLO)
env.setTo(ADDRESS_ALL)
env.setMessageId(uuid.uuid4().get_urn())
env.setInstanceId(str(service.getInstanceId()))
env.setMessageNumber(str(service.getMessageNumber()))
env.setTypes(service.getTypes())
env.setScopes(service.getScopes())
env.setXAddrs(service.getXAddrs())
env.setEPR(service.getEPR())
random.seed((int)(time.time() * 1000000))
self.__multicastSenderThread.addMessage(env, MULTICAST_IPV4_ADDRESS, MULTICAST_PORT, random.randint(0, APP_MAX_DELAY))
def __sendBye(self, service):
env = SoapEnvelope()
env.setAction(ACTION_BYE)
env.setTo(ADDRESS_ALL)
env.setMessageId(uuid.uuid4().get_urn())
env.setInstanceId(str(service.getInstanceId()))
env.setMessageNumber(str(service.getMessageNumber()))
env.setEPR(service.getEPR())
service.incrementMessageNumber()
self.__multicastSenderThread.addMessage(env, MULTICAST_IPV4_ADDRESS, MULTICAST_PORT)
def start(self):
'start the discovery server - should be called before using other functions'
self.__startThreads()
self.__serverStarted = True
def stop(self):
'cleans up and stops the discovery server'
self.clearRemoteServices()
self.clearLocalServices()
self.__stopThreads()
self.__serverStarted = False
def __startThreads(self):
if self.__multicastSenderThread is not None:
return
self.__sockMultiOut = createMulticastOutSocket()
self.__sockMultiIn = createMulticastInSocket()
self.__sockUniOut = createUnicastOutSocket()
iidMap = {}
midMap = {}
self.__multicastSenderThread = MessageSenderThread(self.__sockMultiOut, midMap, \
MULTICAST_UDP_REPEAT, MULTICAST_UDP_MIN_DELAY, \
MULTICAST_UDP_MAX_DELAY, MULTICAST_UDP_UPPER_DELAY)
self.__multicastSenderThread.start()
self.__unicastSenderThread = MessageSenderThread(self.__sockUniOut, midMap, \
UNICAST_UDP_REPEAT, UNICAST_UDP_MIN_DELAY, \
UNICAST_UDP_MAX_DELAY, UNICAST_UDP_UPPER_DELAY)
self.__unicastSenderThread.start()
self.__multicastReceiverThread = MessageReceiverThread(self.__sockMultiIn, midMap, iidMap, self)
self.__multicastReceiverThread.start()
self.__unicastReceiverThread = MessageReceiverThread(self.__sockMultiOut, midMap, iidMap, self)
self.__unicastReceiverThread.start()
def __stopThreads(self):
if self.__multicastSenderThread is None:
return
self.__unicastReceiverThread.stop()
self.__unicastReceiverThread.join()
self.__multicastReceiverThread.stop()
self.__multicastReceiverThread.join()
self.__unicastSenderThread.stop()
self.__unicastSenderThread.join()
self.__multicastSenderThread.stop()
self.__multicastSenderThread.join()
self.__sockMultiOut.close()
self.__sockMultiIn.close()
self.__sockUniOut.close()
self.__sockMultiOut = None
self.__sockMultiIn = None
self.__sockUniOut = None
self.__multicastSenderThread = None
self.__multicastReceiverThread = None
self.__unicastSenderThread = None
self.__unicastReceiverThread = None
def __isTypeInList(self, ttype, types):
for entry in types:
if matchType(ttype, entry):
return True
return False
def __isScopeInList(self, scope, scopes):
for entry in scopes:
if matchScope(scope.getValue(), entry.getValue(), scope.getMatchBy()):
return True
return False
def __filterServices(self, services, types, scopes):
ret = []
ok = True
for service in services:
ok = True
if types is not None:
for ttype in types:
if not self.__isTypeInList(ttype, service.getTypes()):
ok = False
break
if ok and scopes is not None:
for scope in scopes:
if not self.__isScopeInList(scope, service.getScopes()):
ok = False
break
if ok:
ret.append(service)
return ret
def clearRemoteServices(self):
'clears remotely discovered services'
self.__remoteServices.clear()
def clearLocalServices(self):
'send Bye messages for the services and remove them'
for service in self.__localServices.values():
self.__sendBye(service)
self.__localServices.clear()
def searchServices(self, types=None, scopes=None, timeout=3):
'search for services given the TYPES and SCOPES in a given TIMEOUT'
if not self.__serverStarted:
raise Exception("Server not started")
self.__sendProbe(types, scopes)
time.sleep(timeout)
return self.__filterServices(self.__remoteServices.values(), types, scopes)
def publishService(self, types, scopes, xAddrs):
'publish a service with the given TYPES, SCOPES and XAddrs (service addresses)'
if not self.__serverStarted:
raise Exception("Server not started")
instanceId = int(time.time() * 1000000)
epr = uuid.uuid4().get_urn()
service = Service(types, scopes, xAddrs, epr, instanceId)
self.__localServices[epr] = service
self.__sendHello(service)
time.sleep(0.001)
def showEnv(env):
print "-----------------------------"
print "Action: %s" % env.getAction()
print "MessageId: %s" % env.getMessageId()
print "InstanceId: %s" % env.getInstanceId()
print "MessageNumber: %s" % env.getMessageNumber()
print "Reply To: %s" % env.getReplyTo()
print "To: %s" % env.getTo()
print "RelatesTo: %s" % env.getRelatesTo()
print "Relationship Type: %s" % env.getRelationshipType()
print "Types: %s" % env.getTypes()
print "Scopes: %s" % env.getScopes()
print "EPR: %s" % env.getEPR()
print "Metadata Version: %s" % env.getMetadataVersion()
print "Probe Matches: %s" % env.getProbeResolveMatches()
print "-----------------------------"
if __name__ == "__main__":
wsd = WSDiscovery()
wsd.start()
ttype = QName("abc", "def")
ttype1 = QName("namespace", "myTestService")
scope1 = Scope("http://myscope")
ttype2 = QName("namespace", "myOtherTestService_type1")
scope2 = Scope("http://other_scope")
xAddr = "localhost:8080/abc"
wsd.publishService(types=[ttype], scopes=[scope2], xAddrs=[xAddr])
#ret = wsd.searchServices(scopes=[scope1], timeout=10)
ret = wsd.searchServices()
for service in ret:
print service.getEPR() + ":" + service.getXAddrs()[0]
wsd.stop()
| MyPeWa/python-ws-discovery | WSDiscovery.py | Python | lgpl-3.0 | 46,468 |
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server.models.tapi_common_layer_protocol_name import TapiCommonLayerProtocolName # noqa: F401,E501
from tapi_server.models.tapi_common_name_and_value import TapiCommonNameAndValue # noqa: F401,E501
from tapi_server.models.tapi_oam_mip import TapiOamMip # noqa: F401,E501
from tapi_server.models.tapi_odu_mip_augmentation1 import TapiOduMipAugmentation1 # noqa: F401,E501
from tapi_server.models.tapi_odu_odu_mip_spec import TapiOduOduMipSpec # noqa: F401,E501
from tapi_server import util
class TapiOamMegMip(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, name=None, local_id=None, layer_protocol_name=None, odu_mip_spec=None): # noqa: E501
"""TapiOamMegMip - a model defined in OpenAPI
:param name: The name of this TapiOamMegMip. # noqa: E501
:type name: List[TapiCommonNameAndValue]
:param local_id: The local_id of this TapiOamMegMip. # noqa: E501
:type local_id: str
:param layer_protocol_name: The layer_protocol_name of this TapiOamMegMip. # noqa: E501
:type layer_protocol_name: TapiCommonLayerProtocolName
:param odu_mip_spec: The odu_mip_spec of this TapiOamMegMip. # noqa: E501
:type odu_mip_spec: TapiOduOduMipSpec
"""
self.openapi_types = {
'name': List[TapiCommonNameAndValue],
'local_id': str,
'layer_protocol_name': TapiCommonLayerProtocolName,
'odu_mip_spec': TapiOduOduMipSpec
}
self.attribute_map = {
'name': 'name',
'local_id': 'local-id',
'layer_protocol_name': 'layer-protocol-name',
'odu_mip_spec': 'odu-mip-spec'
}
self._name = name
self._local_id = local_id
self._layer_protocol_name = layer_protocol_name
self._odu_mip_spec = odu_mip_spec
@classmethod
def from_dict(cls, dikt) -> 'TapiOamMegMip':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The tapi.oam.meg.Mip of this TapiOamMegMip. # noqa: E501
:rtype: TapiOamMegMip
"""
return util.deserialize_model(dikt, cls)
@property
def name(self):
"""Gets the name of this TapiOamMegMip.
List of names. A property of an entity with a value that is unique in some namespace but may change during the life of the entity. A name carries no semantics with respect to the purpose of the entity. # noqa: E501
:return: The name of this TapiOamMegMip.
:rtype: List[TapiCommonNameAndValue]
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this TapiOamMegMip.
List of names. A property of an entity with a value that is unique in some namespace but may change during the life of the entity. A name carries no semantics with respect to the purpose of the entity. # noqa: E501
:param name: The name of this TapiOamMegMip.
:type name: List[TapiCommonNameAndValue]
"""
self._name = name
@property
def local_id(self):
"""Gets the local_id of this TapiOamMegMip.
none # noqa: E501
:return: The local_id of this TapiOamMegMip.
:rtype: str
"""
return self._local_id
@local_id.setter
def local_id(self, local_id):
"""Sets the local_id of this TapiOamMegMip.
none # noqa: E501
:param local_id: The local_id of this TapiOamMegMip.
:type local_id: str
"""
self._local_id = local_id
@property
def layer_protocol_name(self):
"""Gets the layer_protocol_name of this TapiOamMegMip.
:return: The layer_protocol_name of this TapiOamMegMip.
:rtype: TapiCommonLayerProtocolName
"""
return self._layer_protocol_name
@layer_protocol_name.setter
def layer_protocol_name(self, layer_protocol_name):
"""Sets the layer_protocol_name of this TapiOamMegMip.
:param layer_protocol_name: The layer_protocol_name of this TapiOamMegMip.
:type layer_protocol_name: TapiCommonLayerProtocolName
"""
self._layer_protocol_name = layer_protocol_name
@property
def odu_mip_spec(self):
"""Gets the odu_mip_spec of this TapiOamMegMip.
:return: The odu_mip_spec of this TapiOamMegMip.
:rtype: TapiOduOduMipSpec
"""
return self._odu_mip_spec
@odu_mip_spec.setter
def odu_mip_spec(self, odu_mip_spec):
"""Sets the odu_mip_spec of this TapiOamMegMip.
:param odu_mip_spec: The odu_mip_spec of this TapiOamMegMip.
:type odu_mip_spec: TapiOduOduMipSpec
"""
self._odu_mip_spec = odu_mip_spec
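# Illustrative sketch (added by the editor, not part of the generated code):
# constructing an instance directly; the values are hypothetical.
#
#   mip = TapiOamMegMip(local_id='mip-0')
#   mip.local_id   # -> 'mip-0'
#
# attribute_map above is what renames the Python attributes to the REST-style
# keys 'local-id', 'layer-protocol-name' and 'odu-mip-spec' when the model is
# (de)serialized by the surrounding framework code.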
| karthik-sethuraman/ONFOpenTransport | RI/flask_server/tapi_server/models/tapi_oam_meg_mip.py | Python | apache-2.0 | 5,067 |
#!/usr/bin/env python
import argparse
import plistlib
import json
import os
import sys
valid_bumps = ['major', 'minor', 'bugfix']
#
# Argparse
#
parser = argparse.ArgumentParser(description='Bump version in xcode project and podspec')
parser.add_argument('bump', type=str, nargs='?', default=valid_bumps[2], choices=valid_bumps, help='Which part of the semantic version to bump (default: bugfix)')
parser.add_argument('--version', dest='version', help='explicitly set a version, ignoring the semantic version bump argument')
parser.add_argument('--tag', action='store_true', help='tag the git repository with new version')
# parser.add_argument('--no-build', action='store_true', help='doesn\'t increment the build number')
parser.add_argument('-p', metavar='plist', help='plist file to operate on')
parser.add_argument('-s', metavar='podspec', help='podspec file to operate on')
args = parser.parse_args()
def error(msg):
print("=== Error ===\n%s\n=============\n" % msg)
parser.print_help()
sys.exit(-1)
project_name = None
podspec = args.s
plist = args.p
if (plist is None) or (podspec is None):
for element in os.listdir('.'):
if 'xcodeproj' in element:
project_name = element.split('.')[0]
break
if project_name is None:
error('You\'re not in an Xcode project directory, cannot find a .xcodeproj\nPlease specify the Info.plist and podspec.json files to operate on')
if plist is None:
plist = "%s/%s-Info.plist" % (project_name, project_name)
if podspec is None:
podspec = "%s.podspec.json" % project_name
# check files
if not os.path.isfile(plist):
error('plist: %s file not found' % plist)
if not os.path.isfile(podspec):
error('podspec: %s file not found' % podspec)
# read plist
plist_d = plistlib.readPlist(plist)
# read podspec
f = open(podspec, 'r')
podspec_d = json.load(f)
f.close()
current_version = plist_d['CFBundleShortVersionString']
new_version = current_version
print('Current Version is %s' % current_version)
if args.version is None:
version_split = current_version.split('.')
if args.bump == 'bugfix':
version_split[2] = str(int(version_split[2]) + 1)
elif args.bump == 'minor':
version_split[1] = str(int(version_split[1]) + 1)
version_split[2] = '0'
elif args.bump == 'major':
version_split[0] = str(int(version_split[0]) + 1)
version_split[1] = '0'
version_split[2] = '0'
print(version_split)
new_version = '.'.join(version_split)
else:
new_version = args.version
print('New Version will be %s' % new_version)
# edit objects
podspec_d['version'] = new_version
podspec_d['source']['tag'] = new_version
plist_d['CFBundleShortVersionString'] = new_version
# Saving files
print("Saving files...")
try:
with open(podspec, 'w') as fp:
json.dump(podspec_d, fp, sort_keys=False, indent=4, separators=(',', ': '))
plistlib.writePlist(plist_d, plist)
except Exception, e:
error(e.message)
print('OK')
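# Illustrative usage (added by the editor, not part of the original script);
# the file paths below are hypothetical:
#
#   python bump.py                # bugfix bump, e.g. 1.2.3 -> 1.2.4
#   python bump.py minor          # 1.2.3 -> 1.3.0
#   python bump.py major          # 1.2.3 -> 2.0.0
#   python bump.py --version 2.0.0 -p MyApp/MyApp-Info.plist -s MyApp.podspec.json
#
# Note: the --tag flag is parsed but, in the code shown here, the repository is
# never actually tagged.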
| mogui/MDWamp | bump.py | Python | apache-2.0 | 2,994 |