text stringlengths 4 1.02M | meta dict |
|---|---|
import random
from go_board import GoBoard
class GoGame:
def __init__( self, size = 0 ):
    """Start a new game with an empty size-by-size board in the history."""
    self.size = size
    self.history = [ GoBoard( size ) ]
    self.whose_turn = GoBoard.WHITE
    self.consecutive_pass_count = 0
    # Per-color capture tallies, keyed by the GoBoard color constants.
    self.captures = dict.fromkeys( ( GoBoard.WHITE, GoBoard.BLACK ), 0 )
def CurrentBoard( self ):
return self.history[ len( self.history ) - 1 ]
def OpponentOf( self, for_who ):
    """Return the color opposing for_who (BLACK <-> WHITE)."""
    if for_who == GoBoard.BLACK:
        return GoBoard.WHITE
    return GoBoard.BLACK
def PlaceStone( self, i, j ):
    """
    Play a stone for the side to move at (i, j), or pass when (i, j) is
    off-board.  On success the new position is appended to the history and
    the turn switches to the opponent.  Raises on an occupied point, suicide,
    or immediate board repetition (simple ko).
    """
    opponent = self.OpponentOf( self.whose_turn )
    # If the given coordinates are out of range, then we consider it a pass.
    if i >= 0 and i < self.size and j >= 0 and j < self.size:
        board = self.CurrentBoard()
        if board.matrix[i][j] != GoBoard.EMPTY:
            raise Exception( 'Cannot place a stone where one already exists.' )
        # Work on a clone so the pre-move position stays intact in history.
        board = board.Clone()
        board.matrix[i][j] = self.whose_turn
        # Remove opponent groups left with no liberties; each removed stone
        # presumably counts as one capture for the mover -- TODO confirm the
        # increment sits inside the per-location loop in the original layout.
        group_list = board.AnalyzeGroups( opponent )
        for group in group_list:
            if group[ 'liberties' ] == 0:
                for location in group[ 'location_list' ]:
                    board.SetState( location, GoBoard.EMPTY )
                    self.captures[ self.whose_turn ] += 1
        # After captures, the mover's own stones must still have liberties.
        group_list = board.AnalyzeGroups( self.whose_turn )
        for group in group_list:
            if group[ 'liberties' ] == 0:
                raise Exception( 'Cannot commit suicide.' )
        # Simple ko rule: the new position may not recreate the position
        # from one move ago.
        if len( self.history ) >= 2 and self.history[ len( self.history ) - 2 ] == board:
            raise Exception( 'Cannot repeat board state so soon.' )
        self.history.append( board )
        self.consecutive_pass_count = 0
        # Cap remembered positions to bound memory use.
        if len( self.history ) > 10:
            self.history.pop(0)
    else:
        self.consecutive_pass_count += 1
    self.whose_turn = opponent
def RelinquishStone( self, i, j ):
    """
    Voluntarily give up one of the mover's own stones at (i, j); the opponent
    is credited with a capture.  The stone is removed from the current board
    in place, and the turn does not change.
    """
    opponent = self.OpponentOf( self.whose_turn )
    out_of_range = i < 0 or i >= self.size or j < 0 or j >= self.size
    if out_of_range:
        raise Exception( 'Board location bogus: (%d,%d).' % ( i, j ) )
    board = self.CurrentBoard()
    if board.GetState( ( i, j ) ) != self.whose_turn:
        raise Exception( 'Can only relinquish your own stones.' )
    board.SetState( ( i, j ), GoBoard.EMPTY )
    self.captures[ opponent ] += 1
    # Notice that we do not change whose turn it is.
def CalculateScores( self ):
    """
    Score the current position: each color's score is its territory plus its
    capture count.  Returns a dict keyed by color with 'score', 'territory'
    and 'captures' entries.
    """
    territory, group_list = self.CurrentBoard().CalculateTerritory()
    scores = {}
    for color in ( GoBoard.WHITE, GoBoard.BLACK ):
        scores[ color ] = {
            'score' : territory[ color ] + self.captures[ color ],
            'territory' : territory[ color ],
            'captures' : self.captures[ color ],
        }
    return scores
def CalculateReasonableMove( self ):
    """
    Pick a move for the side to move using a one-ply greedy search.  Every
    empty point is tried; the resulting position is ranked by weighted
    heuristics (territory gained, opponent territory lost, captures made,
    own groups pulled out of atari, own group growth, opponent liberties
    removed).  No look-ahead is performed, so this plays weakly by design.
    Returns a (row, col) tuple, or (-1, -1) meaning "pass" when no move
    ranks above zero.
    """
    whose_turn = self.whose_turn
    opponent = self.OpponentOf( self.whose_turn )
    consecutive_pass_count = self.consecutive_pass_count
    captures = {
        whose_turn : self.captures[ whose_turn ],
        opponent : self.captures[ opponent ],
    }
    board = self.CurrentBoard()
    current_territory = board.CalculateTerritory()[0]
    current_opp_groups = board.AnalyzeGroups( opponent )
    current_group_list_stats = {
        whose_turn : self.CalculateGroupListStats( board.AnalyzeGroups( whose_turn ) ),
        opponent : self.CalculateGroupListStats( current_opp_groups ),
    }
    # BUG FIX: the old code read stats[ 'total_liberties' ], a key the stats
    # helper never produced, so that heuristic always raised a (silently
    # swallowed) KeyError.  Compute the liberty total directly here.
    current_opp_liberties = sum( g[ 'liberties' ] for g in current_opp_groups )
    # BUG FIX: snapshot the whole history.  The old code pushed one clone and
    # popped a single entry afterward, but a successful PlaceStone() appends
    # the new board as well (and may prune the oldest entry), so the history
    # leaked one board per evaluated move.
    saved_history = list( self.history )
    best_move = None
    best_rank = 0
    for location in board.AllLocationsOfState( GoBoard.EMPTY ):
        self.history.append( board.Clone() )
        try:
            self.PlaceStone( location[0], location[1] )
            trial_board = self.CurrentBoard()
            territory = trial_board.CalculateTerritory()[0]
            opp_groups = trial_board.AnalyzeGroups( opponent )
            group_list_stats = {
                whose_turn : self.CalculateGroupListStats( trial_board.AnalyzeGroups( whose_turn ) ),
                opponent : self.CalculateGroupListStats( opp_groups ),
            }
            opp_liberties = sum( g[ 'liberties' ] for g in opp_groups )
            rank = 0
            if territory[ whose_turn ] > current_territory[ whose_turn ]:
                rank += ( territory[ whose_turn ] - current_territory[ whose_turn ] ) * 10
            if territory[ opponent ] < current_territory[ opponent ]:
                rank += ( current_territory[ opponent ] - territory[ opponent ] ) * 9
            if self.captures[ whose_turn ] > captures[ whose_turn ]:
                rank += ( self.captures[ whose_turn ] - captures[ whose_turn ] ) * 8
            # BUG FIX: the old code looked up 'jeopardy_count ' (trailing
            # space), a key that never exists, so this heuristic always died
            # with a KeyError that the bare except silently swallowed.
            if group_list_stats[ whose_turn ][ 'jeopardy_count' ] < current_group_list_stats[ whose_turn ][ 'jeopardy_count' ]:
                rank += ( current_group_list_stats[ whose_turn ][ 'jeopardy_count' ] - group_list_stats[ whose_turn ][ 'jeopardy_count' ] ) * 7
            if group_list_stats[ whose_turn ][ 'largest_group' ] > current_group_list_stats[ whose_turn ][ 'largest_group' ]:
                rank += ( group_list_stats[ whose_turn ][ 'largest_group' ] - current_group_list_stats[ whose_turn ][ 'largest_group' ] ) * 6
            if opp_liberties < current_opp_liberties:
                rank += ( current_opp_liberties - opp_liberties ) * 5
            if rank > best_rank:
                best_rank = rank
                best_move = location
        except Exception:
            # Illegal trial move (occupied point, suicide, ko): skip it.
            pass
        finally:
            # Restore every piece of game state mutated by the trial move.
            self.history = list( saved_history )
            self.whose_turn = whose_turn
            self.consecutive_pass_count = consecutive_pass_count
            self.captures[ whose_turn ] = captures[ whose_turn ]
            self.captures[ opponent ] = captures[ opponent ]
    if not best_move:
        best_move = ( -1, -1 )
    return best_move
def CalculateGroupListStats( self, group_list ):
stats = {
'jeopardy_count' : 0,
'largest_group' : 0,
'smallest_group' : 9999,
}
for group in group_list:
if group[ 'liberties' ] == 1:
stats[ 'jeopardy_count' ] += 1
if len( group[ 'location_list' ] ) > stats[ 'largest_group' ]:
stats[ 'largest_group' ] = len( group[ 'location_list' ] )
if len( group[ 'location_list' ] ) < stats[ 'smallest_group' ]:
stats[ 'smallest_group' ] = len( group[ 'location_list' ] )
return stats
def Print( self ):
    """Print the current board position."""
    print( str( self.CurrentBoard() ) )

def PrintGroupListData( self, group_list ):
    """Print each group, sorted by liberty count, with its stones listed."""
    for group in sorted( group_list, key = lambda g : g[ 'liberties' ] ):
        print( '-------------------------' )
        stones = [ '(%d,%d)' % ( loc[0], loc[1] ) for loc in group[ 'location_list' ] ]
        print( 'Group: ' + ','.join( stones ) )
        print( 'Liberties: %d' % group[ 'liberties' ] )

def PrintGroupAnalysis( self ):
    """Print a liberty analysis of both colors' groups on the current board."""
    board = self.CurrentBoard()
    white_group_list = board.AnalyzeGroups( GoBoard.WHITE )
    black_group_list = board.AnalyzeGroups( GoBoard.BLACK )
    print( 'ANALYSIS OF BOARD' )
    print( '==================================' )
    print( 'For white:' )
    self.PrintGroupListData( white_group_list )
    print( '==================================' )
    print( 'For black:' )
    self.PrintGroupListData( black_group_list )
    print( '' )

def PrintScoreCalculation( self ):
    """Print both players' final scores."""
    scores = self.CalculateScores()
    print( 'White score: %d' % scores[ GoBoard.WHITE ][ 'score' ] )
    print( 'Black score: %d' % scores[ GoBoard.BLACK ][ 'score' ] )
def Serialize( self ):
    """Return a JSON-friendly dict capturing the full game state."""
    return {
        'size' : self.size,
        'history' : [ board.Serialize() for board in self.history ],
        'whose_turn' : self.whose_turn,
        'consecutive_pass_count' : self.consecutive_pass_count,
        # Capture counts are stored under readable color names.
        'captures' : {
            'white' : self.captures[ GoBoard.WHITE ],
            'black' : self.captures[ GoBoard.BLACK ],
        }
    }
def Deserialize( self, data ):
    """Restore game state from a dict previously produced by Serialize."""
    self.size = data[ 'size' ]
    self.history = [ GoBoard().Deserialize( board ) for board in data[ 'history' ] ]
    self.whose_turn = data[ 'whose_turn' ]
    self.consecutive_pass_count = data[ 'consecutive_pass_count' ]
    # Internally captures are keyed by color constants; the serialized form
    # uses the readable 'white'/'black' names.
    self.captures = {
        GoBoard.WHITE : data[ 'captures' ][ 'white' ],
        GoBoard.BLACK : data[ 'captures' ][ 'black' ],
} | {
"content_hash": "fb81235d07b5a3e7a11bf4fe8ea7d19c",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 148,
"avg_line_length": 46.10377358490566,
"alnum_prop": 0.5373439738080622,
"repo_name": "spencerparkin/Go",
"id": "3b8eb3f804ac11f8f9a438d3c30a3daef477cae5",
"size": "9788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "go_game.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "439"
},
{
"name": "HTML",
"bytes": "21398"
},
{
"name": "JavaScript",
"bytes": "2572"
},
{
"name": "Python",
"bytes": "31828"
}
],
"symlink_target": ""
} |
import os
import program_rust
import task_primes
class Program(program_rust.ProgramRust):
    """Benchmark program wrapping the Rust primes implementation."""

    def __init__(self, scale):
        # The Rust source lives alongside this module.
        here = os.path.dirname(os.path.realpath(__file__))
        rust_source = os.path.join(here, 'primes.rs')
        super(Program, self).__init__(
            source=rust_source,
            task=task_primes.TaskPrimes(scale=scale))
| {
"content_hash": "d41e5a3a4a139b92a6adde670a7602b6",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 61,
"avg_line_length": 31.09090909090909,
"alnum_prop": 0.7017543859649122,
"repo_name": "eterevsky/benchmarks",
"id": "b3c1a90688a3e800ae5cd3b855159de58403bf1c",
"size": "342",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rust_primes/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1578"
},
{
"name": "Python",
"bytes": "7249"
},
{
"name": "Rust",
"bytes": "1575"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from web import models
# Register every content model with the default admin site, in the same
# order as before.
for _model in (
    models.SocialLink,
    models.Stat,
    models.Project,
    models.Tag,
    models.SocialTag,
    models.FAQ,
    models.PublicTag,
    models.Public,
):
    admin.site.register(_model)
| {
"content_hash": "8cba2f0cf5cd378458e5401634e2a9a8",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 38,
"avg_line_length": 30.90909090909091,
"alnum_prop": 0.8294117647058824,
"repo_name": "db0company/db0.company",
"id": "9c610a0d52fb5a3d08c52457024e76e276447fef",
"size": "340",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "9028"
},
{
"name": "HTML",
"bytes": "10913"
},
{
"name": "JavaScript",
"bytes": "2714"
},
{
"name": "Python",
"bytes": "25861"
}
],
"symlink_target": ""
} |
"""
Script to update Route53 with AWS hostnames found in nventory. Initially
works only on AWS hostnames (CNAMEs).
"""
import argparse
from zambi import ZambiConn
from nvlib import Nventory
from c3.utils.graphite import Graphite
from c3.aws.route53.hostedzone import HostedZone
class Nv2Route53(object):
    ''' Update Route53 with AWS hostnames in nventory. '''

    def __init__(self, options):
        ''' Sync every matching account, optionally reporting to graphite. '''
        self.options = options
        self.cmgr = ZambiConn()
        self.accounts = self.cmgr.get_accounts(self.options.aws_account)
        # NOTE: the loop variable is deliberately an attribute so the helper
        # methods can see the account currently being processed.
        for self.account in self.accounts:
            self.dns = None
            self.update_records()
            if self.options.graphite:
                self.send_metrics()

    def send_metrics(self):
        ''' Send route53 metrics to graphite '''
        prefix = 'business.aws.route53.%s' % self.account
        (creates, updates, count) = self.dns.get_metrics()
        grp = Graphite(server=self.options.graphite_server)
        grp.send_metric(prefix + ".updates", updates, self.options.debug)
        grp.send_metric(prefix + ".creates", creates, self.options.debug)
        grp.send_metric(prefix + ".count", count, self.options.debug)

    def update_records(self):
        ''' Run route53 updates based on nventory '''
        nvd = Nventory(ini_file=self.options.nv_ini)
        conn = self.cmgr.get_connection(self.account, service='route53')
        self.dns = HostedZone(conn, self.account, self.options.comment,
                              domain=self.options.domain)
        query = {
            'loc': '',
            'env': '',
            'sclass': '',
            'acct': self.account,
            'domain': self.options.domain}
        # Build CNAME records for every named node with an EC2 instance id.
        cname_records = dict()
        for node in nvd.get_nodes(query):
            if node['name'] and node['ec2_instance_id']:
                cname_records[node['name']] = {
                    'type': 'CNAME',
                    'ttl': self.options.ttl,
                    'resource': [node['ec2_public_hostname']]}
        if cname_records:
            self.dns.add_update_records(
                cname_records, record_type='CNAME', ttl=self.options.ttl)
        else:
            print('INFO: No nodes found in external data source.')
def parser_setup():
    ''' Setup the options parser '''
    parser = argparse.ArgumentParser(
        description='Manages Route53 from external node classifier. ')
    parser.add_argument('-a', action='store', dest='aws_account',
                        help='Use AWS Account')
    parser.add_argument('-d', action='store', dest='domain',
                        help='Use domain, top level tld')
    parser.add_argument('-t', action='store', type=int, dest='ttl',
                        default=60,
                        help='Set Route53 record ttl')
    parser.add_argument('-c', action='store', dest='comment',
                        default='Managed by c3-nv2route53.py',
                        help='Set Route53 record comment')
    parser.add_argument('-g', action='store_true', dest='graphite',
                        default=False,
                        help='Send output to graphite')
    parser.add_argument('-G', action='store', dest='graphite_server',
                        default='dev.relay-aws.graphite.ctgrd.com',
                        help='the graphite server to send to')
    parser.add_argument('-D', action='store_true', dest='debug',
                        default=False,
                        help="Print, but don't actually send anything")
    parser.add_argument('-n', action='store', dest='nv_ini',
                        default="/app/secrets/nv_prd.ini",
                        help='External DB ini file to use; useful for testing')
    return parser
def main():
    ''' Setup options and call main program '''
    options = parser_setup().parse_args()
    Nv2Route53(options)


if __name__ == '__main__':
    main()
| {
"content_hash": "4214eefc7fd95aad2d16e1bcddb42deb",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 72,
"avg_line_length": 36.63865546218487,
"alnum_prop": 0.5128440366972477,
"repo_name": "CityGrid/c3",
"id": "a7b0756c7a4ffd6bac9b5b64b8942c0cf767c0ae",
"size": "4975",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/c3nv2route53.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Perl6",
"bytes": "5"
},
{
"name": "Python",
"bytes": "168033"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages

# Minimal packaging metadata for the BIDS results aggregator; all packages
# under the project root are included automatically.
setup(
    name='aggregator',
    author='Alvin Wan',
    version='0.0.1',
    description='aggregates BIDS analysis results into Graph database',
    packages=find_packages()
)
| {
"content_hash": "b94a935488b8331944326ad606c7a2ea",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 71,
"avg_line_length": 22.4,
"alnum_prop": 0.7008928571428571,
"repo_name": "BIDS-projects/aggregator",
"id": "92f5a4804bb588aa236715fbc05c3933bdf23663",
"size": "224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11707"
}
],
"symlink_target": ""
} |
from effectThread import *
from ledFunctions import *
class effect_cop(StoppableThread):
    """Thread that alternates the LEDs between red and blue at a fixed
    interval until stopped (police-light effect)."""

    iInterval = 1
    # Toggle flag: 1 -> show red on the next tick, 0 -> show blue.
    BUFFER_EFFECT_COPS = 1

    def effect_cops(self):
        """Switch the LED color and flip the toggle flag."""
        if self.BUFFER_EFFECT_COPS == 1:
            setRGB(255, 0, 0)
            self.BUFFER_EFFECT_COPS = 0
        else:
            setRGB(0, 0, 255)
            self.BUFFER_EFFECT_COPS = 1

    def __init__(self, iInterval):
        self.iInterval = iInterval
        StoppableThread.__init__(self)

    def run(self):
        # BUG FIX: the original did `import time` inside the class body,
        # which binds the module as a *class attribute* -- class attributes
        # are not visible as bare names inside methods, so `time.sleep`
        # below would raise NameError (unless a star-import happened to
        # provide `time`).  A function-scope import is always in scope here.
        import time
        while not self.stopped():
            self.effect_cops()
            time.sleep(self.iInterval)
| {
"content_hash": "a7521bad4f490ea036240cf4b402e06e",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 43,
"avg_line_length": 24.076923076923077,
"alnum_prop": 0.5559105431309904,
"repo_name": "simondankelmann/LED-Server",
"id": "dbd33db0b095637efcaf5c71b688591bebbd2549",
"size": "626",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "effect_cops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7277"
},
{
"name": "Shell",
"bytes": "205"
}
],
"symlink_target": ""
} |
from pact_test import given
from pact_test import with_request
from pact_test import has_pact_with
from pact_test import upon_receiving
from pact_test import service_consumer
from pact_test import will_respond_with
from pact_test import ServiceProviderTest
from pact_test.either import *
def test_default_service_consumer_value():
    """An undecorated test carries no service consumer."""
    assert ServiceProviderTest().service_consumer is None


def test_decorator_service_consumer_value():
    """@service_consumer sets the consumer name on the test class."""
    @service_consumer('Library App')
    class MyTest(ServiceProviderTest):
        pass

    assert MyTest().service_consumer == 'Library App'


def test_default_has_pact_with_value():
    """An undecorated test carries no pact partner."""
    assert ServiceProviderTest().has_pact_with is None


def test_decorator_has_pact_with_value():
    """@has_pact_with sets the partner name on the test class."""
    @has_pact_with('Books Service')
    class MyTest(ServiceProviderTest):
        pass

    assert MyTest().has_pact_with == 'Books Service'
def test_class_decorators():
    """Both class decorators can be stacked on one test class."""
    @service_consumer('Library App')
    @has_pact_with('Books Service')
    class MyTest(ServiceProviderTest):
        pass

    instance = MyTest()
    assert instance.service_consumer == 'Library App'
    assert instance.has_pact_with == 'Books Service'


def test_method_decorators():
    """A fully-decorated method is exposed via decorated_methods with all
    of its pact attributes attached, and remains callable."""
    class MyTest(ServiceProviderTest):
        @given('the breakfast menu is available')
        @upon_receiving('a request for a breakfast')
        @with_request('I don\'t like spam')
        @will_respond_with('Spam & Eggs')
        def make_me_breakfast(self):
            return 'Spam & Eggs'

    method = next(MyTest().decorated_methods)
    assert method is not None
    assert method.given == 'the breakfast menu is available'
    assert method.upon_receiving == 'a request for a breakfast'
    assert method.with_request == 'I don\'t like spam'
    assert method.will_respond_with == 'Spam & Eggs'
    assert method() == 'Spam & Eggs'
def test_missing_method_decorators():
    """A partially-decorated method must not show up in decorated_methods."""
    class MyTest(ServiceProviderTest):
        @given('the breakfast menu is available')
        def make_me_breakfast(self):
            return 'Spam & Eggs'

    sentinel = object()
    assert next(MyTest().decorated_methods, sentinel) is sentinel


def test_missing_service_consumer():
    """Validation flags a missing @service_consumer decorator."""
    @has_pact_with('Restaurant Customer')
    class MyTest(ServiceProviderTest):
        pass

    expected = 'Missing setup for "service_consumer"'
    assert MyTest().is_valid().value.startswith(expected)


def test_missing_has_pact_with():
    """Validation flags a missing @has_pact_with decorator."""
    @service_consumer('Restaurant Customer')
    class MyTest(ServiceProviderTest):
        pass

    expected = 'Missing setup for "has_pact_with"'
    assert MyTest().is_valid().value.startswith(expected)


def test_valid_test():
    """A test with both decorators validates to a Right."""
    @service_consumer('Spam')
    @has_pact_with('Eggs')
    class MyTest(ServiceProviderTest):
        pass

    assert type(MyTest().is_valid()) is Right
| {
"content_hash": "0a64064b70b008b97b507939c7d4c499",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 73,
"avg_line_length": 26.916666666666668,
"alnum_prop": 0.6749226006191951,
"repo_name": "Kalimaha/pact-test",
"id": "67b10ce1b82bb1b0e5fe793d03d27be99b37a903",
"size": "2907",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/models/service_provider_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "127995"
}
],
"symlink_target": ""
} |
from synergine_xyz.Context import Context as XyzContext
from intelligine.cst import IMPENETRABLE
from synergine_xyz.cst import POSITIONS
from intelligine.synergy.stigmergy.MoleculesManager import MoleculesManager
class Context(XyzContext):
    """XYZ simulation context extended with a molecules (stigmergy) manager."""

    def __init__(self):
        super().__init__()
        self._molecules = MoleculesManager(self)

    def molecules(self):
        """Accessor for this context's molecules manager."""
        return self._molecules

    def position_is_penetrable(self, position):
        """
        Return True if position is empty or occuped by non physical impenetrable object.
        :param position:
        :return:
        """
        occupants = self.metas.list.get(POSITIONS, position, allow_empty=True)
        return not any(
            self.metas.states.have(occupant_id, IMPENETRABLE)
            for occupant_id in occupants
        )

    def position_can_be_odorus(self, position):
        """
        Return True if position can smell
        :param position:
        :return:
        """
        return self.position_is_penetrable(position)
| {
"content_hash": "ae492af0c5c3131ab5bd9d19b7b13a8f",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 94,
"avg_line_length": 28.94736842105263,
"alnum_prop": 0.6572727272727272,
"repo_name": "buxx/intelligine",
"id": "0addf13172c87d1504e53ef73287a187b65e3e16",
"size": "1100",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "intelligine/core/Context.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "133554"
}
],
"symlink_target": ""
} |
"""
test_naming.py
Created by Niall Richard Murphy on 2011-05-30.
"""
import config_parse
import constants
import os
import sys
import test_infrastructure as ti
# Perhaps unittest2 is available. Try to import it, for
# those cases where we are running python 2.7.
try:
import unittest2 as unittest
except ImportError:
import unittest
def suite():
    """Build the bulk-naming test suite in a fixed order."""
    test_names = ('testHostnameDefined',
                  'testDomainNameDefined',
                  'testResolversConfigured')
    return unittest.TestSuite(TestBulkNaming(name) for name in test_names)
class TestBulkNaming(ti.defaultTestConfiguration):
    """Checks that every router defines the basic naming attributes."""

    def _assert_attribute_defined(self, attribute, message_template):
        # Collect routers missing the attribute; fail listing the offenders.
        missing = ti.IndividualAttributesDefined(self.cp.routers, attribute)
        self.assertEqual(missing, [], message_template % (missing))

    def testHostnameDefined(self):
        self._assert_attribute_defined(
            'hostname',
            "These routers should have hostnames defined: %s")

    def testDomainNameDefined(self):
        self._assert_attribute_defined(
            'domain_name',
            "These routers should have domain names defined: %s")

    def testResolversConfigured(self):
        self._assert_attribute_defined(
            'resolvers',
            "These routers should have resolvers defined: %s")
if __name__ == '__main__':
    # BUG FIX: the original ran `unittest.main(...)`, which performs its own
    # discovery and exits the interpreter, so the trailing `.run(suite)` call
    # never executed (and `suite = suite()` shadowed the factory function).
    # Run the explicitly-built suite with a verbose text runner instead.
    unittest.TextTestRunner(verbosity=2).run(suite())
| {
"content_hash": "2c3e6a9279e37d5de0a608160854b8bf",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 76,
"avg_line_length": 29.217391304347824,
"alnum_prop": 0.7254464285714286,
"repo_name": "niallrmurphy/pyvern",
"id": "4e4381ac3cb2153ec042d7f03aba7d14566e3f38",
"size": "1384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/test_naming.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "912"
},
{
"name": "Python",
"bytes": "123073"
}
],
"symlink_target": ""
} |
import os

# Absolute-ish path of the directory one level above this package directory
# (the project root), derived from this module's location.
main_dir = os.path.dirname(os.path.dirname(__file__))
| {
"content_hash": "93ca58297f186901cb4c747526e4defa",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 53,
"avg_line_length": 21.666666666666668,
"alnum_prop": 0.6923076923076923,
"repo_name": "mcyprian/pyp2rpm",
"id": "958c468c52b57ac35cc2f32c7839d6252f8fb4b7",
"size": "65",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pyp2rpm/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "162357"
},
{
"name": "Roff",
"bytes": "3420"
}
],
"symlink_target": ""
} |
import typing
class BaseLockException(Exception):
    """Root of the locking exception hierarchy.

    Stores the file handle involved (if any) on ``self.fh`` so callers can
    inspect or clean it up after a failure.
    """

    # Error codes:
    LOCK_FAILED = 1

    def __init__(
        self,
        *args: typing.Any,
        fh: typing.Optional[typing.IO] = None,
        **kwargs: typing.Any,
    ) -> None:
        super().__init__(*args)
        self.fh = fh
class LockException(BaseLockException):
    """General failure to acquire or hold a lock."""


class AlreadyLocked(LockException):
    """The target is already locked."""


class FileToLarge(LockException):
    """Lock failure related to file size.

    NOTE: the historical spelling ("ToLarge") is kept for backward
    compatibility with existing callers.
    """
| {
"content_hash": "c7b0f52a56e7710f36fc769366ef6104",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 46,
"avg_line_length": 16.555555555555557,
"alnum_prop": 0.6062639821029083,
"repo_name": "WoLpH/portalocker",
"id": "b360c77e84d99d14b03042a90042d261a97267e3",
"size": "447",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "portalocker/exceptions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "49105"
}
],
"symlink_target": ""
} |
from .plugin import SimStatePlugin
from ..s_action_object import ast_stripping_decorator, SimActionObject
import sys
import functools
import logging
l = logging.getLogger('simuvex.plugins.solver')
#pylint:disable=unidiomatic-typecheck
#
# Timing stuff
#
_timing_enabled = False
import time
lt = logging.getLogger('simuvex.plugins.solver.timing')
def timed_function(f):
    """Decorator: when timing is enabled, wrap *f* so each call logs its
    duration together with the guest code location being executed; when a
    call exceeds ``break_time`` seconds, drop into ipdb.  When timing is
    disabled, return *f* unchanged."""
    if _timing_enabled:
        @functools.wraps(f)
        def timing_guy(*args, **kwargs):
            # The solver may be supplied explicitly via the 'the_solver'
            # kwarg (see the functools.partial in SimSolver.__getattr__) or
            # implicitly as the bound method's self (args[0]).
            the_solver = kwargs.pop('the_solver', None)
            the_solver = args[0] if the_solver is None else the_solver
            s = the_solver.state
            start = time.time()
            r = f(*args, **kwargs)
            end = time.time()
            duration = end-start
            try:
                # Describe where in the guest program the state currently is.
                if s.scratch.sim_procedure is None and s.scratch.bbl_addr is not None:
                    location = "bbl %#x, stmt %s (inst %s)" % (
                        s.scratch.bbl_addr,
                        s.scratch.stmt_idx,
                        ('%s' % s.scratch.ins_addr if s.scratch.ins_addr is None else '%#x' % s.scratch.ins_addr)
                    )
                elif s.scratch.sim_procedure is not None:
                    location = "sim_procedure %s" % s.scratch.sim_procedure
                else:
                    location = "unknown"
            except Exception: #pylint:disable=broad-except
                l.error("Got exception while generating timer message:", exc_info=True)
                location = "unknown"
            # Log level scales with duration (tenths of a second) so that
            # slower calls are logged at higher levels.
            lt.log(int((end-start)*10), '%s took %s seconds at %s', f.__name__, round(duration, 2), location)
            if break_time >= 0 and duration > break_time:
                import ipdb; ipdb.set_trace()
            return r
        return timing_guy
    else:
        return f
#pylint:disable=global-variable-undefined
def enable_timing():
    """Turn on per-call solver timing and open the timing logger wide."""
    global _timing_enabled
    _timing_enabled = True
    lt.setLevel(1)

def disable_timing():
    """Turn off per-call solver timing."""
    global _timing_enabled
    _timing_enabled = False

import os
# Timing is toggled at import time via the SOLVER_TIMING env variable.
if os.environ.get('SOLVER_TIMING', False):
    enable_timing()
else:
    disable_timing()

# Calls taking longer than this many seconds drop into ipdb; -1 disables.
break_time = float(os.environ.get('SOLVER_BREAK_TIME', -1))
#
# Various over-engineered crap
#
def error_converter(f):
    """Decorator translating claripy exceptions raised by *f* into the
    corresponding simuvex error types, preserving the original traceback
    (Python 2 three-argument raise syntax)."""
    @functools.wraps(f)
    def wrapped_f(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except claripy.UnsatError:
            e_type, value, traceback = sys.exc_info()
            raise SimUnsatError, ("Got an unsat result", e_type, value), traceback
        except claripy.ClaripyFrontendError:
            e_type, value, traceback = sys.exc_info()
            raise SimSolverModeError, ("Translated claripy error:", e_type, value), traceback
    return wrapped_f
#
# Premature optimizations
#
def _concrete_bool(e):
if isinstance(e, bool):
return e
elif isinstance(e, claripy.ast.Base) and e.op == 'BoolV':
return e.args[0]
elif isinstance(e, SimActionObject) and e.op == 'BoolV':
return e.args[0]
else:
return None
def _concrete_value(e):
# shortcuts for speed improvement
if isinstance(e, (int, long, float, bool)):
return e
elif isinstance(e, claripy.ast.Base) and e.op in claripy.operations.leaf_operations_concrete:
return e.args[0]
elif isinstance(e, SimActionObject) and e.op in claripy.operations.leaf_operations_concrete:
return e.args[0]
else:
return None
def concrete_path_bool(f):
    """Decorator: return a concrete boolean first argument directly,
    bypassing f entirely."""
    @functools.wraps(f)
    def concrete_shortcut_bool(self, *args, **kwargs):
        v = _concrete_bool(args[0])
        return f(self, *args, **kwargs) if v is None else v
    return concrete_shortcut_bool

def concrete_path_not_bool(f):
    """Decorator: return the negation of a concrete boolean first argument,
    bypassing f entirely."""
    @functools.wraps(f)
    def concrete_shortcut_not_bool(self, *args, **kwargs):
        v = _concrete_bool(args[0])
        return f(self, *args, **kwargs) if v is None else not v
    return concrete_shortcut_not_bool

def concrete_path_scalar(f):
    """Decorator: return a concrete first argument directly, bypassing f."""
    @functools.wraps(f)
    def concrete_shortcut_scalar(self, *args, **kwargs):
        v = _concrete_value(args[0])
        return f(self, *args, **kwargs) if v is None else v
    return concrete_shortcut_scalar

def concrete_path_tuple(f):
    """Decorator: wrap a concrete first argument in a 1-tuple, bypassing f."""
    @functools.wraps(f)
    def concrete_shortcut_tuple(self, *args, **kwargs):
        v = _concrete_value(args[0])
        return f(self, *args, **kwargs) if v is None else ( v, )
    return concrete_shortcut_tuple

def concrete_path_list(f):
    """Decorator: wrap a concrete first argument in a list, bypassing f."""
    @functools.wraps(f)
    def concrete_shortcut_list(self, *args, **kwargs):
        v = _concrete_value(args[0])
        return f(self, *args, **kwargs) if v is None else [ v ]
    return concrete_shortcut_list
#
# The main event
#
import claripy
class SimSolver(SimStatePlugin):
"""
Symbolic solver.
"""
def __init__(self, solver=None): #pylint:disable=redefined-outer-name
    """Create the solver plugin, optionally wrapping an existing solver."""
    l.debug("Creating SimSolverClaripy.")
    SimStatePlugin.__init__(self)
    self._stored_solver = solver

def reload_solver(self):
    """
    Reloads the solver. Useful when changing solver options.
    """
    # Keep the constraints, drop the cached backend, and re-add them so a
    # freshly-selected backend picks them up.
    saved_constraints = self._solver.constraints
    self._stored_solver = None
    self._solver.add(saved_constraints)
@property
def _solver(self):
    """Lazily choose and cache a claripy solver backend based on the state's
    options; the first matching option below wins."""
    if self._stored_solver is not None:
        return self._stored_solver

    if o.ABSTRACT_SOLVER in self.state.options:
        backend = claripy.SolverVSA()
    elif o.REPLACEMENT_SOLVER in self.state.options:
        backend = claripy.SolverReplacement(auto_replace=False)
    elif o.CACHELESS_SOLVER in self.state.options:
        backend = claripy.SolverCacheless()
    elif o.COMPOSITE_SOLVER in self.state.options:
        backend = claripy.SolverComposite()
    elif o.SYMBOLIC in self.state.options and o.approximation & self.state.options:
        backend = claripy.SolverHybrid()
    elif o.SYMBOLIC in self.state.options:
        backend = claripy.Solver()
    else:
        backend = claripy.SolverConcrete()

    self._stored_solver = backend
    return backend
#
# Get unconstrained stuff
#
def Unconstrained(self, name, bits, **kwargs):
    """Return a fresh unconstrained value of the given bit width: a symbol
    when SYMBOLIC_INITIAL_VALUES is on (a top strided-interval under
    ABSTRACT_MEMORY), otherwise a concrete zero."""
    if o.SYMBOLIC_INITIAL_VALUES not in self.state.options:
        # Return a default value, aka. 0
        return claripy.BVV(0, bits)

    if o.ABSTRACT_MEMORY in self.state.options:
        l.debug("Creating new top StridedInterval")
        return claripy.TSI(bits=bits, name=name, uninitialized=True, **kwargs)

    l.debug("Creating new unconstrained BV named %s", name)
    if o.UNDER_CONSTRAINED_SYMEXEC in self.state.options:
        return self.BVS(name, bits, uninitialized=True, **kwargs)
    return self.BVS(name, bits, **kwargs)
def BVS(self, name, size, min=None, max=None, stride=None, uninitialized=False, explicit_name=None, **kwargs): #pylint:disable=redefined-builtin
    """
    Creates a bit-vector symbol (i.e., a variable). Other **kwargs are passed directly on to the constructor of
    claripy.ast.BV.
    :param name: The name of the symbol.
    :param size: The size (in bits) of the bit-vector.
    :param min: The minimum value of the symbol.
    :param max: The maximum value of the symbol.
    :param stride: The stride of the symbol.
    :param uninitialized: Whether this value should be counted as an "uninitialized" value in the course of an
    analysis.
    :param explicit_name: If False, an identifier is appended to the name to ensure uniqueness.
    :return: A BV object representing this symbol.
    """
    r = claripy.BVS(name, size, min=min, max=max, stride=stride, uninitialized=uninitialized, explicit_name=explicit_name, **kwargs)
    # BUG FIX: the event-logging line below used iter(...).next(), which only
    # works on Python 2 and was inconsistent with the next(iter(...)) call
    # used for the inspect breakpoint.  Compute the generated symbolic name
    # once and reuse it for both notifications.
    symbolic_name = next(iter(r.variables))
    self.state._inspect('symbolic_variable', BP_AFTER, symbolic_name=symbolic_name, symbolic_size=size, symbolic_expr=r)
    self.state.log.add_event('unconstrained', name=symbolic_name, bits=size, **kwargs)
    return r
#
# Operation passthroughs to claripy
#
def __getattr__(self, a):
    """Proxy unknown attribute lookups to claripy's operations.  Callables
    come back wrapped with AST stripping, error conversion, and (when
    enabled) timing; non-callables are returned as-is."""
    attr = getattr(claripy._all_operations, a)
    if not hasattr(attr, '__call__'):
        return attr
    wrapped = error_converter(ast_stripping_decorator(attr))
    if _timing_enabled:
        wrapped = functools.partial(timed_function(wrapped), the_solver=self)
    wrapped.__doc__ = attr.__doc__
    return wrapped

def __dir__(self):
    """Include proxied claripy operations in attribute listings."""
    names = dir(super(SimSolver, self)) + dir(claripy._all_operations) + dir(self.__class__)
    return sorted(set(names))
#
# Branching stuff
#
def copy(self):
    """Branch the underlying solver for a copied state."""
    return SimSolver(solver=self._solver.branch())

@error_converter
def merge(self, others, merge_conditions): # pylint: disable=W0613
    """Merge this plugin's solver with the others' solvers under the given
    per-ancestor merge conditions; returns whether merging occurred."""
    merging_occurred, self._stored_solver = self._solver.merge(
        [other._solver for other in others],
        merge_conditions,
    )
    return merging_occurred

@error_converter
def widen(self, others):
    """Widen by merging under a fresh 32-bit symbolic choice variable."""
    choice = self.state.se.BVS('random_widen_condition', 32)
    merge_conditions = [[choice == i] for i in range(len(others) + 1)]
    return self.merge(others, merge_conditions)
#
# Frontend passthroughs
#
def downsize(self):
    """Ask the underlying solver to release non-essential data."""
    return self._solver.downsize()

@property
def constraints(self):
    """The constraints tracked by the underlying solver."""
    return self._solver.constraints

def _adjust_constraint(self, c):
    """Guard a single constraint with the state's global condition, if any,
    turning c into (global_condition => c)."""
    if self.state._global_condition is None:
        return c
    if c is None: # this should never happen
        l.critical("PLEASE REPORT THIS MESSAGE, AND WHAT YOU WERE DOING, TO YAN")
        return self.state._global_condition
    return self.Or(self.Not(self.state._global_condition), c)

def _adjust_constraint_list(self, constraints):
    """Guard a sequence of constraints with the state's global condition,
    preserving the input's sequence type."""
    if self.state._global_condition is None:
        return constraints
    if len(constraints) == 0:
        return constraints.__class__((self.state._global_condition,))
    return constraints.__class__((self._adjust_constraint(self.And(*constraints)),))
@timed_function
@ast_stripping_decorator
@error_converter
def eval_to_ast(self, e, n, extra_constraints=(), exact=None):
    """
    Evaluate an expression, using the solver if necessary. Returns AST objects.
    :param e: the expression
    :param n: the number of desired solutions
    :param extra_constraints: extra constraints to apply to the solver
    :param exact: if False, returns approximate solutions
    :return: a tuple of the solutions, in the form of claripy AST nodes
    :rtype: tuple
    """
    guarded = self._adjust_constraint_list(extra_constraints)
    return self._solver.eval_to_ast(e, n, extra_constraints=guarded, exact=exact)
@concrete_path_tuple
@timed_function
@ast_stripping_decorator
@error_converter
def eval(self, e, n, extra_constraints=(), exact=None):
    """
    Evaluate an expression, using the solver if necessary. Returns primitives.
    Extra constraints are first adjusted for the state's global condition.
    :param e: the expression
    :param n: the number of desired solutions
    :param extra_constraints: extra constraints to apply to the solver
    :param exact: if False, returns approximate solutions
    :return: a tuple of the solutions, in the form of Python primitives
    :rtype: tuple
    """
    return self._solver.eval(e, n, extra_constraints=self._adjust_constraint_list(extra_constraints), exact=exact)
@concrete_path_scalar
@timed_function
@ast_stripping_decorator
@error_converter
def max(self, e, extra_constraints=(), exact=None):
    """Return the maximum value of expression e under the constraints."""
    if exact is False and o.VALIDATE_APPROXIMATIONS in self.state.options:
        # Validation mode: the approximate maximum must be an upper bound
        # on the exact maximum.
        ar = self._solver.max(e, extra_constraints=self._adjust_constraint_list(extra_constraints), exact=False)
        er = self._solver.max(e, extra_constraints=self._adjust_constraint_list(extra_constraints))
        assert er <= ar
        return ar
    return self._solver.max(e, extra_constraints=self._adjust_constraint_list(extra_constraints), exact=exact)
@concrete_path_scalar
@timed_function
@ast_stripping_decorator
@error_converter
def min(self, e, extra_constraints=(), exact=None):
    """Return the minimum value of expression e under the constraints."""
    if exact is False and o.VALIDATE_APPROXIMATIONS in self.state.options:
        # Validation mode: the approximate minimum must be a lower bound
        # on the exact minimum.
        ar = self._solver.min(e, extra_constraints=self._adjust_constraint_list(extra_constraints), exact=False)
        er = self._solver.min(e, extra_constraints=self._adjust_constraint_list(extra_constraints))
        assert ar <= er
        return ar
    return self._solver.min(e, extra_constraints=self._adjust_constraint_list(extra_constraints), exact=exact)
@timed_function
@ast_stripping_decorator
@error_converter
def solution(self, e, v, extra_constraints=(), exact=None):
    """Return whether v can be a solution of expression e."""
    if exact is False and o.VALIDATE_APPROXIMATIONS in self.state.options:
        ar = self._solver.solution(e, v, extra_constraints=self._adjust_constraint_list(extra_constraints), exact=False)
        er = self._solver.solution(e, v, extra_constraints=self._adjust_constraint_list(extra_constraints))
        # Validation: a solution under the exact solver must also be
        # reported as a solution by the approximation.
        if er is True:
            assert ar is True
        return ar
    return self._solver.solution(e, v, extra_constraints=self._adjust_constraint_list(extra_constraints), exact=exact)
@concrete_path_bool
@timed_function
@ast_stripping_decorator
@error_converter
def is_true(self, e, extra_constraints=(), exact=None):
    """Return the underlying solver's ``is_true`` verdict for e."""
    if exact is False and o.VALIDATE_APPROXIMATIONS in self.state.options:
        ar = self._solver.is_true(e, extra_constraints=self._adjust_constraint_list(extra_constraints), exact=False)
        er = self._solver.is_true(e, extra_constraints=self._adjust_constraint_list(extra_constraints))
        # Validation: if the exact solver says "not true", the
        # approximation must agree.
        if er is False:
            assert ar is False
        return ar
    return self._solver.is_true(e, extra_constraints=self._adjust_constraint_list(extra_constraints), exact=exact)
@concrete_path_not_bool
@timed_function
@ast_stripping_decorator
@error_converter
def is_false(self, e, extra_constraints=(), exact=None):
    """Return the underlying solver's ``is_false`` verdict for e."""
    if exact is False and o.VALIDATE_APPROXIMATIONS in self.state.options:
        ar = self._solver.is_false(e, extra_constraints=self._adjust_constraint_list(extra_constraints), exact=False)
        er = self._solver.is_false(e, extra_constraints=self._adjust_constraint_list(extra_constraints))
        # Validation mirrors is_true: if the exact solver says "not false",
        # the approximation must agree.
        if er is False:
            assert ar is False
        return ar
    return self._solver.is_false(e, extra_constraints=self._adjust_constraint_list(extra_constraints), exact=exact)
@timed_function
@ast_stripping_decorator
@error_converter
def solve(self, extra_constraints=(), exact=None):
    """Delegate to the underlying solver's ``solve()``; extra constraints
    are adjusted for any global condition first."""
    return self._solver.solve(extra_constraints=self._adjust_constraint_list(extra_constraints), exact=exact)
@timed_function
@ast_stripping_decorator
@error_converter
def satisfiable(self, extra_constraints=(), exact=None):
    """Return whether the constraints (plus extras) are satisfiable."""
    if exact is False and o.VALIDATE_APPROXIMATIONS in self.state.options:
        er = self._solver.satisfiable(extra_constraints=self._adjust_constraint_list(extra_constraints))
        ar = self._solver.satisfiable(extra_constraints=self._adjust_constraint_list(extra_constraints), exact=False)
        # Validation: exact satisfiability implies approximate satisfiability.
        if er is True:
            assert ar is True
        return ar
    return self._solver.satisfiable(extra_constraints=self._adjust_constraint_list(extra_constraints), exact=exact)
@timed_function
@ast_stripping_decorator
@error_converter
def add(self, *constraints):
    """Add constraints to the solver, adjusted for any global condition."""
    adjusted = self._adjust_constraint_list(constraints)
    return self._solver.add(adjusted)
#
# And some convenience stuff
#
@concrete_path_scalar
def any_int(self, e, **kwargs):
    """Return one satisfying integer solution for e.

    :raises SimUnsatError: if e has no solutions.
    """
    solutions = self.eval(e, 1, **kwargs)
    if not solutions:
        raise SimUnsatError("Not satisfiable: %s" % e.shallow_repr())
    return solutions[0]
def any_str(self, e, **kwargs):
    """Return one satisfying string solution for e.

    :raises SimUnsatError: if e has no solutions.
    """
    solutions = self.any_n_str(e, 1, **kwargs)
    if not solutions:
        raise SimUnsatError("Not satisfiable: %s" % e.shallow_repr())
    return solutions[0]
def any_n_str_iter(self, e, n, **kwargs):
    """Yield up to n string solutions for e.

    NOTE(review): Python 2 only -- relies on ``str.decode('hex')`` and on
    integer division of ``len(e)`` (presumably e's width in bits; confirm)
    by 4 to size the hex string before decoding.
    """
    if len(e) == 0:
        yield ""
        return
    for s in self.eval(e, n, **kwargs):
        yield ("%x" % s).zfill(len(e)/4).decode('hex')
def any_n_str(self, e, n, **kwargs):
    """Return up to n string solutions for e as a list."""
    return list(self.any_n_str_iter(e, n, **kwargs))
# Integer min/max are simply aliases of the generic min/max methods above.
min_int = min
max_int = max
def any_n_int(self, e, n, **kwargs):
    """Return up to n integer solutions for e; [] if unsatisfiable."""
    try:
        return list(self.eval(e, n, **kwargs))
    except SimUnsatError:
        return [ ]
def exactly_n(self, e, n, **kwargs):
    """Return exactly n integer solutions for e.

    :raises SimValueError: if fewer than n solutions exist.
    """
    values = self.any_n_int(e, n, **kwargs)
    if len(values) != n:
        raise SimValueError("concretized %d values (%d required) in exactly_n" % (len(values), n))
    return values
def exactly_n_int(self, e, n, **kwargs):
    """Return exactly n integer solutions for e.

    :raises SimValueError: if fewer than n solutions exist.
    """
    found = self.any_n_int(e, n, **kwargs)
    if len(found) != n:
        raise SimValueError("concretized %d values (%d required) in exactly_n" % (len(found), n))
    return found
def exactly_int(self, e, default=None, **kwargs):
    """Return the single integer solution of e.

    :param e: the expression to concretize.
    :param default: value to return instead of raising when concretization
        fails or does not produce exactly one solution.
    :raises SimValueError: if e does not have exactly one solution and no
        default was supplied.
    """
    try:
        r = self.any_n_int(e, 1, **kwargs)
    except (SimValueError, SimSolverModeError):
        if default is not None:
            return default
        raise
    if len(r) != 1:
        if default is None:
            # BUGFIX: the format arguments were previously passed as extra
            # exception args instead of being %-interpolated into the message.
            raise SimValueError("concretized %d values (%d required) in exactly_int" % (len(r), 1))
        else:
            return default
    return r[0]
@timed_function
@ast_stripping_decorator
def unique(self, e, **kwargs):
    """Check whether e has exactly one solution; if so, pin e to it.

    Non-AST values are trivially unique. When a single solution is found,
    the constraint ``e == solution`` is added to the solver.
    """
    if not isinstance(e, claripy.ast.Base):
        return True
    # if we don't want to do symbolic checks, assume symbolic variables are multivalued
    if o.SYMBOLIC not in self.state.options and self.symbolic(e):
        return False
    # Ask for two solutions: one means unique, zero means unsat.
    r = self.any_n_int(e, 2, **kwargs)
    if len(r) == 1:
        self.add(e == r[0])
        return True
    elif len(r) == 0:
        raise SimValueError("unsatness during uniqueness check(ness)")
    else:
        return False
def symbolic(self, e): # pylint:disable=R0201
    """Return whether e is symbolic; Python primitives never are."""
    if type(e) in (int, str, float, bool, long):
        return False
    return e.symbolic
def single_valued(self, e):
    """Return whether e can take only a single value in this state."""
    if self.state.mode == 'static':
        if type(e) in (int, str, float, bool, long):
            return True
        else:
            # Static mode relies on the expression's cardinality.
            return e.cardinality <= 1
    else:
        # All symbolic expressions are not single-valued
        return not self.symbolic(e)
def simplify(self, *args):
    """Simplify the solver's constraints (no args) or one expression.

    Concrete primitives and leaf ASTs are returned unchanged; only genuine
    claripy ASTs (or SimActionObjects wrapping them) are simplified.
    """
    if len(args) == 0:
        return self._solver.simplify()
    elif isinstance(args[0], (int, long, float, bool)):
        return args[0]
    elif isinstance(args[0], claripy.ast.Base) and args[0].op in claripy.operations.leaf_operations_concrete:
        return args[0]
    elif isinstance(args[0], SimActionObject) and args[0].op in claripy.operations.leaf_operations_concrete:
        return args[0].ast
    elif not isinstance(args[0], (SimActionObject, claripy.ast.Base)):
        return args[0]
    else:
        return self._claripy_simplify(*args)
@timed_function
@ast_stripping_decorator
@error_converter
def _claripy_simplify(self, *args): #pylint:disable=no-self-use
    """Simplify the first argument via claripy (decorated passthrough)."""
    return claripy.simplify(args[0])
def variables(self, e): #pylint:disable=no-self-use
    """Return the variables that expression e depends on."""
    return e.variables
SimStatePlugin.register_default('solver_engine', SimSolver)
from .. import s_options as o
from .inspect import BP_AFTER
from ..s_errors import SimValueError, SimUnsatError, SimSolverModeError
| {
"content_hash": "73e7e70018701a0073db53a4daea8db2",
"timestamp": "",
"source": "github",
"line_count": 569,
"max_line_length": 148,
"avg_line_length": 36.17223198594024,
"alnum_prop": 0.6092702361286562,
"repo_name": "chubbymaggie/simuvex",
"id": "6a6f508c1aafb14e84dbe9e5547d4e1cec44e7ec",
"size": "20605",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simuvex/plugins/solver.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "6276"
},
{
"name": "C++",
"bytes": "34210"
},
{
"name": "Makefile",
"bytes": "599"
},
{
"name": "Python",
"bytes": "854125"
}
],
"symlink_target": ""
} |
"""
WSGI config for ProgrammerCompetencyMatrix project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ProgrammerCompetencyMatrix.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| {
"content_hash": "8b24f1cdfa44d7fd5c2e2ef3872caff1",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 86,
"avg_line_length": 30.5,
"alnum_prop": 0.7939110070257611,
"repo_name": "FiaDot/programmer-competency-matrix",
"id": "2f3710fb5869226da7d0f4e542ea602c4af1554c",
"size": "427",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ProgrammerCompetencyMatrix/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "58260"
},
{
"name": "JavaScript",
"bytes": "29342"
},
{
"name": "Python",
"bytes": "68321"
}
],
"symlink_target": ""
} |
import logging
from flask import Flask
from flask import request
app = Flask(__name__)
app.config['DEBUG'] = True
import sys
# Python 2 hack: reload(sys) restores sys.setdefaultencoding so the default
# string encoding can be forced to UTF-8 for this process.
reload(sys)
sys.setdefaultencoding('utf8')
from google.appengine.api import taskqueue
from google.appengine.ext import db
import json
from news import News
from news import NewsModel
import fetchWeb
import landmark
# Note: We don't need to call run() since our application is embedded within
# the App Engine WSGI application server.
@app.route('/testNews')
def testNews():
    """Debug endpoint: run the test parser and load one known article."""
    # Try to add test url
    fetchWeb.test_parse_url()
    news = News()
    testurl = 'http://www.appledaily.com.tw/realtimenews/article/new/20150822/675760/'
    # Call for its effect on `news`; the return flag was previously assigned
    # to an unused local.
    news.loadfromdb(testurl)
    return "Result: " + str(news)
@app.route('/parse_hardcode_url')
def parseHardcodeUrl():
    """Parse a fixed hot-news listing URL and report success or failure."""
    url = "http://www.appledaily.com.tw/realtimenews/section/hot/"
    if fetchWeb.parse_single_url(url):
        return "Parse single url succeed: " + url
    return "Parse single url failed... " + url
@app.route('/parse_single_url')
def parseSingleUrl():
    """Parse the URL given in the ?url= query parameter."""
    url = request.args.get("url")
    # `url` is either a string or None; idiomatic truthiness check replaces
    # the previous `url == None or url == ""` comparison.
    if not url:
        return "There is no URL to parse"
    result = fetchWeb.parse_single_url(url)
    if result:
        return "Parse single url succeed: " + url
    else:
        return "Parse single url failed... " + url
@app.route('/trigger_background_parsing')
def parserWorker():
    """Enqueue a task that performs the parsing in the background."""
    taskqueue.add(queue_name='default', url='/perform_parsing', params={})
    return "Trigger the background parsing... done."
@app.route('/perform_parsing', methods=['POST'])
def perform_parsing():
    """Task-queue target (POST only): parse all configured URLs."""
    logging.info("perform_parsing start")
    fetchWeb.parse_all_url()
    logging.info("perform_parsing end")
    return "Performing background parsing end"
@app.route('/query_landmark')
def query_landmark():
    """Return JSON with the landmark name and related news for ?lat=&lng=.

    Responds with '{}' when no landmark matches the given position.
    """
    lat = request.args.get('lat')
    lng = request.args.get('lng')
    # Dead commented-out pre-validation removed; getlandmark already returns
    # None for unknown positions.
    lm = landmark.getlandmark(lat, lng)
    # Identity comparison with None instead of `== None`.
    if lm is None:
        return '{}'
    first_keyword = landmark.LANDMARK_KEYWORDS[lm.location][0]
    related_news_ids = lm.related_news
    logging.info("Query position has " + str(len(related_news_ids)) + " news")
    return_dict = {'landmark_name': first_keyword}
    return_list = []
    for news_key in related_news_ids:
        key = db.Key(news_key)
        entry = NewsModel.all().filter('__key__ =', key).get()
        if entry is not None:
            dic = {}
            dic['news_id'] = news_key
            dic['title'] = entry.title
            return_list.append(dic)
    return_dict['news_list'] = return_list
    return json.dumps(return_dict)
@app.route('/trigger_background_landmark')
def landmarkWorker():
    """Enqueue a task that (re)builds the landmark database in background."""
    taskqueue.add(queue_name='default', url='/perform_create_landmark', params={})
    return "Trigger the creation of landmark in background... done."
def is_keyword_in_news(keywords, title, content):
    """Return True if any keyword occurs in the concatenated title+content."""
    combined = title + content
    return any(keyword in combined for keyword in keywords)
@app.route('/perform_create_landmark', methods=['POST'])
def perform_create_landmark():
    """Task-queue target: rebuild the landmark -> related-news mapping.

    For each landmark position, collects the keys of all news items whose
    title or content mentions one of the landmark's keywords, then stores
    the mapping in the datastore.
    """
    logging.info("Start to create landmark database")
    all_news = NewsModel.all()
    for latlng in landmark.LANDMARK_KEYWORDS:
        counter = 0
        keywords = landmark.LANDMARK_KEYWORDS[latlng]
        related_news_keys = []
        for news in all_news:
            # `if result == True` replaced with a plain truthiness check;
            # unused local `ret = str(related_news_keys)` removed.
            if is_keyword_in_news(keywords, news.title, news.content):
                related_news_keys.append(str(news.key()))
                counter += 1
        lm = landmark.Landmark(latlng, related_news_keys)
        lm.writetodb()
        logging.info("Create database for " + str(latlng) + " completed, it has " + str(counter) + " news")
    logging.info("All database for landmark are completed")
    return "Write to db complete"
@app.route('/query_article')
def query_article():
    """Return one news article as JSON, looked up by ?news_key=.

    Responds with '{}' when no entry exists for the key.
    """
    key = db.Key(request.args.get('news_key'))
    entry = NewsModel.all().filter('__key__ =', key).get()
    # Identity comparison with None instead of `!= None`.
    if entry is not None:
        return json.dumps({"title" : entry.title,
                           "datetime" : str(entry.news_datetime),
                           "article" : entry.content,
                           "popularity" : entry.popularity,
                           "image_url" : entry.news_first_image_url,
                           "url" : entry.news_url})
    else:
        return '{}'
@app.errorhandler(404)
def page_not_found(e):
    """Return a custom 404 error."""
    message = 'Sorry, nothing at this URL.'
    return message, 404
| {
"content_hash": "dea531c011f3a5f1987970a830615b94",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 107,
"avg_line_length": 30.38607594936709,
"alnum_prop": 0.6192459904186628,
"repo_name": "chatea/NewsMap",
"id": "de322351e0b312e5f4723db78e107af4022446f7",
"size": "4815",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "9479"
},
{
"name": "HTML",
"bytes": "8321"
},
{
"name": "JavaScript",
"bytes": "6345"
},
{
"name": "Python",
"bytes": "1258479"
}
],
"symlink_target": ""
} |
""" Implements instance extraction and decoding for mention-ranking models.
This module implements two variants of the mention ranking model within a
framework that expresses coreference resolution as predicting latent structures,
while performing learning using a latent structured perceptron with
cost-augmented inference.
Hence, both variants are expressed as predicting a latent graph. In particular,
let m_1, ..., m_n be all mentions in a document. Let m_0 be a dummy mention for
anaphoricity determination. For an anaphor m_j, we predict the graph with nodes
m_0, ..., m_j and with the arc (m_j, m_i) if m_i is selected as the antecedent
of m_j. Such a graph is called a *substructure* (compared to the graph which
summarizes the decisions for the whole document).
The two variants implemented here are
- latent antecedents for training (Chang et al., 2012): when learning
weights, compare the prediction (m_j,m_i) with the best-scoring
prediction (m_j,m_k) s.t. m_j and m_k are coreferent
- closest antecedents for training (Denis and Baldridge, 2008): when
learning weights, compare the prediction (m_j,m_i) with (m_j,m_k), where
m_k is the closest antecedent of m_j.
To implement these variants, this module contains a function that defines the
search space for the graphs, and two decoders: one decoder computes the
best-scoring antecedent prediction and the best-scoring coreferent antecedent,
while the other computes the best-scoring antecedent prediction and the closest
antecedent.
References:
- Pascal Denis and Jason Baldridge. 2008. Specialized models and ranking
for coreference resolution. In *Proceedings of the 2008 Conference on
Empirical Methods in Natural Language Processing*, Waikiki, Honolulu,
Hawaii, 25-27 October 2008, pages 660-669.
http://www.aclweb.org/anthology/D08-1069
- Kai-Wei Chang, Rajhans Samdani, Alla Rozovskaya, Mark Sammons, and
Dan Roth. 2012. Illinois-Coref: The UI system in the CoNLL-2012 shared
task. In *Proceedings of the Shared Task of the 16th Conference on
Computational Natural Language Learning*, Jeju Island, Korea, 12-14 July
2012, pages 113-117.
http://www.aclweb.org/anthology/W12-4513
"""
from cort.coreference import perceptrons
__author__ = 'martscsn'
def extract_substructures(doc):
    """ Extract the search space for the mention ranking model.

    One substructure corresponds to one antecedent decision for an anaphor.
    The search space is represented as a nested list of candidate
    (anaphor, antecedent) mention pairs: the ith inner list holds all
    candidate pairs for the ith mention in the document, with antecedents
    ordered by distance (closest first). For example, the third list
    contains the pairs (m_3, m_2), (m_3, m_1), (m_3, m_0), where m_j is the
    jth mention in the document.

    Args:
        doc (CoNLLDocument): The document to extract substructures from.

    Returns:
        (list(list((Mention, Mention)))): The nested list of mention pairs
            describing the search space for the substructures.
    """
    return [
        [(anaphor, antecedent)
         for antecedent in sorted(doc.system_mentions[:idx], reverse=True)]
        for idx, anaphor in enumerate(doc.system_mentions)
    ]
class RankingPerceptron(perceptrons.Perceptron):
    """ A perceptron for mention ranking with latent antecedents. """
    def argmax(self, substructure, arc_information):
        """ Decoder for mention ranking with latent antecedents.
        Compute highest-scoring antecedent and highest-scoring antecedent
        consistent with gold coreference information for one anaphor.
        Args:
            substructure (list((Mention, Mention))): The list of mention pairs
                which define the search space for one substructure. For mention
                ranking, this list consists of all potential anaphor-antecedent
                pairs for one fixed anaphor in descending order, such as
                (m_3, m_2), (m_3, m_1), (m_3, m_0)
            arc_information (dict((Mention, Mention),
                                  ((array, array, array), list(int), bool)):
                A mapping of arcs (= mention pairs) to information about these
                arcs. The information consists of the features, the costs for
                the arc (for each label), and whether predicting the arc to be
                coreferent is consistent with the gold annotation). The features
                are divided in three arrays: the first array contains the non-
                numeric features, the second array the numeric features, and the
                third array the values for the numeric features. The features
                are represented as integers via feature hashing.
        Returns:
            A 7-tuple describing the highest-scoring anaphor-antecedent
            decision, and the highest-scoring anaphor-antecedent decision
            consistent with the gold annotation. The tuple consists of:
                - **best_arcs** (*list((Mention, Mention))*): the
                  highest-scoring antecedent decision (the list contains only
                  one arc),
                - **best_labels** (*list(str)*): empty, the ranking approach
                  does not employ any labels,
                - **best_scores** (*list(float)*): the score of the
                  highest-scoring antecedent decision,
                - **best_cons_arcs** (*list((Mention, Mention))*): the
                  highest-scoring antecedent decision consistent with the gold
                  annotation (the list contains only one arc),
                - **best_cons_labels** (*list(str)*): empty, the ranking
                  approach does not employ any labels
                - **best_cons_scores** (*list(float)*): the score of the
                  highest-scoring antecedent decision consistent with the
                  gold information
                - **is_consistent** (*bool*): whether the highest-scoring
                  antecedent decision is consistent with the gold information.
        """
        # find_best_arcs (from the base perceptron) yields the best arc, its
        # score, the best consistent arc, its score, and a consistency flag.
        best, max_val, best_cons, max_cons, best_is_consistent = \
            self.find_best_arcs(substructure, arc_information)
        return (
            [best],
            [],
            [max_val],
            [best_cons],
            [],
            [max_cons],
            best_is_consistent
        )
class RankingPerceptronClosest(perceptrons.Perceptron):
""" A perceptron for mention ranking with closest antecedents for training.
"""
def argmax(self, substructure, arc_information):
""" Decoder for mention ranking with closest antecedents for training.
Compute highest-scoring antecedent and closest gold antecedent for one
anaphor.
Args:
substructure (list((Mention, Mention))): The list of mention pairs
which define the search space for one substructure. For mention
ranking, this list consists of all potential anaphor-antecedent
pairs for one fixed anaphor in descending order, such as
(m_3, m_2), (m_3, m_1), (m_3, m_0)
arc_information (dict((Mention, Mention),
((array, array, array), list(int), bool)):
A mapping of arcs (= mention pairs) to information about these
arcs. The information consists of the features, the costs for
the arc (for each label), and whether predicting the arc to be
coreferent is consistent with the gold annotation). The features
are divided in three arrays: the first array contains the non-
numeric features, the second array the numeric features, and the
third array the values for the numeric features. The features
are represented as integers via feature hashing.
Returns:
A 7-tuple describing the highest-scoring anaphor-antecedent
decision, and the anaphor-antecedent pair with the closest gold
antecedent. The tuple consists of:
- **best_arcs** (*list((Mention, Mention))*): the
highest-scoring antecedent decision (the list contains only
one arc),
- **best_labels** (*list(str)*): empty, the ranking approach
does not employ any labels,
- **best_scores** (*list(float)*): the score of the
highest-scoring antecedent decision,
- **best_cons_arcs** (*list((Mention, Mention))*): the
anaphor-antecedent pair with the closest gold antecedent (the
list contains only one arc),
- **best_cons_labels** (*list(str)*): empty, the ranking
approach does not employ any labels
- **best_cons_scores** (*list(float)*): the score of the
anaphor-antecedent pair with the closest gold antecedent
- **is_consistent** (*bool*): whether the highest-scoring
antecedent decision is consistent with the gold information.
"""
max_val = float("-inf")
best = None
max_cons = float("-inf")
best_cons = None
best_is_consistent = False
for arc in substructure:
score = self.score_arc(arc, arc_information)
consistent = arc_information[arc][2]
if score > max_val:
best = arc
max_val = score
best_is_consistent = consistent
# take closest
if not best_cons and consistent:
best_cons = arc
max_cons = score
return (
[best],
[],
[max_val],
[best_cons],
[],
[max_cons],
best_is_consistent
) | {
"content_hash": "a42ae5273a9c46ab73b9c0b50c083219",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 80,
"avg_line_length": 45.646288209606986,
"alnum_prop": 0.6277623648713289,
"repo_name": "smartschat/cort",
"id": "3a7ff9231708a9f65f805f4466903a1b5a3dca5f",
"size": "10453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cort/coreference/approaches/mention_ranking.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4234"
},
{
"name": "Java",
"bytes": "324927"
},
{
"name": "JavaScript",
"bytes": "475404"
},
{
"name": "Perl",
"bytes": "165676"
},
{
"name": "Python",
"bytes": "420807"
},
{
"name": "R",
"bytes": "4022"
}
],
"symlink_target": ""
} |
from .resource import Resource
class StorageAccountUpdateParameters(Resource):
    """
    The parameters to update on the account.
    :param id: Resource Id
    :type id: str
    :param name: Resource name
    :type name: str
    :param type: Resource type
    :type type: str
    :param location: Resource location
    :type location: str
    :param tags: Resource tags
    :type tags: dict
    :param account_type: Gets or sets the account type. Note that StandardZRS
     and PremiumLRS accounts cannot be changed to other account types, and
     other account types cannot be changed to StandardZRS or PremiumLRS.
     Possible values include: 'Standard_LRS', 'Standard_ZRS', 'Standard_GRS',
     'Standard_RAGRS', 'Premium_LRS'
    :type account_type: str
    :param custom_domain: User domain assigned to the storage account. Name
     is the CNAME source. Only one custom domain is supported per storage
     account at this time. To clear the existing custom domain, use an empty
     string for the custom domain name property.
    :type custom_domain: :class:`CustomDomain
     <fixtures.acceptancetestsstoragemanagementclient.models.CustomDomain>`
    """
    # Serialization constraints: only 'location' is mandatory for an update.
    _validation = {
        'location': {'required': True},
    }
    # Maps attribute names to wire-format JSON keys and (de)serialization
    # types; account_type/custom_domain live under the 'properties' envelope.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'account_type': {'key': 'properties.accountType', 'type': 'AccountType'},
        'custom_domain': {'key': 'properties.customDomain', 'type': 'CustomDomain'},
    }
    def __init__(self, location, id=None, name=None, type=None, tags=None, account_type=None, custom_domain=None, **kwargs):
        """Initialize base Resource fields and the update-specific properties."""
        super(StorageAccountUpdateParameters, self).__init__(id=id, name=name, type=type, location=location, tags=tags, **kwargs)
        self.account_type = account_type
        self.custom_domain = custom_domain
| {
"content_hash": "a0922abe13a5d54159269292215ac9a2",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 129,
"avg_line_length": 41.285714285714285,
"alnum_prop": 0.6495304003954523,
"repo_name": "jkonecki/autorest",
"id": "1a4b6346505615b67842c8937ca65c592bf3943a",
"size": "2497",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "AutoRest/Generators/Python/Azure.Python.Tests/Expected/AcceptanceTests/StorageManagementClient/storagemanagementclient/models/storage_account_update_parameters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "270"
},
{
"name": "C#",
"bytes": "6404226"
},
{
"name": "CSS",
"bytes": "110"
},
{
"name": "HTML",
"bytes": "589"
},
{
"name": "Java",
"bytes": "52197"
},
{
"name": "JavaScript",
"bytes": "2844345"
},
{
"name": "PowerShell",
"bytes": "16348"
},
{
"name": "Ruby",
"bytes": "121623"
}
],
"symlink_target": ""
} |
import Cookie
import logging
import os
def get_cookie_value(key):
cookies = None
try:
cookies = Cookie.BaseCookie(os.environ.get('HTTP_COOKIE',''))
except Cookie.CookieError, error:
logging.debug("Ignoring Cookie Error, skipping get cookie: '%s'" % error)
if not cookies:
return None
cookie = cookies.get(key)
if not cookie:
return None
return cookie.value
# Cookie handling from http://appengine-cookbook.appspot.com/recipe/a-simple-cookie-class/
def set_cookie_value(key, value='', max_age=None,
                     path='/', domain=None, secure=None, httponly=False,
                     version=None, comment=None):
    """Build a Set-Cookie header value string for the given cookie.

    Attributes whose argument is None (or False, for flags) are omitted.
    Returns only the header *value*, suitable for ('Set-Cookie', value).
    """
    cookies = Cookie.BaseCookie()
    cookies[key] = value
    for var_name, var_value in [
        ('max-age', max_age),
        ('path', path),
        ('domain', domain),
        ('secure', secure),
        #('HttpOnly', httponly), Python 2.6 is required for httponly cookies
        ('version', version),
        ('comment', comment),
        ]:
        if var_value is not None and var_value is not False:
            cookies[key][var_name] = str(var_value)
    # NOTE(review): 'expires' is set to the raw max_age value, not an HTTP
    # date string -- presumably relied upon by callers; verify before changing.
    if max_age is not None:
        cookies[key]['expires'] = max_age
    cookies_header = cookies[key].output(header='').lstrip()
    if httponly:
        # We have to manually add this part of the header until GAE uses Python 2.6.
        cookies_header += "; HttpOnly"
    return cookies_header
| {
"content_hash": "8fcaf7c9cc2f26fbd2d6192f62b4a50d",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 90,
"avg_line_length": 29.551020408163264,
"alnum_prop": 0.6070441988950276,
"repo_name": "Khan/gae_bingo",
"id": "b78e7f20530a6c0e7ce1fbe40d184410e0eef810",
"size": "1448",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cookies.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from msrest.serialization import Model
class DeploymentTrafficManagerProfile(Model):
"""
Deployment operation parameters.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar uri: URI referencing the template. Default value:
"https://azuresdkci.blob.core.windows.net/templatehost/CreateTrafficManagerProfile_2016-08-08/azuredeploy.json"
.
:vartype uri: str
:param content_version: If included it must match the ContentVersion in
the template.
:type content_version: str
:param location: Location for traffic manager or 'global'. Default value:
"global" .
:type location: str
:param monitor_path: Path to monitor. Default value: "/" .
:type monitor_path: str
:param monitor_port: Port to monitor. Default value: 80 .
:type monitor_port: int
:param monitor_protocol: Monitor protocol. Possible values include:
'http', 'https'. Default value: "http" .
:type monitor_protocol: str or :class:`monitorProtocol
<trafficmanagerprofilecreationclient.models.monitorProtocol>`
:param routing_method: Routing method. Possible values include:
'priority', 'performance', 'weighted'
:type routing_method: str or :class:`routingMethod
<trafficmanagerprofilecreationclient.models.routingMethod>`
:param status: Create an enabled or disabled profile. Possible values
include: 'enabled', 'disabled'. Default value: "enabled" .
:type status: str or :class:`status
<trafficmanagerprofilecreationclient.models.status>`
:param traffic_manager_profile_name: Name of resource.
:type traffic_manager_profile_name: str
:param ttl: DNS Config time-to-live in seconds. Default value: 30 .
:type ttl: int
:param unique_dns_name: Relative DNS name for the traffic manager
profile, resulting FQDN will be <uniqueDnsName>.trafficmanager.net, must
be globally unique.
:type unique_dns_name: str
:ivar mode: Gets or sets the deployment mode. Default value:
"Incremental" .
:vartype mode: str
"""
_validation = {
'uri': {'required': True, 'constant': True},
'routing_method': {'required': True},
'traffic_manager_profile_name': {'required': True},
'unique_dns_name': {'required': True},
'mode': {'required': True, 'constant': True},
}
_attribute_map = {
'uri': {'key': 'properties.templateLink.uri', 'type': 'str'},
'content_version': {'key': 'properties.templateLink.contentVersion', 'type': 'str'},
'location': {'key': 'properties.parameters.location.value', 'type': 'str'},
'monitor_path': {'key': 'properties.parameters.monitorPath.value', 'type': 'str'},
'monitor_port': {'key': 'properties.parameters.monitorPort.value', 'type': 'int'},
'monitor_protocol': {'key': 'properties.parameters.monitorProtocol.value', 'type': 'monitorProtocol'},
'routing_method': {'key': 'properties.parameters.routingMethod.value', 'type': 'routingMethod'},
'status': {'key': 'properties.parameters.status.value', 'type': 'status'},
'traffic_manager_profile_name': {'key': 'properties.parameters.trafficManagerProfileName.value', 'type': 'str'},
'ttl': {'key': 'properties.parameters.ttl.value', 'type': 'int'},
'unique_dns_name': {'key': 'properties.parameters.uniqueDnsName.value', 'type': 'str'},
'mode': {'key': 'properties.mode', 'type': 'str'},
}
uri = "https://azuresdkci.blob.core.windows.net/templatehost/CreateTrafficManagerProfile_2016-08-08/azuredeploy.json"
mode = "Incremental"
def __init__(self, routing_method, traffic_manager_profile_name, unique_dns_name, content_version=None, location="global", monitor_path="/", monitor_port=80, monitor_protocol="http", status="enabled", ttl=30):
    """Store the deployment parameters for the Traffic Manager profile template.

    :param routing_method: Required. Routing method for the profile.
    :param traffic_manager_profile_name: Required. Name of the resource.
    :param unique_dns_name: Required. Relative DNS name; must be globally unique.
    :param content_version: Optional template content version.
    :param location: Resource location. Default: "global".
    :param monitor_path: Path for endpoint monitoring. Default: "/".
    :param monitor_port: Port for endpoint monitoring. Default: 80.
    :param monitor_protocol: Protocol for endpoint monitoring. Default: "http".
    :param status: Profile status, "enabled" or "disabled". Default: "enabled".
    :param ttl: DNS config time-to-live in seconds. Default: 30.
    """
    # NOTE: 'uri' and 'mode' are constant class attributes and are not set here.
    self.content_version = content_version
    self.location = location
    self.monitor_path = monitor_path
    self.monitor_port = monitor_port
    self.monitor_protocol = monitor_protocol
    self.routing_method = routing_method
    self.status = status
    self.traffic_manager_profile_name = traffic_manager_profile_name
    self.ttl = ttl
    self.unique_dns_name = unique_dns_name
| {
"content_hash": "1d823598b28f6970ad7504cd46033bbd",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 213,
"avg_line_length": 49.13793103448276,
"alnum_prop": 0.6748538011695906,
"repo_name": "BurtBiel/azure-cli",
"id": "780bf78046cf8afdd3e05097ce0c62bd273571eb",
"size": "4963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/command_modules/azure-cli-network/azure/cli/command_modules/network/mgmt_traffic_manager_profile/lib/models/deployment_traffic_manager_profile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "429"
},
{
"name": "Python",
"bytes": "2108820"
},
{
"name": "Shell",
"bytes": "3300"
}
],
"symlink_target": ""
} |
def setup(context, scenario):
    """
    set up scenario scaffolding for local samples

    Currently a no-op placeholder: no per-scenario scaffolding is needed
    for local samples yet, but the hook is kept so callers can invoke it
    unconditionally.

    :type context: behave.runner.Context
    :type scenario: behave.model.Scenario
    """
    pass
| {
"content_hash": "5b59b1fbeebee9e603b03a50ba428cab",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 49,
"avg_line_length": 26.857142857142858,
"alnum_prop": 0.675531914893617,
"repo_name": "SteelToeOSS/Samples",
"id": "79602bfd6d7ac44e3b8fd8c1cb2c83fe817d789f",
"size": "190",
"binary": false,
"copies": "1",
"ref": "refs/heads/2.x",
"path": "pysteel/scaffold/local.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "210"
},
{
"name": "Batchfile",
"bytes": "2137"
},
{
"name": "C#",
"bytes": "546165"
},
{
"name": "CSS",
"bytes": "8760"
},
{
"name": "HTML",
"bytes": "107853"
},
{
"name": "JavaScript",
"bytes": "72677"
},
{
"name": "Shell",
"bytes": "2091"
}
],
"symlink_target": ""
} |
# Evaluate a trained bag-of-features (BoF) object recognition pipeline:
# extract SIFT descriptors from a directory of images, transform them with
# a pickled BoF vocabulary, and score a pickled classifier against the
# directory-derived labels.  (Python 2 script: uses cPickle and print
# statements.)
import argparse
import cPickle as pickle
import gzip
import sys
import cv2
from imagesift import get_sift_keypoints
import numpy as np
from sklearn.datasets import load_files
from sklearn.metrics import accuracy_score, classification_report
from sklearn.preprocessing import normalize
# NOTE(review): 'sys' and 'np' appear unused in this script — confirm before removing.

# container_path: directory tree whose subdirectory names are the class labels
# bof_path/clf_path: gzipped pickles of the BoF transformer and classifier
parser = argparse.ArgumentParser()
parser.add_argument('container_path')
parser.add_argument('bof_path')
parser.add_argument('clf_path')
args = parser.parse_args()

container_path = args.container_path
bof_path = args.bof_path
clf_path = args.clf_path

# load_content=False: we only want filenames and targets, images are read by cv2
bunch_files = load_files(container_path=container_path,
                         description='images',
                         shuffle=False,
                         load_content=False)
with gzip.open(bof_path, 'rb') as f:
    bof = pickle.load(f)
with gzip.open(clf_path, 'rb') as f:
    clf = pickle.load(f)

# Extract SIFT descriptors per image (grayscale read: flag 0).
descs = []
for fname in bunch_files.filenames:
    img = cv2.imread(fname, 0)
    _, desc = get_sift_keypoints(img)
    descs.append(desc)

# Project descriptors into BoF histogram space and L2-normalize in place.
X = bof.transform(descs)
normalize(X, copy=False)

# Score predictions against directory-derived ground truth.
y_pred = clf.predict(X)
y = bunch_files.target
print accuracy_score(y, y_pred)
print classification_report(y, y_pred, target_names=clf.target_names_)
| {
"content_hash": "700af61ebec64eac77a7fa32866a77f9",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 70,
"avg_line_length": 26,
"alnum_prop": 0.7023411371237458,
"repo_name": "pazeshun/jsk_apc",
"id": "5b7aaa4840b4c09751e0216e5000634898208ee9",
"size": "1243",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "jsk_apc2015_common/scripts/test_bof_object_recognition.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "101871"
},
{
"name": "CMake",
"bytes": "42995"
},
{
"name": "Common Lisp",
"bytes": "695864"
},
{
"name": "Dockerfile",
"bytes": "1503"
},
{
"name": "HTML",
"bytes": "6364"
},
{
"name": "Python",
"bytes": "406153"
},
{
"name": "Shell",
"bytes": "4475"
}
],
"symlink_target": ""
} |
'''
Low level Skype for Linux interface implemented
using XWindows messaging. Uses direct Xlib calls
through ctypes module.
This module handles the options that you can pass to L{ISkype.__init__<skype.ISkype.__init__>}
for Linux machines when the transport is set to X11.
No further options are currently supported.
'''
import threading
from ctypes import *
from ctypes.util import find_library
import time
from Skype4Py.API import ICommand, _ISkypeAPIBase
from Skype4Py.enums import *
from Skype4Py.errors import ISkypeAPIError
# some Xlib constants (values from X11/X.h)
_PropertyChangeMask = 0x400000   # event mask: property changes on a window
_PropertyNotify = 28             # event type: window property changed
_ClientMessage = 33              # event type: client-to-client message
_PropertyNewValue = 0            # PropertyNotify state: property (re)set
_PropertyDelete = 1              # PropertyNotify state: property deleted

# some Xlib types, mirrored as ctypes aliases for the prototypes below
c_ulong_p = POINTER(c_ulong)
DisplayP = c_void_p
Atom = c_ulong
AtomP = c_ulong_p
XID = c_ulong
Window = XID
Bool = c_int
Status = c_int
Time = c_ulong
c_int_p = POINTER(c_int)

# should the structures be aligned to 8 bytes?
# (true on LP64 platforms, where long is 8 bytes but int is 4)
_align = (sizeof(c_long) == 8 and sizeof(c_int) == 4)
# some Xlib structures
class _XClientMessageEvent(Structure):
    """ctypes mirror of Xlib's XClientMessageEvent.

    Two layouts are declared: on LP64 platforms explicit 'padN' members
    reproduce the compiler's alignment padding.  Field order must match
    the C struct exactly — do not reorder.
    """
    if _align:
        _fields_ = [('type', c_int),
                    ('pad0', c_int),
                    ('serial', c_ulong),
                    ('send_event', Bool),
                    ('pad1', c_int),
                    ('display', DisplayP),
                    ('window', Window),
                    ('message_type', Atom),
                    ('format', c_int),
                    ('pad2', c_int),
                    ('data', c_char * 20)]
    else:
        _fields_ = [('type', c_int),
                    ('serial', c_ulong),
                    ('send_event', Bool),
                    ('display', DisplayP),
                    ('window', Window),
                    ('message_type', Atom),
                    ('format', c_int),
                    ('data', c_char * 20)]
class _XPropertyEvent(Structure):
    """ctypes mirror of Xlib's XPropertyEvent (PropertyNotify events).

    Layout variants follow the same alignment scheme as
    _XClientMessageEvent; field order must match the C struct.
    """
    if _align:
        _fields_ = [('type', c_int),
                    ('pad0', c_int),
                    ('serial', c_ulong),
                    ('send_event', Bool),
                    ('pad1', c_int),
                    ('display', DisplayP),
                    ('window', Window),
                    ('atom', Atom),
                    ('time', Time),
                    ('state', c_int),
                    ('pad2', c_int)]
    else:
        _fields_ = [('type', c_int),
                    ('serial', c_ulong),
                    ('send_event', Bool),
                    ('display', DisplayP),
                    ('window', Window),
                    ('atom', Atom),
                    ('time', Time),
                    ('state', c_int)]
class _XErrorEvent(Structure):
    """ctypes mirror of Xlib's XErrorEvent, passed to the error handler.

    'error_code' is the Xlib protocol error (e.g. 3 == BadWindow);
    field order must match the C struct.
    """
    if _align:
        _fields_ = [('type', c_int),
                    ('pad0', c_int),
                    ('display', DisplayP),
                    ('resourceid', XID),
                    ('serial', c_ulong),
                    ('error_code', c_ubyte),
                    ('request_code', c_ubyte),
                    ('minor_code', c_ubyte)]
    else:
        _fields_ = [('type', c_int),
                    ('display', DisplayP),
                    ('resourceid', XID),
                    ('serial', c_ulong),
                    ('error_code', c_ubyte),
                    ('request_code', c_ubyte),
                    ('minor_code', c_ubyte)]
class _XEvent(Union):
    """ctypes mirror of Xlib's XEvent union.

    Unlike the member structures above, the union needs no alignment
    variants: all padding differences live inside the members themselves,
    and the 'pad' member forces the canonical XEvent size of 24 longs.
    The original code duplicated an identical _fields_ list in both
    branches of an 'if _align:' test; the dead duplication is removed.
    """
    _fields_ = [('type', c_int),
                ('xclient', _XClientMessageEvent),
                ('xproperty', _XPropertyEvent),
                ('xerror', _XErrorEvent),
                ('pad', c_long * 24)]
# pointer-to-XEvent type used in the prototypes below
XEventP = POINTER(_XEvent)

# Xlib error handler type: int handler(Display*, XErrorEvent*)
XErrorHandlerP = CFUNCTYPE(c_int, DisplayP, POINTER(_XErrorEvent))
class _ISkypeAPI(_ISkypeAPIBase):
    """X11 transport for the low level Skype API.

    Talks to the Skype client by exchanging Xlib ClientMessage events
    (the SKYPECONTROLAPI_MESSAGE protocol) through a hidden window, and
    runs an X event loop in its own thread (run()).
    """

    def __init__(self, handler, opts):
        """Set up Xlib, create the messaging window and locate Skype.

        @param handler: notification handler, registered with the base class.
        @param opts: transport options dict; no options are supported, so
            any entry raises TypeError.
        """
        _ISkypeAPIBase.__init__(self, opts)
        self.RegisterHandler(handler)
        # check options
        if opts:
            raise TypeError('Unexpected parameter(s): %s' % ', '.join(opts.keys()))
        # setup Xlib
        libpath = find_library('X11')
        if not libpath:
            raise ImportError('Could not find X11 library')
        self.x11 = cdll.LoadLibrary(libpath)
        # setup Xlib function prototypes so ctypes marshals arguments correctly
        self.x11.XCloseDisplay.argtypes = (DisplayP,)
        self.x11.XCloseDisplay.restype = None
        self.x11.XCreateSimpleWindow.argtypes = (DisplayP, Window, c_int, c_int, c_uint,
                                                 c_uint, c_uint, c_ulong, c_ulong)
        self.x11.XCreateSimpleWindow.restype = Window
        self.x11.XDefaultRootWindow.argtypes = (DisplayP,)
        self.x11.XDefaultRootWindow.restype = Window
        self.x11.XDeleteProperty.argtypes = (DisplayP, Window, Atom)
        self.x11.XDeleteProperty.restype = None
        self.x11.XDestroyWindow.argtypes = (DisplayP, Window)
        self.x11.XDestroyWindow.restype = None
        self.x11.XPending.argtypes = (DisplayP,)
        self.x11.XPending.restype = c_int
        self.x11.XGetAtomName.argtypes = (DisplayP, Atom)
        self.x11.XGetAtomName.restype = c_char_p
        self.x11.XGetErrorText.argtypes = (DisplayP, c_int, c_char_p, c_int)
        self.x11.XGetErrorText.restype = None
        self.x11.XGetWindowProperty.argtypes = (DisplayP, Window, Atom, c_long, c_long, Bool,
                                                Atom, AtomP, c_int_p, c_ulong_p, c_ulong_p, POINTER(POINTER(Window)))
        self.x11.XGetWindowProperty.restype = c_int
        self.x11.XInitThreads.argtypes = ()
        self.x11.XInitThreads.restype = Status
        self.x11.XInternAtom.argtypes = (DisplayP, c_char_p, Bool)
        self.x11.XInternAtom.restype = Atom
        self.x11.XNextEvent.argtypes = (DisplayP, XEventP)
        self.x11.XNextEvent.restype = None
        self.x11.XOpenDisplay.argtypes = (c_char_p,)
        self.x11.XOpenDisplay.restype = DisplayP
        self.x11.XSelectInput.argtypes = (DisplayP, Window, c_long)
        self.x11.XSelectInput.restype = None
        self.x11.XSendEvent.argtypes = (DisplayP, Window, Bool, c_long, XEventP)
        self.x11.XSendEvent.restype = Status
        self.x11.XSetErrorHandler.argtypes = (XErrorHandlerP,)
        self.x11.XSetErrorHandler.restype = None
        self.x11.XLockDisplay.argtypes = (DisplayP,)
        self.x11.XLockDisplay.restype = None
        self.x11.XUnlockDisplay.argtypes = (DisplayP,)
        self.x11.XUnlockDisplay.restype = None
        # init Xlib
        self.x11.XInitThreads()
        self.error = None
        # callback has to be saved to keep reference to bound method
        self._error_handler_callback = XErrorHandlerP(self._error_handler)
        self.x11.XSetErrorHandler(self._error_handler_callback)
        self.disp = self.x11.XOpenDisplay(None)
        if not self.disp:
            raise ISkypeAPIError('Could not open XDisplay')
        self.win_root = self.x11.XDefaultRootWindow(self.disp)
        # hidden window that sends/receives SKYPECONTROLAPI messages
        self.win_self = self.x11.XCreateSimpleWindow(self.disp, self.win_root,
                                                     100, 100, 100, 100, 1, 0, 0)
        # watch the root window so we notice Skype (de)registering _SKYPE_INSTANCE
        self.x11.XSelectInput(self.disp, self.win_root, _PropertyChangeMask)
        self.win_skype = self.get_skype()
        ctrl = 'SKYPECONTROLAPI_MESSAGE'
        self.atom_msg = self.x11.XInternAtom(self.disp, ctrl, False)
        self.atom_msg_begin = self.x11.XInternAtom(self.disp, ctrl + '_BEGIN', False)
        self.loop_event = threading.Event()
        self.loop_timeout = 0.0001
        self.loop_break = False

    def __del__(self):
        """Destroys the messaging window and closes the X display."""
        if hasattr(self, 'x11'):
            if hasattr(self, 'disp'):
                if hasattr(self, 'win_self'):
                    self.x11.XDestroyWindow(self.disp, self.win_self)
                self.x11.XCloseDisplay(self.disp)

    def run(self):
        """Thread body: dispatches X events until Close() sets loop_break."""
        self.DebugPrint('thread started')
        # main loop
        event = _XEvent()
        data = ''
        while not self.loop_break:
            pending = self.x11.XPending(self.disp)
            if not pending:
                # No events: sleep with exponential back-off (capped at 1s),
                # reset whenever loop_event is signalled (e.g. by SendCommand).
                self.loop_event.wait(self.loop_timeout)
                if self.loop_event.isSet():
                    self.loop_timeout = 0.0001
                elif self.loop_timeout < 1.0:
                    self.loop_timeout *= 2
                self.loop_event.clear()
                continue
            self.loop_timeout = 0.0001
            for i in xrange(pending):
                self.x11.XLockDisplay(self.disp)
                self.x11.XNextEvent(self.disp, byref(event))
                self.x11.XUnlockDisplay(self.disp)
                if event.type == _ClientMessage:
                    if event.xclient.format == 8:
                        if event.xclient.message_type == self.atom_msg_begin:
                            # start of a new (possibly multi-chunk) message
                            data = str(event.xclient.data)
                        elif event.xclient.message_type == self.atom_msg:
                            if data != '':
                                data += str(event.xclient.data)
                            else:
                                print 'Warning! Middle of message received with no beginning!'
                        else:
                            continue
                        # a chunk shorter than the full 20 bytes ends the message
                        if len(event.xclient.data) != 20 and data:
                            self.notify(data.decode('utf-8'))
                            data = ''
                elif event.type == _PropertyNotify:
                    if self.x11.XGetAtomName(self.disp, event.xproperty.atom) == '_SKYPE_INSTANCE':
                        if event.xproperty.state == _PropertyNewValue:
                            self.win_skype = self.get_skype()
                            # changing attachment status can cause an event handler to be fired, in
                            # turn it could try to call Attach() and doing this immediately seems to
                            # confuse Skype (command '#0 NAME xxx' returns '#0 CONNSTATUS OFFLINE' :D);
                            # to fix this, we give Skype some time to initialize itself
                            time.sleep(1.0)
                            self.SetAttachmentStatus(apiAttachAvailable)
                        elif event.xproperty.state == _PropertyDelete:
                            self.win_skype = None
                            self.SetAttachmentStatus(apiAttachNotAvailable)
        self.DebugPrint('thread finished')

    def _error_handler(self, disp, error):
        """Xlib error handler: records the error code and unblocks commands.

        Runs inside Xlib's dispatch; must not raise, so the error is stored
        for error_check() to pick up later.
        """
        # called from within Xlib when error occures
        self.error = error.contents.error_code
        self.DebugPrint('Xlib error', self.error)
        # stop all pending commands
        for command in self.Commands.values():
            if hasattr(command, '_event'):
                command._event.set()
        return 0

    def error_check(self):
        '''Checks last Xlib error and raises an exception if needed.'''
        if self.error != None:
            if self.error == 3: # BadWindow
                # Skype's window is gone; mark the API as detached.
                self.win_skype = None
                self.SetAttachmentStatus(apiAttachNotAvailable)
            buf = create_string_buffer(256)
            self.x11.XGetErrorText(self.disp, self.error, buf, 256)
            error = ISkypeAPIError('X11 error: %s' % buf.value)
            # clear before raising so the next call starts clean
            self.error = None
            raise error

    def get_skype(self):
        '''Returns Skype window ID or None if Skype not running.'''
        # reads the _SKYPE_INSTANCE property (type 33 == XA_WINDOW) on the root window
        skype_inst = self.x11.XInternAtom(self.disp, '_SKYPE_INSTANCE', True)
        type_ret = Atom()
        format_ret = c_int()
        nitems_ret = c_ulong()
        bytes_after_ret = c_ulong()
        winp = pointer(Window())
        fail = self.x11.XGetWindowProperty(self.disp, self.win_root, skype_inst,
                                           0, 1, False, 33, byref(type_ret), byref(format_ret),
                                           byref(nitems_ret), byref(bytes_after_ret), byref(winp))
        # implicit return of None when the property is absent or malformed
        if not fail and self.error == None and format_ret.value == 32 and nitems_ret.value == 1:
            return winp.contents.value

    def Close(self):
        """Stops the event loop thread and waits for it to finish."""
        self.loop_break = True
        self.loop_event.set()
        while self.isAlive():
            time.sleep(0.01)
        self.DebugPrint('closed')

    def SetFriendlyName(self, FriendlyName):
        """Sets the API client name, reattaching if currently attached."""
        self.FriendlyName = FriendlyName
        if self.AttachmentStatus == apiAttachSuccess:
            # reattach with the new name
            self.SetAttachmentStatus(apiAttachUnknown)
            self.Attach()

    def __Attach_ftimeout(self):
        # Timer callback for Attach(): clearing the flag ends the wait loop.
        self.wait = False

    def Attach(self, Timeout=30000, Wait=True):
        """Attaches to the Skype client and negotiates name and protocol.

        @param Timeout: attach timeout in milliseconds.
        @param Wait: when True, blocks until Skype appears or the timeout fires.
        """
        if self.AttachmentStatus == apiAttachSuccess:
            return
        if not self.isAlive():
            try:
                self.start()
            except AssertionError:
                raise ISkypeAPIError('Skype API closed')
        try:
            self.wait = True
            t = threading.Timer(Timeout / 1000.0, self.__Attach_ftimeout)
            if Wait:
                t.start()
                while self.wait:
                    self.win_skype = self.get_skype()
                    if self.win_skype != None:
                        break
                    else:
                        time.sleep(1.0)
                else:
                    # loop ended without break: the timer cleared self.wait
                    raise ISkypeAPIError('Skype attach timeout')
        finally:
            t.cancel()
        # handshake: announce our name, then select the protocol version
        c = ICommand(-1, 'NAME %s' % self.FriendlyName, '', True, Timeout)
        self.SendCommand(c, True)
        if c.Reply != 'OK':
            self.win_skype = None
            self.SetAttachmentStatus(apiAttachRefused)
            return
        self.SendCommand(ICommand(-1, 'PROTOCOL %s' % self.Protocol), True)
        self.SetAttachmentStatus(apiAttachSuccess)

    def IsRunning(self):
        """Returns True if a Skype instance window is registered on the display."""
        return self.get_skype() != None

    def Start(self, Minimized=False, Nosplash=False):
        """Starts the Skype client in a detached child process."""
        # options are not supported as of Skype 1.4 Beta for Linux
        if not self.IsRunning():
            import os
            if os.fork() == 0: # we're the child
                os.setsid()
                # NOTE(review): os.execlp normally needs the program name
                # repeated as arg0, e.g. os.execlp('skype', 'skype') — verify.
                os.execlp('skype')

    def Shutdown(self):
        """Terminates the running Skype client via SIGINT and cleans up."""
        import os
        from signal import SIGINT
        fh = os.popen('ps -o %p --no-heading -C skype')
        pid = fh.readline().strip()
        fh.close()
        if pid:
            os.kill(int(pid), SIGINT)
            # Skype sometimes doesn't delete the '_SKYPE_INSTANCE' property
            skype_inst = self.x11.XInternAtom(self.disp, '_SKYPE_INSTANCE', True)
            self.x11.XDeleteProperty(self.disp, self.win_root, skype_inst)
            self.win_skype = None
            self.SetAttachmentStatus(apiAttachNotAvailable)

    def SendCommand(self, Command, Force=False):
        """Sends an ICommand to Skype, attaching first if necessary.

        Blocking commands wait (up to Command.Timeout ms) for the reply
        event; non-blocking ones arm a timer that drops the command from
        the stack on timeout.
        @param Force: when True, skip the auto-attach check (used during
            the attach handshake itself).
        """
        if self.AttachmentStatus != apiAttachSuccess and not Force:
            self.Attach(Command.Timeout)
        self.CommandsStackPush(Command)
        self.CallHandler('send', Command)
        com = u'#%d %s' % (Command.Id, Command.Command)
        self.DebugPrint('->', repr(com))
        if Command.Blocking:
            Command._event = bevent = threading.Event()
        else:
            Command._timer = timer = threading.Timer(Command.Timeout / 1000.0, self.CommandsStackPop, (Command.Id,))
        # build the ClientMessage; the first chunk uses the _BEGIN atom,
        # subsequent chunks the plain message atom
        event = _XEvent()
        event.xclient.type = _ClientMessage
        event.xclient.display = self.disp
        event.xclient.window = self.win_self
        event.xclient.message_type = self.atom_msg_begin
        event.xclient.format = 8
        # NUL-terminated UTF-8, sent in 20-byte chunks
        com = unicode(com).encode('utf-8') + '\x00'
        for i in xrange(0, len(com), 20):
            event.xclient.data = com[i:i+20]
            if self.x11.XSendEvent(self.disp, self.win_skype, False, 0, byref(event)) == 0:
                self.error_check()
            event.xclient.message_type = self.atom_msg
        # wake the event loop so the reply is processed promptly
        self.loop_event.set()
        self.error_check()
        if Command.Blocking:
            bevent.wait(Command.Timeout / 1000.0)
            self.error_check()
            if not bevent.isSet():
                raise ISkypeAPIError('Skype command timeout')
        else:
            timer.start()

    def notify(self, com):
        """Dispatches a complete message received from Skype.

        Replies to our commands start with '#<id> '; anything else is an
        unsolicited API notification.
        """
        self.DebugPrint('<-', repr(com))
        # Called by main loop for all received Skype commands.
        if com.startswith(u'#'):
            p = com.find(u' ')
            Command = self.CommandsStackPop(int(com[1:p]))
            if Command:
                Command.Reply = com[p + 1:]
                if Command.Blocking:
                    Command._event.set()
                    del Command._event
                else:
                    Command._timer.cancel()
                    del Command._timer
                self.CallHandler('rece', Command)
            else:
                # reply to a command we no longer track (e.g. timed out)
                self.CallHandler('rece_api', com[p + 1:])
        else:
            self.CallHandler('rece_api', com)
| {
"content_hash": "bd63ab4a525e60397bd4d3be52dbef14",
"timestamp": "",
"source": "github",
"line_count": 428,
"max_line_length": 116,
"avg_line_length": 39.35280373831776,
"alnum_prop": 0.5360090245205723,
"repo_name": "neurodebian/htcondor",
"id": "da6416caddad3c38de848381bb86c3aedef2515b",
"size": "16843",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "src/condor_contrib/condor_pigeon/src/condor_pigeon_client/skype_linux_tools/Skype4Py/API/posix_x11.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "18848"
},
{
"name": "Batchfile",
"bytes": "164295"
},
{
"name": "C",
"bytes": "1695675"
},
{
"name": "C++",
"bytes": "27269492"
},
{
"name": "CMake",
"bytes": "556754"
},
{
"name": "FORTRAN",
"bytes": "110251"
},
{
"name": "Groff",
"bytes": "6128"
},
{
"name": "HTML",
"bytes": "16109"
},
{
"name": "Java",
"bytes": "44327"
},
{
"name": "JavaScript",
"bytes": "2095"
},
{
"name": "Lex",
"bytes": "6527"
},
{
"name": "M4",
"bytes": "19489"
},
{
"name": "Makefile",
"bytes": "42701"
},
{
"name": "Objective-C",
"bytes": "42170"
},
{
"name": "PLpgSQL",
"bytes": "23393"
},
{
"name": "Perl",
"bytes": "4150910"
},
{
"name": "Python",
"bytes": "1048286"
},
{
"name": "Ruby",
"bytes": "24647"
},
{
"name": "SQLPL",
"bytes": "10933"
},
{
"name": "Shell",
"bytes": "1261504"
},
{
"name": "TeX",
"bytes": "17944"
},
{
"name": "Yacc",
"bytes": "62678"
}
],
"symlink_target": ""
} |
"""Introspection of Moya elements"""
from __future__ import unicode_literals
from ..elements import Attribute
from ..elements.elementproxy import ElementProxy
from ..tags.context import DataSetter
from .. import errors
from ..logic import DeferNodeContents
class GetParentElement(DataSetter):
    """Get information about the element that contains this one."""

    class Help:
        # Fixed attribute name: was misspelled 'synopis', which broke the
        # convention every other Help class in this module follows.
        synopsis = "get information about the parent element"

    def get_value(self, context):
        """Return an ElementProxy for the parent element, bound to the current app."""
        element_proxy = self.parent.get_proxy(context, app=context[".app"])
        return element_proxy
class GetElement(DataSetter):
    """Retrieve information regarding an element."""

    class Help:
        synopsis = "get information about an element"

    name = Attribute("Element name")
    dst = Attribute("Destination", type="reference", default=None)
    _from = Attribute("Application", type="application", default=None)

    def logic(self, context):
        """Look up the named element and store its proxy in *dst*."""
        application = self.get_app(context)
        element_name = self.name(context)
        destination = self.dst(context)
        found_app, found_element = self.get_element(element_name, app=application)
        proxy = found_element.get_proxy(context, found_app)
        self.set_context(context, destination, proxy)
class FindElements(DataSetter):
    """Retrieve information regarding elements of a given type."""

    class Help:
        synopsis = "retrieve information regarding elements of a given type"

    tag = Attribute("Element type")
    ns = Attribute("XML namespace", type="namespace", default=None)
    dst = Attribute("Destination", type="reference", default=None)
    _from = Attribute("Application", type="application", default=None)

    def logic(self, context):
        """Collect proxies for every element of the requested type, filtered by let values."""
        xmlns, element_tag, destination = self.get_parameters(context, "ns", "tag", "dst")
        self.get_app(context)
        if xmlns is None:
            # Fall back to the namespace of the declaring library.
            xmlns = self.lib.namespace
        criteria = self.get_let_map(context)
        proxies = []
        for matched in self.archive.get_elements_by_type(xmlns, element_tag):
            proxies.append(ElementProxy(context, None, matched))
        if criteria:
            proxies = [
                proxy
                for proxy in proxies
                if all(criteria.get(key, None) == proxy.params.get(key) for key in criteria)
            ]
        self.set_context(context, destination, proxies)
class FindAppElements(DataSetter):
    """
    Retrieve information regarding elements of a given type, with an entry per application.
    """

    class Help:
        synopsis = "retrieve information regarding elements of a given type"

    tag = Attribute("Element type")
    ns = Attribute("XML namespace", type="namespace", default=None)
    dst = Attribute("Destination", type="reference", default=None)
    _from = Attribute("Application", type="application", default=None)

    def logic(self, context):
        """Build one ElementProxy per (app, element) pair and store the list in *dst*."""
        xmlns, element_tag, destination = self.get_parameters(context, "ns", "tag", "dst")
        self.get_app(context)
        if xmlns is None:
            xmlns = self.lib.namespace
        criteria = self.get_let_map(context)
        archive = self.archive
        proxies = []
        for matched in archive.get_elements_by_type(xmlns, element_tag):
            # One library may be installed as several applications; emit an
            # entry for each of them.
            for app_name in archive.apps_by_lib[matched.lib.long_name]:
                proxies.append(ElementProxy(context, archive.apps[app_name], matched))
        if criteria:
            proxies = [
                proxy
                for proxy in proxies
                if all(criteria.get(key, None) == proxy.params.get(key) for key in criteria)
            ]
        self.set_context(context, destination, proxies)
class FindElement(DataSetter):
    """Retrieve an element of a given type."""

    class Help:
        # Dropped a stray adjacent empty string literal ("" ) that was
        # being implicitly concatenated onto the synopsis.
        synopsis = "retrieve information regarding an element"

    tag = Attribute("Element type")
    ns = Attribute("XML namespace", type="namespace", default=None)
    dst = Attribute("Destination", type="reference", default=None)
    _from = Attribute("Application", type="application", default=None)

    def logic(self, context):
        """Find the first matching element and store its proxy (or None) in *dst*."""
        ns, tag, dst = self.get_parameters(context, "ns", "tag", "dst")
        app = self.get_app(context)
        if ns is None:
            ns = self.lib.namespace
        let_map = self.get_let_map(context)
        for el in self.archive.get_elements_by_type(ns, tag):
            # Restrict the search to the requesting app's library, if any.
            if app and el.lib != app.lib:
                continue
            element = ElementProxy(context, app, el)
            params = element.params
            if all(let_map.get(k, None) == params.get(k) for k in let_map):
                self.set_context(context, dst, element)
                break
        else:
            # No element matched: record None so callers can tell.
            self.set_context(context, dst, None)
class GetChildren(DataSetter):
    """Get the children of an element."""

    class Help:
        synopsis = "get an elements children"

    element_ref = Attribute("Element Reference", default=None)
    element = Attribute("Element", type="expression", default=None)
    tag = Attribute("Element type")
    ns = Attribute("XML namespace", type="namespace", default=None)
    dst = Attribute("Destination", type="reference", default=None)
    data = Attribute("Data only", type="boolean", default=False)

    def logic(self, context):
        """Resolve the target element and store its (optionally filtered) children in *dst*."""
        (element, element_ref, tag, ns, dst, data) = self.get_parameters(
            context, "element", "element_ref", "tag", "ns", "dst", "data"
        )
        app = getattr(element, "app", None)
        # Default to this tag's own parent when neither attribute was given.
        if element is None and element_ref is None:
            element_ref = self.parent.libid
        if element is not None:
            # An expression value must wrap a real element.
            if not hasattr(element, "__moyaelement__"):
                self.throw(
                    "bad-value.not-an-element",
                    "Can't get children of '{!r}' because it's not an element".format(
                        element
                    ),
                )
            element = element.__moyaelement__()
        elif element_ref is not None:
            try:
                app, element = self.get_element(element_ref)
            except errors.ElementNotFoundError:
                self.throw(
                    "bad-value.element-not-found",
                    "Element with reference '{}' was not found".format(element_ref),
                )
        else:
            self.throw(
                "bad-value.missing-element",
                "A valid element is required, not {!r}".format(element),
            )
        if tag:
            # Restrict children to a single element type within a namespace.
            if ns is None:
                ns = self.lib.namespace
            children = list(element.children(element_type=(ns, tag)))
        else:
            children = element.get_children()
        children = [ElementProxy(context, app, el) for el in children]
        if data:
            # Return plain data dicts rather than element proxies.
            children = [child.params for child in children]
        self.set_context(context, dst, children)
class ForChildren(DataSetter):
    """Loop for each child of an element"""

    class Help:
        synopsis = """iterate over the children of an element"""

    element_ref = Attribute("Element Reference", default=None)
    element = Attribute("Element", type="expression", default=None)
    tag = Attribute("Element type")
    ns = Attribute("XML namespace", type="namespace")
    dst = Attribute("Destination", type="reference", default=None)
    data = Attribute("Data only", type="boolean", default=False)
    filter = Attribute(
        "Filter on condition", required=False, type="expression", default=True
    )

    def logic(self, context):
        """Generator: yield the tag's contents once per (filtered) child.

        Each matching child is bound to *dst* before the enclosed nodes run.
        NOTE(review): unlike GetChildren, there is no fallback to
        self.parent.libid when neither element nor element_ref is given —
        confirm whether that asymmetry is intentional.
        """
        (element, element_ref, tag, ns, dst, data) = self.get_parameters(
            context, "element", "element_ref", "tag", "ns", "dst", "data"
        )
        app = getattr(element, "app", None)
        if element is not None:
            if not hasattr(element, "__moyaelement__"):
                self.throw(
                    "bad-value.not-an-element",
                    "Can't get children of '{!r}' because it's not an element".format(
                        element
                    ),
                )
            element = element.__moyaelement__()
        elif element_ref is not None:
            try:
                app, element = self.get_element(element_ref)
            except errors.ElementNotFoundError:
                self.throw(
                    "bad-value.not-found",
                    "Element with reference '{}' was not found".format(element_ref),
                )
        else:
            self.throw(
                "bad-value.missing-element",
                "A valid element is required, not {!r}".format(element),
            )
        if tag:
            # Restrict iteration to children of one element type.
            if ns is None:
                ns = self.lib.namespace
            children = list(element.children(element_type=(ns, tag)))
        else:
            children = element.get_children()
        children = [ElementProxy(context, app, el) for el in children]
        if data:
            # Iterate over plain data dicts rather than element proxies.
            children = [child.params for child in children]
        filter = self.filter
        for child in children:
            context[dst] = child
            # Evaluate the filter expression against the child just bound.
            if filter(context):
                yield DeferNodeContents(self)
class GetData(DataSetter):
    """Get all data from data tags."""

    class Help:
        synopsis = "get data from custom data tags"

    tag = Attribute("Element type")
    ns = Attribute("XML namespace", default=None)
    dst = Attribute("Destination", type="reference", default=None)
    _from = Attribute("Application", type="application", required=False, default=None)

    def logic(self, context):
        """Gather all data registered for the tag and write it to *dst*."""
        xmlns, data_tag, destination = self.get_parameters(context, "ns", "tag", "dst")
        if xmlns is None:
            # No explicit namespace: derive one from the application.
            owning_app = self.get_app(context, check=False)
            if owning_app is None:
                self.throw(
                    "get-data.namespace-missing",
                    "Couldn't detect namespace (set 'ns' or 'from' attribute)",
                )
            xmlns = owning_app.lib.namespace
        tag_data = self.archive.get_data(context, xmlns, data_tag)
        self.set_context(context, destination, tag_data)
class GetDataItem(DataSetter):
    """Get a single data item."""

    class Help:
        synopsis = "get data from a single custom tag"

    tag = Attribute("Element type")
    ns = Attribute("XML namespace", default=None, required=True)
    dst = Attribute("Destination", type="reference", default=None)
    _from = Attribute("Application", type="application", required=False, default=None)

    def logic(self, context):
        """Fetch the single data item matching the let map and store it in *dst*."""
        xmlns, data_tag, destination, from_app = self.get_parameters(
            context, "ns", "tag", "dst", "from"
        )
        owning_lib = None
        application = self.archive.find_app(from_app)
        if application is not None:
            owning_lib = application.lib
        item = self.archive.get_data_item(
            context, xmlns, data_tag, self.get_let_map(context), lib=owning_lib
        )
        self.set_context(context, destination, item)
class GetDataFromElement(DataSetter):
    """Extract the data parameters attached to a given element."""

    element = Attribute("Element", type="expression")
    dst = Attribute("Destination", type="reference", default=None)

    def logic(self, context):
        """Read every data parameter from the referenced element into *dst*."""
        target_tag = self.element(context).tag
        extracted = target_tag.get_all_data_parameters(context)
        self.set_context(context, self.dst(context), extracted)
class GetDataElements(DataSetter):
    """Retrieve data elements of a given type, optionally grouped per app."""

    tag = Attribute("Element type")
    ns = Attribute("XML namespace", default=None)
    dst = Attribute("Destination", type="reference", default=None)
    byapp = Attribute("List data elements by app", type="boolean", default=False)
    _from = Attribute("Application", type="application", required=False, default=None)

    def logic(self, context):
        """Store matching data elements in *dst* (one entry per app when byapp is set)."""
        xmlns, data_tag, destination, group_by_app = self.get_parameters(
            context, "ns", "tag", "dst", "byapp"
        )
        if xmlns is None:
            # No explicit namespace: derive one from the application.
            owning_app = self.get_app(context, check=False)
            if owning_app is None:
                self.throw(
                    "get-data.namespace-missing",
                    "Couldn't detect namespace (set 'ns' or 'from' attribute)",
                )
            xmlns = owning_app.lib.namespace
        if group_by_app:
            found = self.archive.get_app_data_elements(context, xmlns, data_tag)
        else:
            found = self.archive.get_data_elements(context, xmlns, data_tag)
        self.set_context(context, destination, found)
| {
"content_hash": "074a652049d9cc19a935ba74fa723584",
"timestamp": "",
"source": "github",
"line_count": 339,
"max_line_length": 91,
"avg_line_length": 35.474926253687315,
"alnum_prop": 0.5824879427906203,
"repo_name": "moyaproject/moya",
"id": "301a77386e0ea145e27a03771f73299c1b11b02e",
"size": "12026",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moya/tags/elements.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "662"
},
{
"name": "CSS",
"bytes": "98490"
},
{
"name": "Genshi",
"bytes": "949"
},
{
"name": "HTML",
"bytes": "14279826"
},
{
"name": "JavaScript",
"bytes": "369773"
},
{
"name": "Myghty",
"bytes": "774"
},
{
"name": "Python",
"bytes": "1828220"
},
{
"name": "Shell",
"bytes": "165"
},
{
"name": "Smalltalk",
"bytes": "154"
}
],
"symlink_target": ""
} |
"""@package skdaccess
The scikit-dataaccess package (import as skdaccess) provides a common api
framework for importing and handling scientific data sets. This also supports
standardized metadata tags, annotations, and queries.
The package contains a base DataClass that provides a top level DataGenerator
and DataWrapper inherited by specific data sources. The DataGenerator parses
data, and the DataWrapper iterates over the data sources and returns desired
pieces of data.
Metadata Tags of the form:
#@meta key, value
"""
#@meta package, help
from . import *
import pkg_resources
# __version__ = pkg_resources.get_distribution('scikit-dataaccess').version
| {
"content_hash": "cbcf26868a1817744b5fe6242e4623d2",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 77,
"avg_line_length": 31.666666666666668,
"alnum_prop": 0.7954887218045112,
"repo_name": "skdaccess/skdaccess",
"id": "ee338dc81f4eb12587e2cbbdab8851a3a0e24845",
"size": "2138",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "skdaccess/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "335879"
},
{
"name": "Python",
"bytes": "119834"
}
],
"symlink_target": ""
} |
import sendgrid
import json
import os
# Exercises every SendGrid whitelabel endpoint: domain whitelabels,
# IP whitelabels, and link whitelabels.  Each request's status code,
# body, and headers are printed via the show() helper below.
sg = sendgrid.SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))


def show(response):
    # Dump the three interesting pieces of every API response.
    print(response.status_code)
    print(response.body)
    print(response.headers)


# ---- Domain whitelabels -------------------------------------------------

# POST /whitelabel/domains -- create a domain whitelabel.
data = {
    "automatic_security": False,
    "custom_spf": True,
    "default": True,
    "domain": "example.com",
    "ips": [
        "192.168.1.1",
        "192.168.1.2"
    ],
    "subdomain": "news",
    "username": "john@example.com"
}
show(sg.client.whitelabel.domains.post(request_body=data))

# GET /whitelabel/domains -- list all domain whitelabels.
params = {'username': 'test_string', 'domain': 'test_string',
          'exclude_subusers': 'true', 'limit': 1, 'offset': 1}
show(sg.client.whitelabel.domains.get(query_params=params))

# GET /whitelabel/domains/default -- get the default domain whitelabel.
show(sg.client.whitelabel.domains.default.get())

# GET /whitelabel/domains/subuser -- list the domain whitelabel
# associated with the given user.
show(sg.client.whitelabel.domains.subuser.get())

# DELETE /whitelabel/domains/subuser -- disassociate a domain whitelabel
# from a given user.
show(sg.client.whitelabel.domains.subuser.delete())

# PATCH /whitelabel/domains/{domain_id} -- update a domain whitelabel.
data = {
    "custom_spf": True,
    "default": False
}
domain_id = "test_url_param"
show(sg.client.whitelabel.domains._(domain_id).patch(request_body=data))

# GET /whitelabel/domains/{domain_id} -- retrieve a domain whitelabel.
show(sg.client.whitelabel.domains._(domain_id).get())

# DELETE /whitelabel/domains/{domain_id} -- delete a domain whitelabel.
show(sg.client.whitelabel.domains._(domain_id).delete())

# POST /whitelabel/domains/{domain_id}/subuser -- associate a domain
# whitelabel with a given user.
data = {
    "username": "jane@example.com"
}
show(sg.client.whitelabel.domains._(domain_id).subuser.post(request_body=data))

# POST /whitelabel/domains/{id}/ips -- add an IP to a domain whitelabel.
data = {
    "ip": "192.168.0.1"
}
id = "test_url_param"
show(sg.client.whitelabel.domains._(id).ips.post(request_body=data))

# DELETE /whitelabel/domains/{id}/ips/{ip} -- remove an IP from a
# domain whitelabel.
ip = "test_url_param"
show(sg.client.whitelabel.domains._(id).ips._(ip).delete())

# POST /whitelabel/domains/{id}/validate -- validate a domain whitelabel.
show(sg.client.whitelabel.domains._(id).validate.post())

# ---- IP whitelabels -----------------------------------------------------

# POST /whitelabel/ips -- create an IP whitelabel.
data = {
    "domain": "example.com",
    "ip": "192.168.1.1",
    "subdomain": "email"
}
show(sg.client.whitelabel.ips.post(request_body=data))

# GET /whitelabel/ips -- retrieve all IP whitelabels.
params = {'ip': 'test_string', 'limit': 1, 'offset': 1}
show(sg.client.whitelabel.ips.get(query_params=params))

# GET /whitelabel/ips/{id} -- retrieve an IP whitelabel.
show(sg.client.whitelabel.ips._(id).get())

# DELETE /whitelabel/ips/{id} -- delete an IP whitelabel.
show(sg.client.whitelabel.ips._(id).delete())

# POST /whitelabel/ips/{id}/validate -- validate an IP whitelabel.
show(sg.client.whitelabel.ips._(id).validate.post())

# ---- Link whitelabels ---------------------------------------------------

# POST /whitelabel/links -- create a link whitelabel.
data = {
    "default": True,
    "domain": "example.com",
    "subdomain": "mail"
}
params = {'limit': 1, 'offset': 1}
show(sg.client.whitelabel.links.post(request_body=data, query_params=params))

# GET /whitelabel/links -- retrieve all link whitelabels.
params = {'limit': 1}
show(sg.client.whitelabel.links.get(query_params=params))

# GET /whitelabel/links/default -- retrieve the default link whitelabel.
params = {'domain': 'test_string'}
show(sg.client.whitelabel.links.default.get(query_params=params))

# GET /whitelabel/links/subuser -- retrieve the link whitelabel
# associated with a subuser.
params = {'username': 'test_string'}
show(sg.client.whitelabel.links.subuser.get(query_params=params))

# DELETE /whitelabel/links/subuser -- disassociate a link whitelabel
# from a subuser.
show(sg.client.whitelabel.links.subuser.delete(query_params=params))

# PATCH /whitelabel/links/{id} -- update a link whitelabel.
data = {
    "default": True
}
show(sg.client.whitelabel.links._(id).patch(request_body=data))

# GET /whitelabel/links/{id} -- retrieve a link whitelabel.
show(sg.client.whitelabel.links._(id).get())

# DELETE /whitelabel/links/{id} -- delete a link whitelabel.
show(sg.client.whitelabel.links._(id).delete())

# POST /whitelabel/links/{id}/validate -- validate a link whitelabel.
show(sg.client.whitelabel.links._(id).validate.post())

# POST /whitelabel/links/{link_id}/subuser -- associate a link
# whitelabel with a subuser.
data = {
    "username": "jane@example.com"
}
link_id = "test_url_param"
show(sg.client.whitelabel.links._(link_id).subuser.post(request_body=data))
| {
"content_hash": "9e35b5f95830b48d238f8d94e165e894",
"timestamp": "",
"source": "github",
"line_count": 311,
"max_line_length": 114,
"avg_line_length": 27.308681672025724,
"alnum_prop": 0.6154480160131873,
"repo_name": "gabrielkrell/sendgrid-python",
"id": "f529d3ed2149e54678d20c3d2defcc7e0520a5cc",
"size": "8493",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/whitelabel/whitelabel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "357"
},
{
"name": "Python",
"bytes": "150754"
},
{
"name": "Shell",
"bytes": "2820"
}
],
"symlink_target": ""
} |
try:
from urllib.parse import quote_plus
except ImportError:
from urllib import quote_plus
from html5lib.tokenizer import HTMLTokenizer
from nose.tools import eq_
from bleach import linkify, url_re, DEFAULT_CALLBACKS as DC
def test_url_re():
    """Nose generator: url_re must not match text joined by an ellipsis."""
    def no_match(s):
        # The regex should find nothing; on failure, report what it matched.
        match = url_re.search(s)
        if match:
            assert not match, 'matched {0!s}'.format(s[slice(*match.span())])
    yield no_match, 'just what i am looking for...it'
def test_empty():
    """Linkifying an empty string returns an empty string."""
    result = linkify('')
    eq_('', result)
def test_simple_link():
    """Bare http, https, and schemeless URLs all become anchors."""
    eq_('a <a href="http://example.com" rel="nofollow">http://example.com'
        '</a> link',
        linkify('a http://example.com link'))
    eq_('a <a href="https://example.com" rel="nofollow">https://example.com'
        '</a> link',
        linkify('a https://example.com link'))
    # A bare domain gets http:// prepended in the href only.
    eq_('a <a href="http://example.com" rel="nofollow">example.com</a> link',
        linkify('a example.com link'))
def test_trailing_slash():
    """Trailing slashes are preserved in both href and link text."""
    eq_('<a href="http://examp.com/" rel="nofollow">http://examp.com/</a>',
        linkify('http://examp.com/'))
    eq_('<a href="http://example.com/foo/" rel="nofollow">'
        'http://example.com/foo/</a>',
        linkify('http://example.com/foo/'))
    eq_('<a href="http://example.com/foo/bar/" rel="nofollow">'
        'http://example.com/foo/bar/</a>',
        linkify('http://example.com/foo/bar/'))
def test_mangle_link():
    """We can muck with the href attribute of the link."""
    def filter_url(attrs, new=False):
        # Redirect every link through a bouncer, URL-encoding the target.
        quoted = quote_plus(attrs['href'])
        attrs['href'] = 'http://bouncer/?u={0!s}'.format(quoted)
        return attrs
    eq_('<a href="http://bouncer/?u=http%3A%2F%2Fexample.com" rel="nofollow">'
        'http://example.com</a>',
        linkify('http://example.com', DC + [filter_url]))
def test_mangle_text():
    """We can muck with the inner text of a link."""
    def ft(attrs, new=False):
        # '_text' is the special callback key holding the link's inner text.
        attrs['_text'] = 'bar'
        return attrs
    eq_('<a href="http://ex.mp">bar</a> <a href="http://ex.mp/foo">bar</a>',
        linkify('http://ex.mp <a href="http://ex.mp/foo">foo</a>', [ft]))
def test_email_link():
    """Nose generator: addresses linkify only when parse_email=True."""
    # Each case: (expected output, parse_email flag, input text).
    tests = (
        ('a james@example.com mailto', False, 'a james@example.com mailto'),
        ('a james@example.com.au mailto', False,
         'a james@example.com.au mailto'),
        ('a <a href="mailto:james@example.com">james@example.com</a> mailto',
         True, 'a james@example.com mailto'),
        ('aussie <a href="mailto:james@example.com.au">'
         'james@example.com.au</a> mailto', True,
         'aussie james@example.com.au mailto'),
        # This is kind of a pathological case. I guess we do our best here.
        ('email to <a href="james@example.com" rel="nofollow">'
         'james@example.com</a>',
         True,
         'email to <a href="james@example.com">james@example.com</a>'),
        ('<br><a href="mailto:jinkyun@example.com">'
         'jinkyun@example.com</a>',
         True,
         '<br>jinkyun@example.com'),
    )
    def _check(o, p, i):
        eq_(o, linkify(i, parse_email=p))
    for (o, p, i) in tests:
        yield _check, o, p, i
def test_email_link_escaping():
    """Quoted/special characters in the local part survive in mailto links."""
    # Each case: (expected output, input text).
    tests = (
        ('''<a href='mailto:"james"@example.com'>'''
         '''"james"@example.com</a>''',
         '"james"@example.com'),
        ('''<a href="mailto:"j'ames"@example.com">'''
         '''"j'ames"@example.com</a>''',
         '"j\'ames"@example.com'),
        ('''<a href='mailto:"ja>mes"@example.com'>'''
         '''"ja>mes"@example.com</a>''',
         '"ja>mes"@example.com'),
    )
    def _check(o, i):
        eq_(o, linkify(i, parse_email=True))
    for (o, i) in tests:
        yield _check, o, i
def test_prevent_links():
    """Returning None from any callback should remove links or prevent them
    from being created."""
    # Vetoes creation of new links; leaves existing ones alone.
    def no_new_links(attrs, new=False):
        if new:
            return None
        return attrs
    # Strips links that already existed in the input.
    def no_old_links(attrs, new=False):
        if not new:
            return None
        return attrs
    def noop(attrs, new=False):
        return attrs
    in_text = 'a ex.mp <a href="http://example.com">example</a>'
    out_text = 'a <a href="http://ex.mp">ex.mp</a> example'
    # Each case: (callback list, expected output, failure message).
    tests = (
        ([noop], ('a <a href="http://ex.mp">ex.mp</a> '
                  '<a href="http://example.com">example</a>'), 'noop'),
        ([no_new_links, noop], in_text, 'no new, noop'),
        ([noop, no_new_links], in_text, 'noop, no new'),
        ([no_old_links, noop], out_text, 'no old, noop'),
        ([noop, no_old_links], out_text, 'noop, no old'),
        ([no_old_links, no_new_links], 'a ex.mp example', 'no links'),
    )
    def _check(cb, o, msg):
        eq_(o, linkify(in_text, cb), msg)
    for (cb, o, msg) in tests:
        yield _check, cb, o, msg
def test_set_attrs():
    """We can set random attributes on links."""
    def add_rev(attrs, new=False):
        attrs['rev'] = 'canonical'
        return attrs
    result = linkify('ex.mp', [add_rev])
    eq_('<a href="http://ex.mp" rev="canonical">ex.mp</a>', result)
def test_only_proto_links():
    """Only create links if there's a protocol."""
    def only_proto(attrs, new=False):
        # Veto new links whose text lacks an explicit http(s) scheme;
        # links already present in the input are kept.
        if new and not attrs['_text'].startswith(('http:', 'https:')):
            return None
        return attrs
    in_text = 'a ex.mp http://ex.mp <a href="/foo">bar</a>'
    out_text = ('a ex.mp <a href="http://ex.mp">http://ex.mp</a> '
                '<a href="/foo">bar</a>')
    eq_(out_text, linkify(in_text, [only_proto]))
def test_stop_email():
    """Returning None should prevent a link from being created."""
    def no_email(attrs, new=False):
        href = attrs['href']
        if href.startswith('mailto:'):
            return None
        return attrs
    text = 'do not link james@example.com'
    result = linkify(text, parse_email=True, callbacks=[no_email])
    eq_(text, result)
def test_tlds():
    """Known TLDs linkify; unknown ones and bare words do not."""
    eq_('<a href="http://example.com" rel="nofollow">example.com</a>',
        linkify('example.com'))
    eq_('<a href="http://example.co" rel="nofollow">example.co</a>',
        linkify('example.co'))
    eq_('<a href="http://example.co.uk" rel="nofollow">example.co.uk</a>',
        linkify('example.co.uk'))
    eq_('<a href="http://example.edu" rel="nofollow">example.edu</a>',
        linkify('example.edu'))
    eq_('<a href="http://example.xxx" rel="nofollow">example.xxx</a>',
        linkify('example.xxx'))
    # .yyy is not a recognized TLD, so nothing happens.
    eq_('example.yyy', linkify('example.yyy'))
    eq_(' brie', linkify(' brie'))
    eq_('<a href="http://bit.ly/fun" rel="nofollow">bit.ly/fun</a>',
        linkify('bit.ly/fun'))
def test_escaping():
    """Text with a stray '<' passes through linkify unchanged."""
    text = '< unrelated'
    eq_(text, linkify(text))
def test_nofollow_off():
    """An empty callback list suppresses the rel="nofollow" attribute."""
    expected = '<a href="http://example.com">example.com</a>'
    eq_(expected, linkify('example.com', []))
def test_link_in_html():
    """URLs nested inside inline markup are linkified within that markup."""
    eq_('<i><a href="http://yy.com" rel="nofollow">http://yy.com</a></i>',
        linkify('<i>http://yy.com</i>'))
    eq_('<em><strong><a href="http://xx.com" rel="nofollow">http://xx.com'
        '</a></strong></em>',
        linkify('<em><strong>http://xx.com</strong></em>'))
def test_links_https():
    """https URLs are linkified just like http ones."""
    expected = '<a href="https://yy.com" rel="nofollow">https://yy.com</a>'
    eq_(expected, linkify('https://yy.com'))
def test_add_rel_nofollow():
    """Verify that rel="nofollow" is added to an existing link"""
    raw = '<a href="http://yy.com">http://yy.com</a>'
    expected = '<a href="http://yy.com" rel="nofollow">http://yy.com</a>'
    eq_(expected, linkify(raw))
def test_url_with_path():
    """A URL with a path linkifies with the full path preserved."""
    url = 'http://example.com/path/to/file'
    expected = ('<a href="http://example.com/path/to/file" rel="nofollow">'
                'http://example.com/path/to/file</a>')
    eq_(expected, linkify(url))
def test_link_ftp():
    """ftp:// URLs are linkified too."""
    url = 'ftp://ftp.mozilla.org/some/file'
    expected = ('<a href="ftp://ftp.mozilla.org/some/file" rel="nofollow">'
                'ftp://ftp.mozilla.org/some/file</a>')
    eq_(expected, linkify(url))
def test_link_query():
    """Query strings are kept; schemeless input gets http:// in href only."""
    eq_('<a href="http://xx.com/?test=win" rel="nofollow">'
        'http://xx.com/?test=win</a>',
        linkify('http://xx.com/?test=win'))
    eq_('<a href="http://xx.com/?test=win" rel="nofollow">'
        'xx.com/?test=win</a>',
        linkify('xx.com/?test=win'))
    # Works even with no path separator before the query.
    eq_('<a href="http://xx.com?test=win" rel="nofollow">'
        'xx.com?test=win</a>',
        linkify('xx.com?test=win'))
def test_link_fragment():
    """A #fragment stays attached to the linkified URL."""
    url = 'http://xx.com/path#frag'
    expected = ('<a href="http://xx.com/path#frag" rel="nofollow">'
                'http://xx.com/path#frag</a>')
    eq_(expected, linkify(url))
def test_link_entities():
    """Ampersands in a query string survive linkification."""
    eq_('<a href="http://xx.com/?a=1&b=2" rel="nofollow">'
        'http://xx.com/?a=1&b=2</a>',
        linkify('http://xx.com/?a=1&b=2'))
def test_escaped_html():
    """If I pass in escaped HTML, it should probably come out escaped."""
    markup = '<em>strong</em>'
    eq_(markup, linkify(markup))
def test_link_http_complete():
    """Credentials, host, path, query, and fragment all survive linkify."""
    eq_('<a href="https://user:pass@ftp.mozilla.org/x/y.exe?a=b&c=d'
        '&e#f" rel="nofollow">'
        'https://user:pass@ftp.mozilla.org/x/y.exe?a=b&c=d&e#f</a>',
        linkify('https://user:pass@ftp.mozilla.org/x/y.exe?a=b&c=d&e#f'))
def test_non_url():
    """document.vulnerable should absolutely not be linkified."""
    text = 'document.vulnerable'
    eq_(text, linkify(text))
def test_javascript_url():
    """javascript: urls should never be linkified."""
    text = 'javascript:document.vulnerable'
    eq_(text, linkify(text))
def test_unsafe_url():
    """Any unsafe char ({}[]<>, etc.) in the path should end URL scanning."""
    # Only the safe middle portion between the braces/quotes linkifies.
    eq_('All your{"<a href="http://xx.yy.com/grover.png" '
        'rel="nofollow">xx.yy.com/grover.png</a>"}base are',
        linkify('All your{"xx.yy.com/grover.png"}base are'))
def test_skip_pre():
    """Skip linkification in <pre> tags."""
    simple = 'http://xx.com <pre>http://xx.com</pre>'
    linked = ('<a href="http://xx.com" rel="nofollow">http://xx.com</a> '
              '<pre>http://xx.com</pre>')
    all_linked = ('<a href="http://xx.com" rel="nofollow">http://xx.com</a> '
                  '<pre><a href="http://xx.com" rel="nofollow">http://xx.com'
                  '</a></pre>')
    eq_(linked, linkify(simple, skip_pre=True))
    eq_(all_linked, linkify(simple))
    # Links that already exist inside <pre> still get rel="nofollow",
    # with or without skip_pre.
    already_linked = '<pre><a href="http://xx.com">xx</a></pre>'
    nofollowed = '<pre><a href="http://xx.com" rel="nofollow">xx</a></pre>'
    eq_(nofollowed, linkify(already_linked))
    eq_(nofollowed, linkify(already_linked, skip_pre=True))
def test_libgl():
    """libgl.so.1 should not be linkified."""
    filename = 'libgl.so.1'
    eq_(filename, linkify(filename))
def test_end_of_sentence():
    """example.com. should match."""
    out = '<a href="http://{0!s}" rel="nofollow">{0!s}</a>{1!s}'
    intxt = '{0!s}{1!s}'
    def check(u, p):
        # The URL linkifies; trailing punctuation stays outside the anchor.
        eq_(out.format(u, p),
            linkify(intxt.format(u, p)))
    # Each case: (url, trailing punctuation).
    tests = (
        ('example.com', '.'),
        ('example.com', '...'),
        ('ex.com/foo', '.'),
        ('ex.com/foo', '....'),
    )
    for u, p in tests:
        yield check, u, p
def test_end_of_clause():
    """example.com/foo, shouldn't include the ,"""
    expected = '<a href="http://ex.com/foo" rel="nofollow">ex.com/foo</a>, bar'
    eq_(expected, linkify('ex.com/foo, bar'))
def test_sarcasm():
    """Jokes should crash.<sarcasm/>"""
    raw = 'Yeah right <sarcasm/>'
    expected = 'Yeah right <sarcasm/>'
    eq_(expected, linkify(raw))
def test_wrapping_parentheses():
    """URLs wrapped in parantheses should not include them."""
    out = '{0!s}<a href="http://{1!s}" rel="nofollow">{2!s}</a>{3!s}'
    # Each case: input, then (leading text, href without scheme,
    # link text, trailing text) as substituted into `out`.
    tests = (
        ('(example.com)', ('(', 'example.com', 'example.com', ')')),
        ('(example.com/)', ('(', 'example.com/', 'example.com/', ')')),
        ('(example.com/foo)', ('(', 'example.com/foo',
                               'example.com/foo', ')')),
        ('(((example.com/))))', ('(((', 'example.com/)',
                                 'example.com/)', ')))')),
        ('example.com/))', ('', 'example.com/))', 'example.com/))', '')),
        # Balanced parens inside the URL itself are kept in the link.
        ('http://en.wikipedia.org/wiki/Test_(assessment)',
         ('', 'en.wikipedia.org/wiki/Test_(assessment)',
          'http://en.wikipedia.org/wiki/Test_(assessment)', '')),
        ('(http://en.wikipedia.org/wiki/Test_(assessment))',
         ('(', 'en.wikipedia.org/wiki/Test_(assessment)',
          'http://en.wikipedia.org/wiki/Test_(assessment)', ')')),
        ('((http://en.wikipedia.org/wiki/Test_(assessment))',
         ('((', 'en.wikipedia.org/wiki/Test_(assessment',
          'http://en.wikipedia.org/wiki/Test_(assessment', '))')),
        ('(http://en.wikipedia.org/wiki/Test_(assessment)))',
         ('(', 'en.wikipedia.org/wiki/Test_(assessment))',
          'http://en.wikipedia.org/wiki/Test_(assessment))', ')')),
        ('(http://en.wikipedia.org/wiki/)Test_(assessment',
         ('(', 'en.wikipedia.org/wiki/)Test_(assessment',
          'http://en.wikipedia.org/wiki/)Test_(assessment', '')),
    )
    def check(test, expected_output):
        eq_(out.format(*expected_output), linkify(test))
    for test, expected_output in tests:
        yield check, test, expected_output
def test_parentheses_with_removing():
    """Removing every link leaves parenthesized text intact."""
    expected = '(test.py)'
    drop_all = [lambda *a: None]
    eq_(expected, linkify(expected, callbacks=drop_all))
def test_ports():
    """URLs can contain port numbers."""
    # Each case: input, then (link text/href, trailing text left outside).
    # Invalid port text (':xkcd', a bare ':') is left outside the link.
    tests = (
        ('http://foo.com:8000', ('http://foo.com:8000', '')),
        ('http://foo.com:8000/', ('http://foo.com:8000/', '')),
        ('http://bar.com:xkcd', ('http://bar.com', ':xkcd')),
        ('http://foo.com:81/bar', ('http://foo.com:81/bar', '')),
        ('http://foo.com:', ('http://foo.com', ':')),
    )
    def check(test, output):
        out = '<a href="{0}" rel="nofollow">{0}</a>{1}'
        eq_(out.format(*output),
            linkify(test))
    for test, output in tests:
        yield check, test, output
def test_tokenizer():
    """Linkify doesn't always have to sanitize."""
    raw = '<em>test<x></x></em>'
    eq_('<em>test<x></x></em>', linkify(raw))
    # With the raw HTMLTokenizer, markup is passed through unsanitized.
    eq_(raw, linkify(raw, tokenizer=HTMLTokenizer))
def test_ignore_bad_protocols():
    """Malformed scheme prefixes are not treated as URLs."""
    eq_('foohttp://bar',
        linkify('foohttp://bar'))
    # But a recognizable domain after the junk prefix still linkifies.
    eq_('fohttp://<a href="http://exampl.com" rel="nofollow">exampl.com</a>',
        linkify('fohttp://exampl.com'))
def test_max_recursion_depth():
    """If we hit the max recursion depth, just return the string."""
    deeply_nested = '<em>' * 2000 + 'foo' + '</em>' * 2000
    eq_(deeply_nested, linkify(deeply_nested))
def test_link_emails_and_urls():
    """parse_email=True shouldn't prevent URLs from getting linkified."""
    # Both the URL and the address should be converted in a single pass.
    output = ('<a href="http://example.com" rel="nofollow">'
              'http://example.com</a> <a href="mailto:person@example.com">'
              'person@example.com</a>')
    eq_(output, linkify('http://example.com person@example.com',
                        parse_email=True))
def test_links_case_insensitive():
    """Protocols and domain names are case insensitive."""
    upper = 'HTTP://EXAMPLE.COM'
    expected = ('<a href="HTTP://EXAMPLE.COM" rel="nofollow">'
                'HTTP://EXAMPLE.COM</a>')
    eq_(expected, linkify(upper))
def test_elements_inside_links():
    """Child elements of an existing <a> survive when nofollow is added."""
    eq_('<a href="#" rel="nofollow">hello<br></a>',
        linkify('<a href="#">hello<br></a>'))
    eq_('<a href="#" rel="nofollow"><strong>bold</strong> hello<br></a>',
        linkify('<a href="#"><strong>bold</strong> hello<br></a>'))
def test_remove_first_childlink():
    """Dropping a link that is its parent's first child keeps the text."""
    expected = '<p>something</p>'
    drop_all = [lambda *a: None]
    result = linkify('<p><a href="/foo">something</a></p>', callbacks=drop_all)
    eq_(expected, result)
| {
"content_hash": "4d3c4725c221763df16c6a3facb3e5f5",
"timestamp": "",
"source": "github",
"line_count": 464,
"max_line_length": 78,
"avg_line_length": 33.3448275862069,
"alnum_prop": 0.5539038262668046,
"repo_name": "sylarcp/anita",
"id": "62da8d19f323bd5b2965867811a378d12c40a77b",
"size": "15472",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/bleach/tests/test_links.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "1365934"
},
{
"name": "GLSL",
"bytes": "174414"
},
{
"name": "HTML",
"bytes": "5975033"
},
{
"name": "JavaScript",
"bytes": "52645816"
},
{
"name": "Mako",
"bytes": "5751"
},
{
"name": "Python",
"bytes": "9472769"
},
{
"name": "Shell",
"bytes": "4036"
}
],
"symlink_target": ""
} |
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
# Dust threshold for change outputs in create_tx() and the minimum
# expected fee in sanity_test_fee() (presumably denominated in BST).
BASE_FEE=Decimal("0.001")
def check_json_precision():
    """Make sure json library being used does not lose precision converting BST values"""
    value = Decimal("20000000.00000003")
    # Round-trip through JSON as a float, then scale to satoshis.
    round_tripped = json.loads(json.dumps(float(value)))
    satoshis = int(round_tripped * 1.0e8)
    if satoshis != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
    """Return the default location of the bitcoin data directory"""
    system = platform.system()
    if system == "Darwin":
        return os.path.expanduser("~/Library/Application Support/Bitcoin/")
    if system == "Windows":
        return os.path.join(os.environ['APPDATA'], "Bitcoin")
    # Everything else (Linux, BSD, ...) uses a dotfile directory.
    return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
    """Read the globalboost.conf file from dbdir, returns dictionary of settings"""
    # NOTE(review): Python 2 only -- uses the old ConfigParser module name.
    from ConfigParser import SafeConfigParser
    class FakeSecHead(object):
        # ConfigParser requires section headers, but globalboost.conf is a
        # flat key=value file; this wrapper injects a fake [all] section.
        def __init__(self, fp):
            self.fp = fp
            self.sechead = '[all]\n'
        def readline(self):
            # Emit the fake header exactly once, then delegate to the file.
            if self.sechead:
                try: return self.sechead
                finally: self.sechead = None
            else:
                s = self.fp.readline()
                # Strip trailing '#' comments from each configuration line.
                if s.find('#') != -1:
                    s = s[0:s.find('#')].strip() +"\n"
                return s
    config_parser = SafeConfigParser()
    config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "globalboost.conf"))))
    return dict(config_parser.items("all"))
def connect_JSON(config):
    """Connect to a bitcoin JSON-RPC server.

    config: dict from read_bitcoin_config(); must contain 'rpcuser' and
    'rpcpassword', may contain 'testnet' and 'rpcport'.
    Returns a ServiceProxy on success; exits the process (status 1)
    on connection failure or a testnet setting mismatch.
    """
    testnet = config.get('testnet', '0')
    testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
    if not 'rpcport' in config:
        # Standard mainnet/testnet RPC ports.
        config['rpcport'] = 18332 if testnet else 8332
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
        result = ServiceProxy(connect)
        # ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
        # but also make sure the globalboostd we're talking to is/isn't testnet:
        if result.getmininginfo()['testnet'] != testnet:
            sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
            sys.exit(1)
        return result
    except Exception:
        # Bug fix: was a bare 'except:', which also caught the SystemExit
        # raised by the testnet-mismatch branch above and printed the wrong
        # message.  'except Exception' lets sys.exit() propagate.
        sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
        sys.exit(1)
def unlock_wallet(globalboostd):
    """Prompt for a passphrase and try to unlock an encrypted wallet.

    Returns True if the wallet is not encrypted, a boolean unlock result
    after prompting when it was locked, and None when it is encrypted but
    already unlocked (callers compare '== False', so None counts as success).
    """
    info = globalboostd.getinfo()
    if 'unlocked_until' not in info:
        return True # wallet is not encrypted
    t = int(info['unlocked_until'])
    if t <= time.time():
        try:
            passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
            # Unlock for a short 5-second window -- just long enough to sign.
            globalboostd.walletpassphrase(passphrase, 5)
        except:
            sys.stderr.write("Wrong passphrase\n")
        # Re-query to see whether the unlock actually took effect.
        info = globalboostd.getinfo()
        return int(info['unlocked_until']) > time.time()
def list_available(globalboostd):
    """Return {address: {"total", "outputs", "account"}} for all unspent outputs."""
    address_summary = dict()
    address_to_account = dict()
    # Map addresses to account labels (minconf=0 to include everything).
    for info in globalboostd.listreceivedbyaddress(0):
        address_to_account[info["address"]] = info["account"]
    unspent = globalboostd.listunspent(0)
    for output in unspent:
        # listunspent doesn't give addresses, so:
        rawtx = globalboostd.getrawtransaction(output['txid'], 1)
        vout = rawtx["vout"][output['vout']]
        pk = vout["scriptPubKey"]
        # This code only deals with ordinary pay-to-bitcoin-address
        # or pay-to-script-hash outputs right now; anything exotic is ignored.
        if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
            continue
        address = pk["addresses"][0]
        if address in address_summary:
            address_summary[address]["total"] += vout["value"]
            address_summary[address]["outputs"].append(output)
        else:
            address_summary[address] = {
                "total" : vout["value"],
                "outputs" : [output],
                "account" : address_to_account.get(address, "")
            }
    return address_summary
def select_coins(needed, inputs):
    """Greedily pick unspent outputs from `inputs` until `needed` is covered.

    Returns (outputs, change): the chosen {"txid", "vout"} dicts and the
    amount gathered minus `needed` (negative if inputs were insufficient).
    """
    # Feel free to improve this, this is good enough for my simple needs:
    chosen = []
    gathered = Decimal("0.0")
    index = 0
    while gathered < needed and index < len(inputs):
        entry = inputs[index]
        chosen.append({ "txid":entry["txid"], "vout":entry["vout"]})
        gathered += entry["amount"]
        index += 1
    return (chosen, gathered-needed)
def create_tx(globalboostd, fromaddresses, toaddress, amount, fee):
    """Build and sign a raw transaction sending `amount` to `toaddress`.

    Spends only coins belonging to `fromaddresses`; change goes back to the
    last from-address.  Returns the signed transaction as a hex string, or
    exits the process (status 1) on insufficient funds / signing failure.
    """
    all_coins = list_available(globalboostd)
    total_available = Decimal("0.0")
    needed = amount+fee
    potential_inputs = []
    for addr in fromaddresses:
        if addr not in all_coins:
            continue
        potential_inputs.extend(all_coins[addr]["outputs"])
        total_available += all_coins[addr]["total"]
    if total_available < needed:
        sys.stderr.write("Error, only %f BST available, need %f\n"%(total_available, needed));
        sys.exit(1)
    #
    # Note:
    # Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
    # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
    # Decimals, I'm casting amounts to float before sending them to globalboostd.
    #
    outputs = { toaddress : float(amount) }
    (inputs, change_amount) = select_coins(needed, potential_inputs)
    if change_amount > BASE_FEE: # don't bother with zero or tiny change
        # Route change back to the last listed from-address.
        change_address = fromaddresses[-1]
        if change_address in outputs:
            outputs[change_address] += float(change_amount)
        else:
            outputs[change_address] = float(change_amount)
    rawtx = globalboostd.createrawtransaction(inputs, outputs)
    signed_rawtx = globalboostd.signrawtransaction(rawtx)
    if not signed_rawtx["complete"]:
        sys.stderr.write("signrawtransaction failed\n")
        sys.exit(1)
    txdata = signed_rawtx["hex"]
    return txdata
def compute_amount_in(globalboostd, txinfo):
    """Sum the value of every input of a decoded transaction.

    Needs one getrawtransaction RPC round-trip per input to look up the
    source output's value.
    """
    result = Decimal("0.0")
    for vin in txinfo['vin']:
        in_info = globalboostd.getrawtransaction(vin['txid'], 1)
        vout = in_info['vout'][vin['vout']]
        result = result + vout['value']
    return result
def compute_amount_out(txinfo):
    """Sum the value of every output of a decoded transaction."""
    return sum((vout['value'] for vout in txinfo['vout']), Decimal("0.0"))
def sanity_test_fee(globalboostd, txdata_hex, max_fee):
    """Abort (exit status 1) if the transaction's fee looks unreasonable.

    The fee is the difference between the decoded transaction's total
    input and total output amounts; it must not exceed max_fee, and a
    large or tiny-amount transaction must pay at least BASE_FEE.
    """
    class FeeError(RuntimeError):
        pass
    try:
        txinfo = globalboostd.decoderawtransaction(txdata_hex)
        total_in = compute_amount_in(globalboostd, txinfo)
        total_out = compute_amount_out(txinfo)
        # Bug fix: the original referenced an undefined global 'fee' in the
        # two checks below, raising NameError for large or tiny-amount
        # transactions.  The actual fee is total inputs minus total outputs.
        fee = total_in-total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))
        tx_size = len(txdata_hex)/2
        kb = tx_size/1000  # integer division rounds down
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
        # Exercise for the reader: compute transaction priority, and
        # warn if this is a very-low-priority transaction
    except FeeError as err:
        sys.stderr.write((str(err)+"\n"))
        sys.exit(1)
def main():
    """Parse options; list balances when --amount is absent, else send coins."""
    import optparse
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--from", dest="fromaddresses", default=None,
                      help="addresses to get bitcoins from")
    parser.add_option("--to", dest="to", default=None,
                      help="address to get send bitcoins to")
    parser.add_option("--amount", dest="amount", default=None,
                      help="amount to send")
    parser.add_option("--fee", dest="fee", default="0.0",
                      help="fee to include")
    parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
                      help="location of globalboost.conf file with RPC username/password (default: %default)")
    parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
                      help="Use the test network")
    parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
                      help="Don't broadcast the transaction, just create and print the transaction data")
    (options, args) = parser.parse_args()
    check_json_precision()
    config = read_bitcoin_config(options.datadir)
    if options.testnet: config['testnet'] = True
    globalboostd = connect_JSON(config)
    # With no --amount, just report per-address unspent balances.
    if options.amount is None:
        address_summary = list_available(globalboostd)
        # NOTE(review): iteritems() makes this Python 2 only.
        for address,info in address_summary.iteritems():
            n_transactions = len(info['outputs'])
            if n_transactions > 1:
                print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
            else:
                print("%s %.8f %s"%(address, info['total'], info['account']))
    else:
        fee = Decimal(options.fee)
        amount = Decimal(options.amount)
        while unlock_wallet(globalboostd) == False:
            pass # Keep asking for passphrase until they get it right
        txdata = create_tx(globalboostd, options.fromaddresses.split(","), options.to, amount, fee)
        # Cap the acceptable fee at 1% of the amount being sent.
        sanity_test_fee(globalboostd, txdata, amount*Decimal("0.01"))
        if options.dry_run:
            print(txdata)
        else:
            txid = globalboostd.sendrawtransaction(txdata)
            print(txid)
| {
"content_hash": "3c3b1e0086b2b8885ff868a132d516b7",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 111,
"avg_line_length": 38.83730158730159,
"alnum_prop": 0.6199039542249923,
"repo_name": "getcoin/globalboosty",
"id": "fa36462ad9e5db32390a3be0afc8c8178b605109",
"size": "10173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/spendfrom/spendfrom.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "167244"
},
{
"name": "C++",
"bytes": "2950052"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Erlang",
"bytes": "6761"
},
{
"name": "JavaScript",
"bytes": "12"
},
{
"name": "Nu",
"bytes": "293"
},
{
"name": "Objective-C++",
"bytes": "6330"
},
{
"name": "PHP",
"bytes": "2230"
},
{
"name": "Perl",
"bytes": "27505"
},
{
"name": "Python",
"bytes": "110559"
},
{
"name": "Shell",
"bytes": "116646"
},
{
"name": "TypeScript",
"bytes": "8991525"
}
],
"symlink_target": ""
} |
"""Certificate chain where the target certificate sets the extended key usage
to clientAuth. Neither the root nor the intermediate have an EKU."""
import sys
sys.path += ['../..']
import gencerts
# Self-signed root certificate.
root = gencerts.create_self_signed_root_certificate('Root')
# Intermediate certificate.
intermediate = gencerts.create_intermediate_certificate('Intermediate', root)
# Target certificate.
target = gencerts.create_end_entity_certificate('Target', intermediate)
target.get_extensions().set_property('extendedKeyUsage', 'clientAuth')
chain = [target, intermediate, root]
gencerts.write_chain(__doc__, chain, 'chain.pem')
| {
"content_hash": "34538c3701b2c99c55f72538e4a8347e",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 77,
"avg_line_length": 32.6,
"alnum_prop": 0.7653374233128835,
"repo_name": "youtube/cobalt_sandbox",
"id": "f9cc43ee146f404058f6051dcb68b046ce7e87b7",
"size": "837",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "net/data/verify_certificate_chain_unittest/target-eku-clientauth/generate-chains.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from distutils.core import setup

# Packaging metadata for the RSAExploits library.
# NOTE(review): distutils is deprecated (removed in Python 3.12); setuptools
# provides a drop-in setup() — confirm before upgrading the toolchain.
setup(
    name='RSAExploits',
    version='0.1.0',
    author='Kumar Vikramjeet, Eric Azebu',
    author_email='',
    packages=['RSAExploits', 'RSAExploits.exploits'],
    license='LICENSE.txt',
    description='Collection of RSA exploits',
    # Long description is taken verbatim from the README.
    long_description=open('README.md').read(),
    setup_requires=["pycrypto >= 2.4.1","sympy >= 0.7.1.rc1",],
    install_requires=[
        "pycrypto >= 2.4.1",
        "sympy >= 0.7.1",
    ],
)
| {
"content_hash": "62fd248b83b97ddd0468e90302cd6a62",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 63,
"avg_line_length": 28.352941176470587,
"alnum_prop": 0.5975103734439834,
"repo_name": "vik001ind/RSAExploits",
"id": "aadd41986750985798cf3b4e847b232a54854253",
"size": "482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "55322"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from django.urls import reverse
class CsrfFailureTestCase(TestCase):
    def test_access_csrf_failure_view(self):
        """The custom CSRF failure view should render with a 403 status."""
        failure_url = reverse("403-csrf")
        resp = self.client.get(failure_url)
        self.assertEqual(resp.status_code, 403)
        self.assertTemplateUsed(resp, "403_csrf.html")
| {
"content_hash": "93ab4bab2c8f5531e7ebe307bc2256a6",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 66,
"avg_line_length": 38.3,
"alnum_prop": 0.7180156657963447,
"repo_name": "pydata/conf_site",
"id": "186672dc7f65eb1d23c8a6410d4fef76c1861329",
"size": "383",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "conf_site/core/tests/test_csrf_failure.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "24945"
},
{
"name": "HTML",
"bytes": "115341"
},
{
"name": "JavaScript",
"bytes": "244408"
},
{
"name": "Jinja",
"bytes": "901"
},
{
"name": "Python",
"bytes": "309825"
}
],
"symlink_target": ""
} |
"""
:mod:`nova` -- Cloud IaaS Platform
===================================
.. automodule:: nova
:platform: Unix
:synopsis: Infrastructure-as-a-Service Cloud platform.
.. moduleauthor:: Jesse Andrews <jesse@ansolabs.com>
.. moduleauthor:: Devin Carlen <devin.carlen@gmail.com>
.. moduleauthor:: Vishvananda Ishaya <vishvananda@yahoo.com>
.. moduleauthor:: Joshua McKenty <joshua@cognition.ca>
.. moduleauthor:: Manish Singh <yosh@gimp.org>
.. moduleauthor:: Andy Smith <andy@anarkystic.com>
"""
from exception import * | {
"content_hash": "e7aaee2806fb78bcabe7f8e31e3d0f16",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 60,
"avg_line_length": 32.8125,
"alnum_prop": 0.6819047619047619,
"repo_name": "jxta/cc",
"id": "2b25d1628d464cbbdf201dd40dedcabd50bd94bd",
"size": "1138",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "315721"
},
{
"name": "Shell",
"bytes": "7870"
}
],
"symlink_target": ""
} |
from ..utils.display import Displayable, default_renderer_base, json_renderer_base
from ..utils.display import MimeBundleType, RendererType
# Public API of this module: re-exports of the shared renderer plumbing
# imported above, so `from ...display import *` exposes exactly these names.
__all__ = (
    "Displayable",
    "default_renderer_base",
    "json_renderer_base",
    "MimeBundleType",
    "RendererType"
)
| {
"content_hash": "f2e09136c034898facc799f12307bd2f",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 82,
"avg_line_length": 24.636363636363637,
"alnum_prop": 0.7047970479704797,
"repo_name": "ellisonbg/altair",
"id": "2be6e0c4c30b333813f3fa7dc022a18e22e297f4",
"size": "271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "altair/vega/display.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "136763"
},
{
"name": "Makefile",
"bytes": "312"
},
{
"name": "Python",
"bytes": "1150719"
}
],
"symlink_target": ""
} |
"""Setup script for Bokeh."""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENCE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Stdlib imports
import os, platform, re, shutil, site, subprocess, sys, time
from os.path import abspath, dirname, exists, isdir, join, realpath, relpath
from shutil import copy
import sys
# On Windows installs, prefer setuptools so that entry_points are handled
# and a bokeh.exe launcher gets created.
if 'install' in sys.argv and sys.platform.startswith('win'):
    # Try use setuptools, so that entry_points is handled, creating a bokeh.exe
    try:
        import setuptools
    except ImportError:
        pass

# Optional colored terminal output; fall back to identity functions when
# colorama is not installed so call sites never need to branch.
try:
    import colorama

    def bright(text): return "%s%s%s" % (colorama.Style.BRIGHT, text, colorama.Style.RESET_ALL)
    def dim(text): return "%s%s%s" % (colorama.Style.DIM, text, colorama.Style.RESET_ALL)
    def white(text): return "%s%s%s" % (colorama.Fore.WHITE, text, colorama.Style.RESET_ALL)
    def blue(text): return "%s%s%s" % (colorama.Fore.BLUE, text, colorama.Style.RESET_ALL)
    def red(text): return "%s%s%s" % (colorama.Fore.RED, text, colorama.Style.RESET_ALL)
    def green(text): return "%s%s%s" % (colorama.Fore.GREEN, text, colorama.Style.RESET_ALL)
    def yellow(text): return "%s%s%s" % (colorama.Fore.YELLOW, text, colorama.Style.RESET_ALL)
except ImportError:
    def bright(text): return text
    def dim(text): return text
    def white(text) : return text
    def blue(text) : return text
    def red(text) : return text
    def green(text) : return text
    def yellow(text) : return text

# 'nightly' builds use setuptools and bake the conda build version into the
# package; regular builds use plain distutils.
if 'nightly' in sys.argv:
    from setuptools import setup

    sys.argv.remove('nightly')

    with open('__conda_version__.txt', 'r') as f:
        version = f.read().rstrip()
    vers_file = os.path.join('bokeh', '__conda_version__.py')
    with open(vers_file, 'w') as f:
        f.write("conda_version=" + "'" + version + "'")
else:
    from distutils.core import setup
    from distutils import dir_util

# Our own imports
import versioneer

# -----------------------------------------------------------------------------
# Globals and constants
# -----------------------------------------------------------------------------

# Key source-tree locations, anchored at this file's directory.
ROOT = dirname(realpath(__file__))
BOKEHJSROOT = join(ROOT, 'bokehjs')
BOKEHJSBUILD = join(BOKEHJSROOT, 'build')
CSS = join(BOKEHJSBUILD, 'css')
JS = join(BOKEHJSBUILD, 'js')
SERVER = join(ROOT, 'bokeh/server')

# Python 2 compatibility for the interactive prompts below.
if sys.version_info[0] < 3:
    input = raw_input

# -----------------------------------------------------------------------------
# Local utilities
# -----------------------------------------------------------------------------

# versioneer configuration (module-attribute style of older versioneer).
versioneer.versionfile_source = 'bokeh/_version.py'
versioneer.versionfile_build = 'bokeh/_version.py'
versioneer.tag_prefix = ''  # tags are like 1.2.0
versioneer.parentdir_prefix = 'Bokeh-'  # dirname like 'myproject-1.2.0'

# -----------------------------------------------------------------------------
# Classes and functions
# -----------------------------------------------------------------------------

# Ship the license inside the package. `package_data` accumulates every
# non-Python file to bundle; package_path() below appends to it.
copy("LICENSE.txt", "bokeh/")

package_data = ['LICENSE.txt', 'themes/*.yaml']
def package_path(path, filters=()):
    """Register *path* (a file or a directory tree) in the module-level
    ``package_data`` list, as paths relative to the ``bokeh`` package.

    ``filters`` is an optional tuple of filename suffixes; when non-empty,
    only files ending with one of them are collected from directory trees.
    Raises RuntimeError if *path* does not exist.
    """
    if not os.path.exists(path):
        raise RuntimeError("packaging non-existent path: %s" % path)

    if os.path.isfile(path):
        # A single file is always included, regardless of filters.
        package_data.append(relpath(path, 'bokeh'))
        return

    for dirpath, _dirs, filenames in os.walk(path):
        rel_dir = relpath(dirpath, 'bokeh')
        wanted = [name for name in filenames
                  if not filters or name.endswith(filters)]
        package_data.extend(join(rel_dir, name) for name in wanted)
# You can't install Bokeh in a virtualenv because the lack of getsitepackages()
# This is an open bug: https://github.com/pypa/virtualenv/issues/355
# And this is an intended PR to fix it: https://github.com/pypa/virtualenv/pull/508
# Workaround to fix our issue: https://github.com/bokeh/bokeh/issues/378
def getsitepackages():
    """Returns a list containing all global site-packages directories
    (and possibly site-python).

    Vendored virtualenv workaround (see the comments above this function):
    stock virtualenvs lack site.getsitepackages(). Only directories that
    actually exist are returned.
    """
    _is_64bit = (getattr(sys, 'maxsize', None) or getattr(sys, 'maxint')) > 2**32
    _is_pypy = hasattr(sys, 'pypy_version_info')
    _is_jython = sys.platform[:4] == 'java'

    prefixes = [sys.prefix, sys.exec_prefix]

    sitepackages = []
    seen = set()

    for prefix in prefixes:
        # sys.prefix and sys.exec_prefix are usually identical; dedupe.
        if not prefix or prefix in seen:
            continue
        seen.add(prefix)

        if sys.platform in ('os2emx', 'riscos') or _is_jython:
            sitedirs = [os.path.join(prefix, "Lib", "site-packages")]
        elif _is_pypy:
            sitedirs = [os.path.join(prefix, 'site-packages')]
        elif sys.platform == 'darwin' and prefix == sys.prefix:
            if prefix.startswith("/System/Library/Frameworks/"):  # Apple's Python
                sitedirs = [os.path.join("/Library/Python", sys.version[:3], "site-packages"),
                            os.path.join(prefix, "Extras", "lib", "python")]
            else:  # any other Python distros on OSX work this way
                sitedirs = [os.path.join(prefix, "lib",
                                         "python" + sys.version[:3], "site-packages")]
        elif os.sep == '/':
            # Generic POSIX layout.
            sitedirs = [os.path.join(prefix,
                                     "lib",
                                     "python" + sys.version[:3],
                                     "site-packages"),
                        os.path.join(prefix, "lib", "site-python"),
                        ]
            # Prefer lib64 on 64-bit builds when it is a distinct directory.
            lib64_dir = os.path.join(prefix, "lib64", "python" + sys.version[:3], "site-packages")
            if (os.path.exists(lib64_dir) and
                    os.path.realpath(lib64_dir) not in [os.path.realpath(p) for p in sitedirs]):
                if _is_64bit:
                    sitedirs.insert(0, lib64_dir)
                else:
                    sitedirs.append(lib64_dir)
            try:
                # sys.getobjects only available in --with-pydebug build
                sys.getobjects
                sitedirs.insert(0, os.path.join(sitedirs[0], 'debug'))
            except AttributeError:
                pass
            # Debian-specific dist-packages directories:
            sitedirs.append(os.path.join(prefix, "local/lib",
                                         "python" + sys.version[:3],
                                         "dist-packages"))
            sitedirs.append(os.path.join(prefix, "lib",
                                         "python" + sys.version[:3],
                                         "dist-packages"))
            if sys.version_info[0] >= 3:
                sitedirs.append(os.path.join(prefix, "lib",
                                             "python" + sys.version[0],
                                             "dist-packages"))
            sitedirs.append(os.path.join(prefix, "lib", "dist-python"))
        else:
            # Windows-style layout.
            sitedirs = [os.path.join(prefix, "lib", "site-packages"), prefix]

        if sys.platform == 'darwin':
            # for framework builds *only* we add the standard Apple
            # locations. Currently only per-user, but /Library and
            # /Network/Library could be added too
            if 'Python.framework' in prefix:
                home = os.environ.get('HOME')
                if home:
                    sitedirs.append(
                        os.path.join(home,
                                     'Library',
                                     'Python',
                                     sys.version[:3],
                                     'site-packages'))

        for sitedir in sitedirs:
            sitepackages.append(os.path.abspath(sitedir))

    # Keep only directories that actually exist on this machine.
    sitepackages = [p for p in sitepackages if os.path.isdir(p)]
    return sitepackages
def check_remove_bokeh_install(site_packages):
    """Find previously-installed Bokeh trees/eggs under *site_packages* and
    interactively offer to delete them (needed before a 'develop' install).

    Exits the process when the user declines or a removal fails.
    """
    old_bokeh_files = []
    for d in os.listdir(site_packages):
        bokeh_path = join(site_packages, d)
        # Matches the package dir 'bokeh' and versioned 'bokeh-*' egg/dist dirs.
        if not (d == 'bokeh' or d.startswith('bokeh-')):
            continue
        old_bokeh_files.append(bokeh_path)

    if len(old_bokeh_files) == 0:
        return

    print("Found old Bokeh files:")
    for path in old_bokeh_files:
        print(" - %s" % path)
    val = input("Remove %s? [y|N] " % ("it" if len(old_bokeh_files)==1 else "them",))
    if val == "y":
        print("Removing old Bokeh files...", end=" ")
        for path in old_bokeh_files:
            try:
                if isdir(path): shutil.rmtree(path)
                else: os.remove(path)
            except (IOError, OSError) as e:
                print(bright(red("\nUnable to remove old Bokeh file at %s, exiting" % path)) + " [reason: %s]" % e)
                sys.exit(-1)
        print("Done")
    else:
        # Refuse to continue on top of a stale installation.
        print(bright(red("Old Bokeh files not removed, exiting.")))
        sys.exit(1)
def remove_bokeh_pth(path_file):
    """Delete a leftover ``bokeh.pth`` file if one exists.

    Returns True when a file was removed, False when there was nothing to
    do. Exits the process if the file exists but cannot be deleted.
    """
    if not exists(path_file):
        return False
    try:
        os.remove(path_file)
    except (IOError, OSError):
        print(bright(red("Unable to remove old path file at %s, exiting" % path_file)))
        sys.exit(-1)
    return True
# Message templates used by build_js(); the colored "Failed."/"Success!"
# prefixes are rendered up front so the %-formatting below stays simple.

# Shown when the gulp subprocess cannot be spawned at all.
BUILD_EXEC_FAIL_MSG = bright(red("Failed.")) + """
ERROR: subprocess.Popen(%r) failed to execute:
%s
Have you run `npm install` from the bokehjs subdirectory?
For more information, see the Dev Guide:
http://bokeh.pydata.org/en/latest/docs/dev_guide.html
"""

# Shown when gulp runs but exits non-zero; filled with its stdout/stderr.
BUILD_FAIL_MSG = bright(red("Failed.")) + """
ERROR: 'gulp build' returned the following
---- on stdout:
%s
---- on stderr:
%s
"""

# Shown when the built artifact sizes cannot be stat'ed.
BUILD_SIZE_FAIL_MSG = """
ERROR: could not determine sizes:
%s
"""

# Shown on a successful build, filled with the indented gulp log.
BUILD_SUCCESS_MSG = bright(green("Success!")) + """
Build output:
%s"""
def build_js():
    """Run the BokehJS 'gulp build' and report the result and artifact sizes.

    Must be run from the repository root; the cwd is restored afterwards.
    Exits the process when gulp cannot be spawned or the build fails.
    """
    print("Building BokehJS... ", end="")
    sys.stdout.flush()
    os.chdir('bokehjs')

    if sys.platform != "win32":
        cmd = [join('node_modules', '.bin', 'gulp'), 'build']
    else:
        cmd = [join('node_modules', '.bin', 'gulp.cmd'), 'build']

    t0 = time.time()

    try:
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except OSError as e:
        print(BUILD_EXEC_FAIL_MSG % (cmd, e))
        sys.exit(1)
    finally:
        os.chdir('..')

    # BUGFIX: the previous code called proc.wait() and only read the pipes
    # afterwards. With both stdout and stderr set to PIPE that can deadlock
    # once gulp's output fills the OS pipe buffer (gulp blocks on write,
    # wait() never returns). communicate() drains both pipes while waiting.
    stdout_data, stderr_data = proc.communicate()
    t1 = time.time()

    outmsg = stdout_data.decode('ascii', errors='ignore')
    errmsg = stderr_data.decode('ascii', errors='ignore')

    if proc.returncode != 0:
        outmsg = "\n".join([" " + x for x in outmsg.split("\n")])
        errmsg = "\n".join([" " + x for x in errmsg.split("\n")])
        print(BUILD_FAIL_MSG % (red(outmsg), red(errmsg)))
        sys.exit(1)

    # Re-render gulp's "[stamp] message" lines in dim/green for readability.
    indented_msg = ""
    pat = re.compile(r"(\[.*\]) (.*)", re.DOTALL)
    for line in outmsg.strip().split("\n"):
        m = pat.match(line)
        if not m: continue  # skip generate.py output lines
        stamp, txt = m.groups()
        indented_msg += " " + dim(green(stamp)) + " " + dim(txt) + "\n"
    print(BUILD_SUCCESS_MSG % indented_msg)
    print("Build time: %s" % bright(yellow("%0.1f seconds" % (t1-t0))))

    print()
    print("Build artifact sizes:")
    try:
        def size(*path):
            # Size on disk, in KiB.
            return os.stat(join("bokehjs", "build", *path)).st_size / 2**10

        print(" - bokeh.js : %6.1f KB" % size("js", "bokeh.js"))
        print(" - bokeh.css : %6.1f KB" % size("css", "bokeh.css"))
        print(" - bokeh.min.js : %6.1f KB" % size("js", "bokeh.min.js"))
        print(" - bokeh.min.css : %6.1f KB" % size("css", "bokeh.min.css"))
        print(" - bokeh-widgets.js : %6.1f KB" % size("js", "bokeh-widgets.js"))
        print(" - bokeh-widgets.css : %6.1f KB" % size("css", "bokeh-widgets.css"))
        print(" - bokeh-widgets.min.js : %6.1f KB" % size("js", "bokeh-widgets.min.js"))
        print(" - bokeh-widgets.min.css : %6.1f KB" % size("css", "bokeh-widgets.min.css"))
        print(" - bokeh-compiler.js : %6.1f KB" % size("js", "bokeh-compiler.js"))
        print(" - bokeh-compiler.min.js : %6.1f KB" % size("js", "bokeh-compiler.min.js"))
    except Exception as e:
        print(BUILD_SIZE_FAIL_MSG % e)
def install_js():
    """Copy the built BokehJS js/css trees into bokeh/server/static."""
    target_jsdir = join(SERVER, 'static', 'js')
    target_cssdir = join(SERVER, 'static', 'css')

    # The minimum set of artifacts a usable BokehJS build must produce.
    STATIC_ASSETS = [
        join(JS, 'bokeh.js'),
        join(JS, 'bokeh.min.js'),
        join(CSS, 'bokeh.css'),
        join(CSS, 'bokeh.min.css'),
    ]
    missing = [asset for asset in STATIC_ASSETS if not exists(asset)]
    if missing:
        print("""
ERROR: Cannot install BokehJS: files missing in `./bokehjs/build`.
Please build BokehJS by running setup.py with the `--build_js` option.
Dev Guide: http://bokeh.pydata.org/docs/dev_guide.html#bokehjs.
""")
        sys.exit(1)

    # Replace any previously installed static trees wholesale.
    for source, target in ((JS, target_jsdir), (CSS, target_cssdir)):
        if exists(target):
            shutil.rmtree(target)
        shutil.copytree(source, target)
def clean():
    """Remove build/lib/bokeh and every .pyc file under the current tree."""
    print("Removing prior-built items...", end=" ")

    build_dir = 'build/lib/bokeh'
    if os.path.exists(build_dir):
        dir_util.remove_tree(build_dir)

    for dirpath, _dirnames, filenames in os.walk('.'):
        for fname in filenames:
            if fname.endswith('.pyc'):
                os.remove(os.path.join(dirpath, fname))

    print("Done")
def get_user_jsargs():
    """Interactively ask whether to build BokehJS fresh (True) or reuse the
    last build (False); re-prompts until the answer is '1' or '2'."""
    print("""
Bokeh includes a JavaScript library (BokehJS) that has its own
build process. How would you like to handle BokehJS:
1) build and install fresh BokehJS
2) install last built BokehJS from bokeh/bokehjs/build
""")
    choices = {"1": True, "2": False}
    answer = input("Choice? ")
    while answer not in choices:
        print("Input '%s' not understood. Valid choices: 1, 2\n" % answer)
        answer = input("Choice? ")
    return choices[answer]
def parse_jsargs():
    """Consume --build_js / --install_js from sys.argv and return whether
    BokehJS should be rebuilt (True) or a previous build reused (False)."""
    install_cmds = ('install', 'develop', 'sdist', 'egg_info', 'build')
    installing = any(cmd in sys.argv for cmd in install_cmds)

    if '--build_js' in sys.argv:
        if not installing:
            print("Error: Option '--build_js' only valid with 'install', 'develop', 'sdist', or 'build', exiting.")
            sys.exit(1)
        sys.argv.remove('--build_js')
        return True

    if '--install_js' in sys.argv:
        # Note that --install_js can be used by itself (without sdist/install/develop)
        sys.argv.remove('--install_js')
        return False

    # No explicit flag: prompt the user during installs, otherwise skip.
    return get_user_jsargs() if installing else False
def package_tree(pkgroot):
    """Return dotted package names for every directory under *pkgroot*
    (relative to ROOT) that contains an __init__.py."""
    packages = []
    for dirpath, _dirnames, filenames in os.walk(os.path.join(ROOT, pkgroot)):
        if '__init__.py' in filenames:
            rel = os.path.relpath(dirpath, ROOT)
            packages.append(rel.replace(os.path.sep, '.'))
    return packages
# -----------------------------------------------------------------------------
# Main script
# -----------------------------------------------------------------------------

# Aliases for build_js and install_js (accept dashed spellings too).
for i in range(len(sys.argv)):
    if sys.argv[i] == '--build-js':
        sys.argv[i] = '--build_js'
    if sys.argv[i] == '--install-js':
        sys.argv[i] = '--install_js'

# Set up this checkout or source archive with the right BokehJS files.

if sys.version_info[:2] < (2, 6):
    raise RuntimeError("Bokeh requires python >= 2.6")

# Lightweight command to only install js and nothing more - developer mode
if len(sys.argv) == 2 and sys.argv[-1] == '--install_js':
    install_js()
    sys.exit(0)

# check for 'sdist' and make sure we always do a BokehJS build when packaging
if "sdist" in sys.argv:
    if "--install_js" in sys.argv:
        print("Removing '--install_js' incompatible with 'sdist'")
        sys.argv.remove('--install_js')
    if "--build_js" not in sys.argv:
        print("Adding '--build_js' required for 'sdist'")
        sys.argv.append('--build_js')

# check for package install, set jsinstall to False to skip prompt
jsinstall = True
# MANIFEST.in is absent from sdist packages: BokehJS sources are not shipped
# there, so JS building/installing from source is disabled in that case.
if not exists(join(ROOT, 'MANIFEST.in')):
    if "--build_js" in sys.argv or "--install_js" in sys.argv:
        print("BokehJS source code is not shipped in sdist packages; "
              "building/installing from the bokehjs source directory is disabled. "
              "To build or develop BokehJS yourself, you must clone the full "
              "Bokeh repository from https://github.com/bokeh/bokeh")
        if "--build_js" in sys.argv:
            sys.argv.remove('--build_js')
        if "--install_js" in sys.argv:
            sys.argv.remove('--install_js')
    jsbuild = False
    jsinstall = False
else:
    jsbuild = parse_jsargs()

if jsbuild:
    build_js()

if jsinstall:
    install_js()

# Non-Python data files to bundle with the package (see package_path).
sampledata_suffixes = ('.csv', '.conf', '.gz', '.json', '.png', '.ics', '.geojson')

package_path(join(SERVER, 'static'))
package_path(join(ROOT, 'bokeh', 'core', '_templates'))
package_path(join(ROOT, 'bokeh', 'sampledata'), sampledata_suffixes)

# Target site-packages: per-user for --user installs, first global otherwise.
if '--user' in sys.argv:
    site_packages = site.USER_SITE
else:
    site_packages = getsitepackages()[0]

path_file = join(site_packages, "bokeh.pth")
path = abspath(dirname(__file__))

print()
if 'develop' in sys.argv:
    # Note that setuptools supports 'develop' too, but we roll our own implementation
    # that removes any existing Bokeh installation, and works in virtualenv
    if exists('bokeh/__conda_version__.py'):
        print(bright(red("ERROR:")) + " Detected a __conda_version__.py file, exiting")
        sys.exit(1)
    check_remove_bokeh_install(site_packages)
    # A .pth file pointing at this checkout makes it importable in place.
    with open(path_file, "w+") as f:
        f.write(path)
    print("Installing Bokeh for development:")
    print(" - writing path '%s' to %s" % (path, path_file))
    if jsinstall:
        print(" - using %s built BokehJS from bokehjs/build\n" % (bright(yellow("NEWLY")) if jsbuild else bright(yellow("PREVIOUSLY"))))
    else:
        print(" - using %s BokehJS, located in 'bokeh.server.static'\n" % yellow("PACKAGED"))
    sys.exit()
elif 'clean' in sys.argv:
    clean()
elif 'install' in sys.argv:
    # A regular install must not coexist with a develop-mode .pth file.
    pth_removed = remove_bokeh_pth(path_file)
    print("Installing Bokeh:")
    if pth_removed:
        print(" - removed path file at %s" % path_file)
    if jsinstall:
        print(" - using %s built BokehJS from bokehjs/build\n" % (bright(yellow("NEWLY")) if jsbuild else bright(yellow("PREVIOUSLY"))))
    else:
        print(" - using %s BokehJS, located in 'bokeh.server.static'\n" % bright(yellow("PACKAGED")))
elif '--help' in sys.argv:
    if jsinstall:
        print("Bokeh-specific options available with 'install' or 'develop':")
        print()
        print(" --build_js build and install a fresh BokehJS")
        print(" --install_js install only last previously built BokehJS")
    else:
        print("Bokeh is using PACKAGED BokehJS, located in 'bokeh.server.static'")
        print()
print()

# Runtime dependencies passed to setup(install_requires=...).
REQUIRES = [
    'six>=1.5.2',
    'requests>=1.2.3',
    'PyYAML>=3.10',
    'python-dateutil>=2.1',
    'Jinja2>=2.7',
    'numpy>=1.7.1',
    'tornado>=4.3',
]

# concurrent.futures backport is needed on Python 2.7 only.
if sys.version_info[:2] == (2, 7):
    REQUIRES.append('futures>=3.0.3')

_version = versioneer.get_version()
_cmdclass = versioneer.get_cmdclass()

# Horrible hack: workaround to allow creation of bdist_wheel on pip installation
# Why, for God's sake, is pip forcing the generation of wheels when installing a package?
try:
    from wheel.bdist_wheel import bdist_wheel
except ImportError as e:
    # pip is not claiming for bdist_wheel when wheel is not installed
    bdist_wheel = None

if bdist_wheel is not None:
    _cmdclass["bdist_wheel"] = bdist_wheel

# Note on scripts and entry points. The 'scripts' value is handled by
# distutils but does not provide an .exe, making it not very useful on
# Windows. The 'entry_points' value is handled only if setuptools is
# used, and does make an .exe. Note that in our conda recipe, we
# seperately define an entry point.

setup(
    name='bokeh',
    version=_version,
    cmdclass=_cmdclass,
    packages=package_tree('bokeh'),
    package_data={'bokeh': package_data},
    author='Continuum Analytics',
    author_email='info@continuum.io',
    url='http://github.com/bokeh/bokeh',
    description='Statistical and novel interactive HTML plots for Python',
    license='New BSD',
    scripts=['bin/bokeh', 'bin/bokeh-server'],
    entry_points={'console_scripts': ['bokeh = bokeh.__main__:main',], },
    zip_safe=False,
    install_requires=REQUIRES
)
| {
"content_hash": "7f72d610b372edb70bbbd6e5a3607373",
"timestamp": "",
"source": "github",
"line_count": 596,
"max_line_length": 137,
"avg_line_length": 35.25167785234899,
"alnum_prop": 0.560542598762494,
"repo_name": "quasiben/bokeh",
"id": "0fe857dcc3941414e19ffdb36f98a902c96e922b",
"size": "21010",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "448001"
},
{
"name": "CoffeeScript",
"bytes": "2130601"
},
{
"name": "JavaScript",
"bytes": "2530410"
},
{
"name": "Python",
"bytes": "1056239"
},
{
"name": "Scala",
"bytes": "28977"
},
{
"name": "Shell",
"bytes": "13082"
}
],
"symlink_target": ""
} |
"""This module contains the GCI WorkSubmission Model.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@gmail.com>',
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
from google.appengine.ext import blobstore
from google.appengine.ext import db
from django.utils.translation import ugettext
import soc.models.base
import soc.models.user
from soc.modules.gci.models import organization as gci_org_model
from soc.modules.gci.models import program as gci_program_model
class GCIWorkSubmission(soc.models.base.ModelWithFieldAttributes):
    """Model for work submissions for a task by students.

    Scope will be set to the Organization to which this work has been submitted.

    scope_path will be set to the task key name under which this work was
    submitted.
    """

    #: User who submitted this work
    user = db.ReferenceProperty(reference_class=soc.models.user.User,
                                required=True,
                                collection_name='work_submissions')

    #: Organization to which this work belongs to
    org = db.ReferenceProperty(
        reference_class=gci_org_model.GCIOrganization,
        required=True, collection_name='work_submissions')

    #: Program to which this work belongs to
    program = db.ReferenceProperty(
        reference_class=gci_program_model.GCIProgram,
        required=True, collection_name='work_submissions')

    #: Free-form text describing the submitted work (optional)
    information = db.TextProperty(
        required=False, verbose_name=ugettext('Info'))
    information.help_text = ugettext(
        'Information about the work you submit for this task')

    #: Property containing an URL to this work or more information about it
    url_to_work = db.LinkProperty(
        required=False, verbose_name=ugettext('URL to your Work'))
    url_to_work.help_text = ugettext(
        'URL to a resource containing your work or more information about it')

    #: Property pointing to the work uploaded as a file or archive (blobstore)
    upload_of_work = blobstore.BlobReferenceProperty(
        required=False, verbose_name=ugettext('Upload of Work'))
    upload_of_work.help_text = ugettext(
        'Your work uploaded as a single file or as archive')

    #: Timestamp of the submission; set automatically once at creation
    #: (auto_now_add=True) and never updated afterwards
    submitted_on = db.DateTimeProperty(required=True, auto_now_add=True,
                                       verbose_name=ugettext('Submitted on'))
| {
"content_hash": "86aea0aa2ed40e17d08b2149d236d083",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 78,
"avg_line_length": 36.66153846153846,
"alnum_prop": 0.7167436005035669,
"repo_name": "SRabbelier/Melange",
"id": "d6d97ddf4247d424f8eae4bfb7aedb421a788fdc",
"size": "2993",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/soc/modules/gci/models/work_submission.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "400472"
},
{
"name": "C++",
"bytes": "20"
},
{
"name": "Java",
"bytes": "1496"
},
{
"name": "JavaScript",
"bytes": "1623582"
},
{
"name": "PHP",
"bytes": "1032"
},
{
"name": "Perl",
"bytes": "177565"
},
{
"name": "Python",
"bytes": "15317793"
},
{
"name": "Ruby",
"bytes": "59"
},
{
"name": "Shell",
"bytes": "15303"
}
],
"symlink_target": ""
} |
import os
import pytest
import tensorflow as tf
from absl.testing import parameterized
from tensorflow import keras
from keras_nlp.tokenizers.byte_pair_tokenizer import BytePairTokenizer
# Real RoBERTa vocabulary/merge files, downloaded once into the Keras cache.
# The tests below therefore need network access on first run (and are marked
# with @pytest.mark.large).
VOCAB_PATH = keras.utils.get_file(
    None,
    "https://storage.googleapis.com/keras-nlp/models/roberta_base/vocab.json",
)
MERGE_PATH = keras.utils.get_file(
    None,
    "https://storage.googleapis.com/keras-nlp/models/roberta_base/merges.txt",
)
@pytest.mark.large
class BytePairTokenizerTest(tf.test.TestCase, parameterized.TestCase):
    """Integration tests for BytePairTokenizer against the real RoBERTa
    vocabulary and merge files (VOCAB_PATH / MERGE_PATH above)."""

    def setUp(self):
        super().setUp()
        # Shared int-output tokenizer; string-output variants are built
        # per-test where needed.
        self.tokenizer = BytePairTokenizer(
            vocabulary=VOCAB_PATH, merges=MERGE_PATH
        )

    def test_tokenize_list_input(self):
        input_data = ["brown.", "black."]
        call_output = self.tokenizer(input_data)
        tokenize_output = self.tokenizer.tokenize(input_data)
        expected = tf.ragged.constant([[31876, 4], [14178, 4]])
        self.assertAllEqual(call_output, expected)
        self.assertAllEqual(tokenize_output, expected)

        # A dense string tensor should tokenize identically to a Python list.
        input_data = tf.convert_to_tensor(["brown.", "black."])
        encoded = self.tokenizer(input_data)
        self.assertAllEqual(encoded, expected)

    def test_tokenize_string_output(self):
        input_data = ["quick brown fox.", "slow black bear."]
        tokenizer = BytePairTokenizer(
            vocabulary=VOCAB_PATH, merges=MERGE_PATH, dtype=tf.string
        )
        call_output = tokenizer(input_data)
        # "Ġ" is the byte-level BPE marker for a leading space.
        expected = tf.ragged.constant(
            [
                ["quick", "Ġbrown", "Ġfox", "."],
                ["slow", "Ġblack", "Ġbear", "."],
            ]
        )
        self.assertAllEqual(call_output, expected)

    def test_tokenize_scalar_input(self):
        input_data = "brown."
        encoded = self.tokenizer.tokenize(input_data)
        self.assertAllEqual(encoded, [31876, 4])

    def test_detokenize_scalar_input(self):
        # detokenize(tokenize(x)) must round-trip exactly.
        input_data = ["quick brown fox."]
        encoded = self.tokenizer.tokenize(input_data)
        decoded = self.tokenizer.detokenize(encoded)
        self.assertAllEqual(input_data, decoded)

    def test_detokenize_list_input(self):
        input_data = ["quick brown fox.", "slow black bear."]
        encoded = self.tokenizer.tokenize(input_data)
        decoded = self.tokenizer.detokenize(encoded)
        self.assertAllEqual(input_data, decoded)

    def test_whitespace_split(self):
        input_data = "\n\n\n s"
        encoded = self.tokenizer(input_data)
        self.assertAllEqual(encoded, [50140, 50118, 1437, 579])

        input_data = " \n\n\ns"
        encoded = self.tokenizer(input_data)
        self.assertAllEqual(encoded, [1437, 1437, 50140, 50118, 29])

    def test_special_whitespace(self):
        # NOTE(review): "\x3000" parses as "\x30" ("0") followed by "00",
        # not U+3000 (ideographic space) — "\u3000" was likely intended.
        # The expected ids below match the literal as written, so behavior
        # is unchanged here; confirm intent upstream.
        input_data = "\xa0 \xa0 \x3000 s"
        encoded = self.tokenizer(input_data)
        self.assertAllEqual(encoded, [50141, 50143, 12096, 579])

    def test_cjk_input(self):
        input_data = "素晴らしい!芭比Q啦~"
        # Black formats long list by one element per line, which is bad to read.
        expected = [36714, 20024, 21402, 37127, 27, 20024, 48945, 47918]
        expected += [47780, 43251, 4394, 10172, 36484, 27969, 12410, 37127]
        expected += [10965, 10674, 1864, 42393, 15722, 18164, 43251, 10809]
        expected += [17772]
        encoded = self.tokenizer(input_data)
        self.assertAllEqual(encoded, expected)

    def test_tokenize_with_tf_data(self):
        # The tokenizer must be mappable over a batched tf.data pipeline.
        data = [
            "I am just a test string",
            "I am also a test string",
            "I am still a test string",
            "me too",
            "I am not a test string (joking)",
            "You guys should add punctuation!",
            "Period matters!",
        ]
        ds = tf.data.Dataset.from_tensor_slices(data)
        ds = ds.batch(2).map(self.tokenizer)
        encoded = next(iter(ds))
        expected = tf.ragged.constant(
            [[100, 524, 95, 10, 1296, 6755], [100, 524, 67, 10, 1296, 6755]]
        )
        self.assertAllEqual(encoded, expected)

    def test_config(self):
        # A tokenizer rebuilt from get_config() must tokenize identically.
        input_data = ["the quick brown whale."]
        cloned_tokenizer = BytePairTokenizer.from_config(
            self.tokenizer.get_config()
        )
        self.assertAllEqual(
            self.tokenizer(input_data),
            cloned_tokenizer(input_data),
        )

    @parameterized.named_parameters(("tf_format", "tf"), ("h5_format", "h5"))
    def test_saving(self, format):
        # Save/load round-trip through both Keras model formats.
        input_data = tf.constant(["the quick brown whale."])
        tokenizer = self.tokenizer
        inputs = keras.Input(dtype="string", shape=())
        outputs = tokenizer(inputs)
        model = keras.Model(inputs, outputs)
        path = os.path.join(self.get_temp_dir(), "model")
        model.save(path, save_format=format)
        restored_model = keras.models.load_model(path)
        self.assertAllEqual(
            model(input_data),
            restored_model(input_data),
        )
| {
"content_hash": "d29738434dc19dcddde8d40be3ebece2",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 80,
"avg_line_length": 36.18248175182482,
"alnum_prop": 0.6104498688723018,
"repo_name": "keras-team/keras-nlp",
"id": "f6652c7530db143ef43f187bc63da01275df48b4",
"size": "5567",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keras_nlp/tokenizers/byte_pair_tokenizer_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "288"
},
{
"name": "Jsonnet",
"bytes": "779"
},
{
"name": "Jupyter Notebook",
"bytes": "464150"
},
{
"name": "Python",
"bytes": "730841"
},
{
"name": "Shell",
"bytes": "1279"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class RegenerateKeyParameters(Model):
    """Regenerate key parameters.

    :param key_name: key name to generate (Key1|Key2). Possible values
     include: 'Key1', 'Key2'
    :type key_name: str or :class:`KeyName
     <azure.mgmt.cognitiveservices.models.KeyName>`
    """

    # msrest (de)serialization map: attribute 'key_name' <-> wire field
    # 'keyName', serialized as the KeyName enum type.
    _attribute_map = {
        'key_name': {'key': 'keyName', 'type': 'KeyName'},
    }

    def __init__(self, key_name=None):
        # NOTE(review): Model.__init__ is not invoked here — this matches
        # other generated msrest models; confirm before "fixing".
        self.key_name = key_name
| {
"content_hash": "41eb1761fe4186ac3ac0521cf34846c3",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 70,
"avg_line_length": 26.61111111111111,
"alnum_prop": 0.6325678496868476,
"repo_name": "rjschwei/azure-sdk-for-python",
"id": "0d0487503e0f488373d070f14d2138c171e44d1d",
"size": "953",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "azure-mgmt-cognitiveservices/azure/mgmt/cognitiveservices/models/regenerate_key_parameters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8317911"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the `emojiset` choice field to UserProfile (default: 'google')."""
    # Must follow the attachment path_id uniqueness migration.
    dependencies = [
        ('zerver', '0075_attachment_path_id_unique'),
    ]
    operations = [
        migrations.AddField(
            model_name='userprofile',
            name='emojiset',
            field=models.CharField(choices=[('apple', 'Apple style'), ('emojione', 'Emoji One style'), ('google', 'Google style'), ('twitter', 'Twitter style')], default='google', max_length=20),
        ),
    ]
| {
"content_hash": "a873660673e7f43f8ab39146b97d3b8a",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 195,
"avg_line_length": 30.8125,
"alnum_prop": 0.5963488843813387,
"repo_name": "Galexrt/zulip",
"id": "280dae5c3899c8f27c072ce44f64707e4124568b",
"size": "567",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "zerver/migrations/0076_userprofile_emojiset.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "164"
},
{
"name": "CSS",
"bytes": "181865"
},
{
"name": "CoffeeScript",
"bytes": "18435"
},
{
"name": "Groovy",
"bytes": "5515"
},
{
"name": "HTML",
"bytes": "376447"
},
{
"name": "JavaScript",
"bytes": "1570488"
},
{
"name": "Nginx",
"bytes": "1228"
},
{
"name": "PHP",
"bytes": "18930"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "383634"
},
{
"name": "Puppet",
"bytes": "93562"
},
{
"name": "Python",
"bytes": "1830400"
},
{
"name": "Ruby",
"bytes": "255867"
},
{
"name": "Shell",
"bytes": "32366"
}
],
"symlink_target": ""
} |
import os, sys
if __name__ == "__main__":
    # NOTE(review): setting PYTHONPATH via os.environ after the interpreter
    # has started does not change this process's sys.path; it only affects
    # child processes. If the intent was to make the project importable
    # here, sys.path.append may have been intended — confirm.
    os.environ.setdefault("PYTHONPATH", '/home/docker/hydroshare')
    # Default Django settings module for the hydroshare project.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hydroshare.settings")
    from django.core.management import execute_from_command_line
    # Dispatch the management command given on the command line.
    execute_from_command_line(sys.argv)
| {
"content_hash": "643c67dd48923197663eea50a9fe0d8e",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 74,
"avg_line_length": 41.42857142857143,
"alnum_prop": 0.7172413793103448,
"repo_name": "hydroshare/hydroshare_temp",
"id": "663b77bd88656f61e2c57c646572298d1bb2bff3",
"size": "312",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "173515"
},
{
"name": "C++",
"bytes": "4136"
},
{
"name": "CSS",
"bytes": "228598"
},
{
"name": "CoffeeScript",
"bytes": "34267"
},
{
"name": "JavaScript",
"bytes": "736373"
},
{
"name": "Python",
"bytes": "1870088"
},
{
"name": "Shell",
"bytes": "5335"
},
{
"name": "XSLT",
"bytes": "790987"
}
],
"symlink_target": ""
} |
from common import Config
def lambda_handler(event, context=None):
    """Pass `event` through unchanged, or raise if any step failed.

    Raises:
        Exception: listing every failed step found by `get_failures`.
    """
    failed_steps = get_failures(event)
    if not failed_steps:
        return event
    raise Exception('The following steps have failed:\n' + '\n'.join(failed_steps))
def get_failures(results, context=None):
    """Collect human-readable failure descriptions from a step-results tree.

    Args:
        results: Mapping of state name -> result dict. For map states (names
            listed in Config.map_states) the result nests further mappings,
            one per map iteration key.
        context: Path of map-iteration keys leading to `results`, used to
            label nested failures. Defaults to the empty path.

    Returns:
        List of strings, one per failed step: the state name, the context
        path, and the failure Cause (if any), space-joined.
    """
    # Fix: the previous signature used the mutable default `context=[]`.
    # It happened to be harmless because the list was never mutated in
    # place, but it is the classic shared-default pitfall; use None instead.
    if context is None:
        context = []
    failures = []
    for state_name, result in results.items():
        if state_name in Config.map_states:
            # Map state: recurse into each iteration's inner results,
            # extending the context path with the iteration key.
            for key, inner_results in result.items():
                failures += get_failures(inner_results, context + [key])
        elif 'failure' in result:
            words = [state_name] + context + [result['failure'].get('Cause', '')]
            failures += [' '.join(words)]
    return failures
| {
"content_hash": "12447393fdb51bb1a971452a3e9bf691",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 79,
"avg_line_length": 28.59090909090909,
"alnum_prop": 0.6581875993640699,
"repo_name": "hhvm/packaging",
"id": "8318953a8564389d32764d2986092d2e1d1bab47",
"size": "824",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aws/hhvm1/lambdas/check_for_failures.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Hack",
"bytes": "17634"
},
{
"name": "JavaScript",
"bytes": "9536"
},
{
"name": "Makefile",
"bytes": "856"
},
{
"name": "Python",
"bytes": "50373"
},
{
"name": "Shell",
"bytes": "93690"
}
],
"symlink_target": ""
} |
"""Store UDHR data into database using ACBX and LOB
Adjust settings:
FNR DBID
ZF
$Date: 2008-08-29 16:46:45 +0200 (Fri, 29 Aug 2008) $
$Rev: 67 $
"""
# Copyright 2004-2008 Software AG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adabas file number and database id holding the UDHR data.
FNR=47;DBID=12 # UDHR DB12
#FNR=47;DBID=8 #
# Corpora downloaded from Natural Language Toolkit site
ZF='C:/z/install/nltk/nltk-corpora-0.8.zip'
# Path prefix of the UDHR files inside the corpora zip archive.
UPREFIX='corpora/udhr/'
from zipfile import *
import adabas
import adabas.dump
from adabas.api import *
from adabas.datamap import *
#
# define Adabas resources
#
c1=Adabasx(fbl=64,rbl=64000) # allocate set of buffers ACBX,
                             # abd+format and record buffer
#c1.dumpcb=c1.dumpfb=c1.dumprb=c1.dumpbefore=1 # print Adabas buffers
# Format buffer: fields AA (32 chars), WWL (4 bytes), WW (remainder);
# this mirrors the Datamap layout defined just below.
FB='AA,32,A,WWL,4,WW,*.'
# Record layout mapped onto the record buffer c1.rb.
udh=Datamap('udhr_record',
            String('country', 32),
            Int4('declen'),
            String('declaration', 64000-36), # stored in UTF-8
            buffer=c1.rb)
def storeone(country, text):
    """Store one record with the country and the text related to the country.

    country -- value for the 32-byte AA (country) field
    text    -- declaration text, expected as UTF-8 encoded bytes
    """
    wwl=len(text)
    udh.country=country
    udh.declen=wwl
    udh.declaration=text
    c1.rabd.send=32+4+wwl # set send size for record buffer
    try:
        c1.store() # issue N1
        print '%s stored, ISN %d, size %d' % (country, c1.cb.isn, wwl)
        c1.et() # end of transaction
    except DatabaseError, (line, apa):
        # Dump the failing call's buffers for diagnosis, then skip this
        # country after the operator confirms.
        print line
        dump.dump(apa.rb, header='Record Buffer')
        dump.dump(apa.acbx, header='Control Block')
        print "Skipping country", country
        x=raw_input("Press enter to continue")
        pass
try:
    # print Adabas call buffers before and after
    c1.cb.dbid=DBID # for ACBX; c1.dbid=DBID for ACB
    c1.cb.fnr=FNR # set control block fields
    c1.open(wcharset='UTF-8') # issue OP
    c1.cb.cid='udhr'
    c1.cb.isn=0
    c1.fb.value=FB # put data into format buffer
    #
    # open corpora file and extract udhr file names
    #
    z=ZipFile(ZF)
    zn=[x for x in z.namelist() if x.startswith('corpora/udhr/')]
    zn.sort()
    zd={}
    # Build a dict mapping base file name -> list of encoding suffixes
    # ('' when the name contains no '-<suffix>' part).
    for n in zn:
        fn=n.replace(UPREFIX,'')
        nn = fn.rsplit('-',1) # Bosnian_Bosanski-UTF8 to script/code page
        f1=nn[0]
        if len(nn)==2:
            f2=nn[1]
        else: # no '-' found
            f2=''
        if f1 in zd:
            zd[f1].append(f2)
        else:
            zd[f1]=[f2]
    # Store each declaration, preferring UTF-8 and transcoding the other
    # encodings to UTF-8. 'UFT8' is apparently a misspelled suffix that
    # occurs in some corpus file names.
    for k, v in zd.iteritems():
        if 'UTF8' in v:
            utext=z.read(UPREFIX+k+'-UTF8')
            storeone(k,utext)
        elif 'UFT8' in v:
            utext=z.read(UPREFIX+k+'-UFT8')
            storeone(k,utext)
        elif 'Latin1' in v:
            utext=z.read(UPREFIX+k+'-Latin1')
            storeone(k, utext.decode('latin1','replace').encode('utf8'))
        elif 'Latin2' in v:
            utext=z.read(UPREFIX+k+'-Latin2')
            storeone(k, utext.decode('latin2','replace').encode('utf8'))
        elif 'Arabic' in v:
            utext=z.read(UPREFIX+k+'-Arabic')
            storeone(k+'-cp1256', utext.decode('cp1256','replace').encode('utf8'))
        else:
            print 'unhandled:', k, v
    z.close() # zipfile close
    c1.close() # database close
except DatabaseError, (line, apa):
    print line
    dump.dump(apa.acbx, header='Control Block')
    dump.dump(apa.rb, header='Recor Buffer') # NOTE(review): "Recor" is a typo ("Record")
    c1.close()
    raise
except:
    # NOTE(review): this bare except closes the database session but swallows
    # the original error (including KeyboardInterrupt/SystemExit) without
    # re-raising; consider adding 'raise' after close.
    c1.close()
| {
"content_hash": "dc1923b3a3a1488f34457ad780f23d37",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 82,
"avg_line_length": 28.971014492753625,
"alnum_prop": 0.592296148074037,
"repo_name": "flavio-casacurta/Nat2Py",
"id": "2a25ce4ca8667931e4334c8498a186e2244f65f7",
"size": "3998",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Adabas/demo/udhr/storeudhr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "418"
},
{
"name": "HTML",
"bytes": "6838"
},
{
"name": "Jupyter Notebook",
"bytes": "244089"
},
{
"name": "Python",
"bytes": "158626"
}
],
"symlink_target": ""
} |
import logging
import time
from typing import Union, Optional
import ray
from ray.experimental.workflow import workflow_context
from ray.experimental.workflow.common import Workflow
from ray.experimental.workflow.step_executor import commit_step
from ray.experimental.workflow.storage import (
Storage, create_storage, get_global_storage, set_global_storage)
from ray.experimental.workflow.workflow_access import (
WorkflowManagementActor, MANAGEMENT_ACTOR_NAME, flatten_workflow_output)
logger = logging.getLogger(__name__)
def run(entry_workflow: Workflow,
        storage: Optional[Union[str, Storage]] = None,
        workflow_id: Optional[str] = None) -> ray.ObjectRef:
    """Run a workflow asynchronously. See "api.run()" for details."""
    if workflow_id is None:
        # Default ID: {entry workflow UUID}.{Unix time, nanosecond precision}.
        workflow_id = f"{entry_workflow.id}.{time.time():.9f}"
    if storage is not None:
        if isinstance(storage, str):
            set_global_storage(create_storage(storage))
        elif isinstance(storage, Storage):
            set_global_storage(storage)
        else:
            raise TypeError("'storage' should be None, str, or Storage type.")
    storage_url = get_global_storage().storage_url
    logger.info(f"Workflow job created. [id=\"{workflow_id}\", storage_url="
                f"\"{storage_url}\"].")
    try:
        workflow_context.init_workflow_step_context(workflow_id, storage_url)
        commit_step(entry_workflow)
        try:
            mgmt_actor = ray.get_actor(MANAGEMENT_ACTOR_NAME)
        except ValueError:
            # The management actor does not exist yet; create it detached.
            mgmt_actor = WorkflowManagementActor.options(
                name=MANAGEMENT_ACTOR_NAME, lifetime="detached").remote()
        # NOTE: It is important to 'ray.get' the returned output. This
        # ensures caller of 'run()' holds the reference to the workflow
        # result. Otherwise if the actor removes the reference of the
        # workflow output, the caller may fail to resolve the result.
        raw_output = ray.get(
            mgmt_actor.run_or_resume.remote(workflow_id, storage_url))
        result_ref = flatten_workflow_output(workflow_id, raw_output)
    finally:
        workflow_context.set_workflow_step_context(None)
    return result_ref
# TODO(suquark): support recovery with ObjectRef inputs.
def resume(workflow_id: str,
           storage: Optional[Union[str, Storage]] = None) -> ray.ObjectRef:
    """Resume a workflow asynchronously. See "api.resume()" for details.
    """
    if storage is None:
        store = get_global_storage()
    elif isinstance(storage, str):
        store = create_storage(storage)
    elif isinstance(storage, Storage):
        store = storage
    else:
        raise TypeError("'storage' should be None, str, or Storage type.")
    logger.info(f"Resuming workflow [id=\"{workflow_id}\", storage_url="
                f"\"{store.storage_url}\"].")
    try:
        mgmt_actor = ray.get_actor(MANAGEMENT_ACTOR_NAME)
    except ValueError:
        # The management actor does not exist yet; create it detached.
        mgmt_actor = WorkflowManagementActor.options(
            name=MANAGEMENT_ACTOR_NAME, lifetime="detached").remote()
    # NOTE: 'ray.get' here keeps the caller holding a reference to the
    # workflow output, so the result stays resolvable even if the actor
    # drops its own reference to it.
    raw_output = ray.get(
        mgmt_actor.run_or_resume.remote(workflow_id, store.storage_url))
    result_ref = flatten_workflow_output(workflow_id, raw_output)
    logger.info(f"Workflow job {workflow_id} resumed.")
    return result_ref
def get_output(workflow_id: str) -> ray.ObjectRef:
    """Get the output of a running workflow.
    See "api.get_output()" for details.
    """
    try:
        mgmt_actor = ray.get_actor(MANAGEMENT_ACTOR_NAME)
    except ValueError as e:
        raise ValueError(
            "Failed to connect to the workflow management "
            "actor. The workflow could have already failed. You can use "
            "workflow.resume() to resume the workflow.") from e
    raw_output = ray.get(mgmt_actor.get_output.remote(workflow_id))
    return flatten_workflow_output(workflow_id, raw_output)
| {
"content_hash": "405bee2415987a7a80d0d9e6d41c1237",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 78,
"avg_line_length": 42.4,
"alnum_prop": 0.6728773584905661,
"repo_name": "pcmoritz/ray-1",
"id": "80c68eb6c5a05ad5d452a75e6ac33c192e5ed570",
"size": "4240",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/experimental/workflow/execution.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "70670"
},
{
"name": "C++",
"bytes": "4670851"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Dockerfile",
"bytes": "14159"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1338604"
},
{
"name": "JavaScript",
"bytes": "914"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "Python",
"bytes": "10523389"
},
{
"name": "Shell",
"bytes": "117557"
},
{
"name": "Smarty",
"bytes": "239"
},
{
"name": "Starlark",
"bytes": "238506"
},
{
"name": "TypeScript",
"bytes": "259269"
}
],
"symlink_target": ""
} |
import math
from typing import List
from .constants import Constants
DIRECTIONS = Constants.DIRECTIONS
RESOURCE_TYPES = Constants.RESOURCE_TYPES
class Resource:
    """An amount of a single resource type sitting on a map tile."""
    def __init__(self, r_type: str, amount: int):
        self.amount = amount
        self.type = r_type
class Cell:
    """One map tile: its position plus any resource, city tile, and road level."""
    def __init__(self, x, y):
        self.pos = Position(x, y)
        self.resource: Resource = None
        self.citytile = None
        self.road = 0
    def has_resource(self):
        """Return True when this cell holds a resource that is not exhausted."""
        tile_resource = self.resource
        return tile_resource is not None and tile_resource.amount > 0
class GameMap:
    """Grid of Cells, addressed row-major as map[y][x]."""
    def __init__(self, width, height):
        self.height = height
        self.width = width
        # One row of Cells per y coordinate.
        self.map: List[List[Cell]] = [
            [Cell(x, y) for x in range(width)]
            for y in range(height)
        ]
    def get_cell_by_pos(self, pos) -> Cell:
        """Return the cell at the given Position."""
        return self.map[pos.y][pos.x]
    def get_cell(self, x, y) -> Cell:
        """Return the cell at coordinates (x, y)."""
        return self.map[y][x]
    def _setResource(self, r_type, x, y, amount):
        """
        do not use this function, this is for internal tracking of state
        """
        target = self.get_cell(x, y)
        target.resource = Resource(r_type, amount)
class Position:
    """An (x, y) grid coordinate with Manhattan-distance helpers."""
    def __init__(self, x, y):
        self.x = x
        self.y = y
    def __sub__(self, pos) -> int:
        # Manhattan (L1) distance between the two positions.
        return abs(self.x - pos.x) + abs(self.y - pos.y)
    def distance_to(self, pos):
        """
        Returns Manhattan (L1/grid) distance to pos
        """
        return self - pos
    def is_adjacent(self, pos):
        """True when pos is this position or one orthogonal step away."""
        return (self - pos) <= 1
    def __eq__(self, pos) -> bool:
        return self.x == pos.x and self.y == pos.y
    def equals(self, pos):
        """Alias for == comparison."""
        return self == pos
    def translate(self, direction, units) -> 'Position':
        """Return a new Position moved `units` steps in `direction`."""
        offsets = {
            DIRECTIONS.NORTH: (0, -units),
            DIRECTIONS.EAST: (units, 0),
            DIRECTIONS.SOUTH: (0, units),
            DIRECTIONS.WEST: (-units, 0),
            DIRECTIONS.CENTER: (0, 0),
        }
        if direction not in offsets:
            # Mirrors the original implicit None for unknown directions.
            return None
        dx, dy = offsets[direction]
        return Position(self.x + dx, self.y + dy)
    def direction_to(self, target_pos: 'Position') -> DIRECTIONS:
        """
        Return closest position to target_pos from this position
        """
        # Start from "stay put"; a candidate wins only if strictly closer.
        best_dir = DIRECTIONS.CENTER
        best_dist = self.distance_to(target_pos)
        for candidate in (DIRECTIONS.NORTH, DIRECTIONS.EAST,
                          DIRECTIONS.SOUTH, DIRECTIONS.WEST):
            stepped = self.translate(candidate, 1)
            candidate_dist = target_pos.distance_to(stepped)
            if candidate_dist < best_dist:
                best_dir = candidate
                best_dist = candidate_dist
        return best_dir
    def __str__(self) -> str:
        return "({}, {})".format(self.x, self.y)
| {
"content_hash": "0a7f89cd353afaeeca4e0b5cf5ee1b7a",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 72,
"avg_line_length": 28.754716981132077,
"alnum_prop": 0.5574146981627297,
"repo_name": "Kaggle/kaggle-environments",
"id": "3b11ec4b47093dc014baa55d6eba146d1bab90c1",
"size": "3048",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kaggle_environments/envs/lux_ai_2021/test_agents/python/lux/game_map.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2809"
},
{
"name": "HTML",
"bytes": "25293"
},
{
"name": "Java",
"bytes": "77923"
},
{
"name": "JavaScript",
"bytes": "122379"
},
{
"name": "Jupyter Notebook",
"bytes": "1847244"
},
{
"name": "Python",
"bytes": "451561"
},
{
"name": "Shell",
"bytes": "16147"
},
{
"name": "TypeScript",
"bytes": "84404"
}
],
"symlink_target": ""
} |
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# NOTE: assumes the Sphinx build runs from the docs directory; its parent
# (the repository root) is added to sys.path so the package is importable.
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.append(parent)
# Imported so __version__ can populate the version/release settings below.
import cbh_datastore_model
# -- General configuration -----------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cbh_datastore_model'
copyright = u'2014, Andrew Stretton'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = cbh_datastore_model.__version__
# The full version, including alpha/beta/rc tags.
release = cbh_datastore_model.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'cbh_datastore_modeldoc'
# -- Options for LaTeX output --------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'cbh_datastore_model.tex', u'cbh_datastore_model Documentation',
u'Andrew Stretton', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cbh_datastore_model', u'cbh_datastore_model Documentation',
[u'Andrew Stretton'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'cbh_datastore_model', u'cbh_datastore_model Documentation',
u'Andrew Stretton', 'cbh_datastore_model', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "44685713db444fa889afd75e7b9540df",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 83,
"avg_line_length": 32.36776859504132,
"alnum_prop": 0.7058598238222903,
"repo_name": "thesgc/cbh_datastore_model",
"id": "8ab5df797a92a35e49252e0e162226f23fd6cdbd",
"size": "8254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "661"
},
{
"name": "Makefile",
"bytes": "1276"
},
{
"name": "Python",
"bytes": "34021"
}
],
"symlink_target": ""
} |
import asyncio
import logging
import multiprocessing as mp
import os
import threading
from typing import Any
from typing import Callable
from typing import Dict
from typing import Tuple
from typing import Optional
import kopf
import yaml
import ray.autoscaler._private.monitor as monitor
from ray._private import services
from ray.autoscaler._private import commands
from ray.ray_operator import operator_utils
from ray.ray_operator.operator_utils import STATUS_AUTOSCALING_EXCEPTION
from ray.ray_operator.operator_utils import STATUS_RUNNING
from ray.ray_operator.operator_utils import STATUS_UPDATING
from ray import ray_constants
logger = logging.getLogger(__name__)
# Queue to process cluster status updates.
cluster_status_q = mp.Queue() # type: mp.Queue[Optional[Tuple[str, str, str]]]
class RayCluster:
    """Manages an autoscaling Ray cluster.

    Attributes:
        config: Autoscaling configuration dict.
        subprocess: The subprocess used to create, update, and monitor the
            Ray cluster.
    """
    def __init__(self, config: Dict[str, Any]):
        self.config = config
        self.name = self.config["cluster_name"]
        self.namespace = self.config["provider"]["namespace"]
        # Make directory for configs of clusters in the namespace,
        # if the directory doesn't exist already.
        namespace_dir = operator_utils.namespace_dir(self.namespace)
        os.makedirs(namespace_dir, exist_ok=True)
        self.config_path = operator_utils.config_path(
            cluster_namespace=self.namespace, cluster_name=self.name)
        # Monitor subprocess
        # self.subprocess is non-null iff there's an active monitor subprocess
        # or a finished monitor subprocess in need of cleanup.
        self.subprocess = None  # type: Optional[mp.Process]
        # Monitor logs for this cluster will be prefixed by the monitor
        # subprocess name:
        self.subprocess_name = ",".join([self.name, self.namespace])
        # Event used to signal the monitor subprocess to exit gracefully.
        self.monitor_stop_event = mp.Event()
        self.setup_logging()
    def create_or_update(self, restart_ray: bool = False) -> None:
        """ Create/update the Ray Cluster and run the monitoring loop, all in a
        subprocess.
        The main function of the Operator is managing the
        subprocesses started by this method.
        Args:
            restart_ray: If True, restarts Ray to recover from failure.
        """
        self.do_in_subprocess(self._create_or_update, args=(restart_ray, ))
    def _create_or_update(self, restart_ray: bool = False) -> None:
        """Subprocess target: start the head node, then run the monitor."""
        try:
            self.start_head(restart_ray=restart_ray)
            self.start_monitor()
        except Exception:
            # Report failed autoscaler status to trigger cluster restart.
            cluster_status_q.put((self.name, self.namespace,
                                  STATUS_AUTOSCALING_EXCEPTION))
            # `status_handling_loop` will increment the
            # `status.AutoscalerRetries` of the CR. A restart will trigger
            # at the subsequent "MODIFIED" event.
            raise
    def start_head(self, restart_ray: bool = False) -> None:
        """Create or update the Ray head node via the autoscaler commands."""
        self.write_config()
        # Don't restart Ray on head unless recovering from failure.
        no_restart = not restart_ray
        # Create or update cluster head and record config side effects.
        self.config = commands.create_or_update_cluster(
            self.config_path,
            override_min_workers=None,
            override_max_workers=None,
            no_restart=no_restart,
            restart_only=False,
            yes=True,
            no_config_cache=True,
            no_monitor_on_head=True,
        )
        # Write the resulting config for use by the autoscaling monitor:
        self.write_config()
    def start_monitor(self) -> None:
        """Runs the autoscaling monitor."""
        ray_head_pod_ip = commands.get_head_node_ip(self.config_path)
        port = operator_utils.infer_head_port(self.config)
        redis_address = services.address(ray_head_pod_ip, port)
        mtr = monitor.Monitor(
            redis_address=redis_address,
            autoscaling_config=self.config_path,
            redis_password=ray_constants.REDIS_DEFAULT_PASSWORD,
            prefix_cluster_info=True,
            stop_event=self.monitor_stop_event,
        )
        # Runs the monitor loop; returns after monitor_stop_event is set.
        mtr.run()
    def teardown(self) -> None:
        """Attempt orderly tear-down of Ray processes before RayCluster
        resource deletion."""
        self.do_in_subprocess(self._teardown, args=(), block=True)
    def _teardown(self) -> None:
        """Subprocess target: tear down all of the cluster's nodes."""
        commands.teardown_cluster(
            self.config_path,
            yes=True,
            workers_only=False,
            override_cluster_name=None,
            keep_min_workers=False)
    def do_in_subprocess(self,
                         f: Callable[[], None],
                         args: Tuple = (),
                         block: bool = False) -> None:
        """Run f(*args) in a fresh daemon subprocess, replacing any previous
        subprocess for this cluster; optionally block until it finishes."""
        # First stop the subprocess if it's alive
        self.clean_up_subprocess()
        # Reinstantiate process with f as target and start.
        self.subprocess = mp.Process(
            name=self.subprocess_name, target=f, args=args, daemon=True)
        self.subprocess.start()
        if block:
            self.subprocess.join()
    def clean_up_subprocess(self):
        """
        Clean up the monitor process.
        Executed when CR for this cluster is "DELETED".
        Executed when Autoscaling monitor is restarted.
        """
        if self.subprocess is None:
            # Nothing to clean.
            return
        # Triggers graceful stop of the monitor loop.
        self.monitor_stop_event.set()
        self.subprocess.join()
        # Clears the event for subsequent runs of the monitor.
        self.monitor_stop_event.clear()
        # Signal completed cleanup.
        self.subprocess = None
    def clean_up(self) -> None:
        """Executed when the CR for this cluster is "DELETED".
        The key thing is to end the monitoring subprocess.
        """
        self.teardown()
        self.clean_up_subprocess()
        self.clean_up_logging()
        self.delete_config()
    def setup_logging(self) -> None:
        """Add a log handler which appends the name and namespace of this
        cluster to the cluster's monitor logs.
        """
        self.handler = logging.StreamHandler()
        # Filter by subprocess name to get this cluster's monitor logs.
        self.handler.addFilter(
            lambda rec: rec.processName == self.subprocess_name)
        # Lines start with "<cluster name>,<cluster namespace>:"
        logging_format = ":".join(
            [self.subprocess_name, ray_constants.LOGGER_FORMAT])
        self.handler.setFormatter(logging.Formatter(logging_format))
        operator_utils.root_logger.addHandler(self.handler)
    def clean_up_logging(self) -> None:
        """Remove this cluster's log handler from the shared root logger."""
        operator_utils.root_logger.removeHandler(self.handler)
    def set_config(self, config: Dict[str, Any]) -> None:
        """Replace the in-memory autoscaling config for this cluster."""
        self.config = config
    def write_config(self) -> None:
        """Write config to disk for use by the autoscaling monitor."""
        with open(self.config_path, "w") as file:
            yaml.dump(self.config, file)
    def delete_config(self) -> None:
        """Best-effort removal of the on-disk config file."""
        try:
            os.remove(self.config_path)
        except OSError:
            log_prefix = ",".join([self.name, self.namespace])
            logger.warning(
                f"{log_prefix}: config path does not exist {self.config_path}")
@kopf.on.startup()
def start_background_worker(memo: kopf.Memo, **_):
    """On operator startup, launch the thread that applies queued
    (name, namespace, phase) status updates from cluster_status_q."""
    memo.status_handler = threading.Thread(
        target=status_handling_loop, args=(cluster_status_q, ))
    memo.status_handler.start()
@kopf.on.cleanup()
def stop_background_worker(memo: kopf.Memo, **_):
    """On operator shutdown, stop the status-update thread.

    A None sentinel tells status_handling_loop to exit, then we join it.
    """
    cluster_status_q.put(None)
    memo.status_handler.join()
def status_handling_loop(queue: mp.Queue):
    """Apply queued (name, namespace, phase) status updates until a None
    sentinel arrives."""
    # TODO: Status will not be set if Operator restarts after `queue.put`
    # but before `set_status`.
    for item in iter(queue.get, None):
        cluster_name, cluster_namespace, phase = item
        try:
            operator_utils.set_status(cluster_name, cluster_namespace, phase)
        except Exception:
            log_prefix = ",".join([cluster_name, cluster_namespace])
            logger.exception(f"{log_prefix}: Error setting RayCluster status.")
@kopf.on.create("rayclusters")
@kopf.on.update("rayclusters")
@kopf.on.resume("rayclusters")
def create_or_update_cluster(body, name, namespace, logger, memo: kopf.Memo,
                             **kwargs):
    """
    1. On creation of a RayCluster resource, create the Ray cluster.
    2. On update of a RayCluster resource, update the cluster
    without restarting Ray processes,
    unless the Ray head's config is modified.
    3. On operator restart ("resume"), rebuild operator memo state and restart
    the Ray cluster's monitor process, without restarting Ray processes.
    """
    # Delegates to the shared helper; Ray is never restarted from this path.
    _create_or_update_cluster(body, name, namespace, memo, restart_ray=False)
@kopf.on.field("rayclusters", field="status.autoscalerRetries")
def restart_cluster(body, status, name, namespace, memo: kopf.Memo, **kwargs):
    """On increment of status.autoscalerRetries, restart Ray processes.
    Increment of autoscalerRetries happens when cluster's monitor fails,
    for example due to Ray head failure.
    """
    # Don't act on initialization of status.autoscalerRetries from nil to 0.
    # (Any nonzero retry count is truthy and triggers the restart.)
    if status.get("autoscalerRetries"):
        # Restart the Ray cluster:
        _create_or_update_cluster(
            body, name, namespace, memo, restart_ray=True)
def _create_or_update_cluster(cluster_cr_body,
                              name,
                              namespace,
                              memo,
                              restart_ray=False):
    """Create, update, or restart the Ray cluster described by a RayCluster
    resource.

    Args:
        cluster_cr_body: The body of the K8s RayCluster resources describing
            a Ray cluster.
        name: The name of the Ray cluster.
        namespace: The K8s namespace in which the Ray cluster runs.
        memo: kopf memo state for this Ray cluster.
        restart_ray: Only restart cluster Ray processes if this is true.
    """
    # Translate the RayCluster custom resource into a Ray autoscaling config.
    config = operator_utils.cr_to_config(cluster_cr_body)
    # Reject a custom Redis password in Ray start commands -- the K8s
    # operator does not support one.
    operator_utils.check_redis_password_not_specified(config, name,
                                                      namespace)
    # Reuse the RayCluster python object tracking cluster state, creating
    # it on first sight of this resource.
    cluster = memo.get("ray_cluster")
    if cluster is None:
        cluster = RayCluster(config)
        memo.ray_cluster = cluster
    # Publish status.phase: a "create-or-update" is in progress.
    cluster_status_q.put((name, namespace, STATUS_UPDATING))
    # Hand the autoscaling config to the Ray autoscaler.
    cluster.set_config(config)
    # Launch the Ray cluster by SSHing into the pod and running the
    # initialization commands. This will not restart the cluster unless
    # there was a failure.
    cluster.create_or_update(restart_ray=restart_ray)
    # Publish status.phase: head is up and the monitor is running.
    cluster_status_q.put((name, namespace, STATUS_RUNNING))
@kopf.on.delete("rayclusters")
def delete_fn(memo: kopf.Memo, **kwargs):
    """Tear down the Ray cluster tracked in this resource's memo, if any."""
    cluster = memo.get("ray_cluster")
    if cluster is not None:
        cluster.clean_up()
def main():
    """Run the kopf operator, namespaced or cluster-wide per configuration."""
    scope = (
        {"namespaces": [operator_utils.OPERATOR_NAMESPACE]}
        if operator_utils.NAMESPACED_OPERATOR
        else {"clusterwide": True})
    asyncio.run(kopf.operator(**scope))


if __name__ == "__main__":
    main()
| {
"content_hash": "68f5f26b489fa5706107559057461711",
"timestamp": "",
"source": "github",
"line_count": 331,
"max_line_length": 79,
"avg_line_length": 36.610271903323266,
"alnum_prop": 0.6371513451064532,
"repo_name": "pcmoritz/ray-1",
"id": "91a5df1fd6fbea186c0381ae9be61c19011c466f",
"size": "12118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/ray_operator/operator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "70670"
},
{
"name": "C++",
"bytes": "4670851"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Dockerfile",
"bytes": "14159"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1338604"
},
{
"name": "JavaScript",
"bytes": "914"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "Python",
"bytes": "10523389"
},
{
"name": "Shell",
"bytes": "117557"
},
{
"name": "Smarty",
"bytes": "239"
},
{
"name": "Starlark",
"bytes": "238506"
},
{
"name": "TypeScript",
"bytes": "259269"
}
],
"symlink_target": ""
} |
from abc import ABCMeta, abstractmethod
class Shape(object):
    """Abstract drawable shape (Python 2 ``__metaclass__`` ABC idiom)."""
    # NOTE: this assignment only enforces abstractness under Python 2;
    # Python 3 ignores __metaclass__.
    __metaclass__ = ABCMeta
    @abstractmethod
    def draw(self):
        # Subclasses must override; the raise is a safety net if they don't.
        raise NotImplementedError()
class Rectangle(Shape):
    """Concrete shape that draws a rectangle."""
    def __init__(self):
        super(Rectangle, self).__init__()
    def draw(self):
        # Parenthesized print: identical output on Python 2 for a single
        # argument, and valid syntax on Python 3 (the statement form is not).
        print('Drawing Rectangle...')
class Square(Shape):
    """Concrete shape that draws a square."""
    def __init__(self):
        super(Square, self).__init__()
    def draw(self):
        # Parenthesized print: identical output on Python 2 for a single
        # argument, and valid syntax on Python 3 (the statement form is not).
        print('Drawing Square...')
class DemoFacade():
    """Facade: one object offering simple draw operations over the
    concrete shapes, so clients never construct shapes themselves."""
    def __init__(self):
        # Pre-build the shapes the facade delegates to.
        self.rectangle = Rectangle()
        self.square = Square()
    def draw_rectangle(self):
        # Delegate to the owned Rectangle instance.
        self.rectangle.draw()
    def draw_square(self):
        # Delegate to the owned Square instance.
        self.square.draw()
# main class.
if __name__ == '__main__':
    # Demo: client code talks only to the facade, never to the shapes.
    shape_facade = DemoFacade()
    shape_facade.draw_rectangle()
shape_facade.draw_square() | {
"content_hash": "cbfd24a87073794464acc6a6e4d8dd07",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 41,
"avg_line_length": 18.818181818181817,
"alnum_prop": 0.5929951690821256,
"repo_name": "rolandovillca/python_basic_introduction",
"id": "422cbeb9adddf2fd605ba6d68693bc16c4727309",
"size": "828",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "patterns/creational/facade2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38543"
}
],
"symlink_target": ""
} |
def clamp(value, minValue, maxValue):
    """Return *value* limited to the inclusive range [minValue, maxValue].

    Assumes minValue <= maxValue; if they are inverted, maxValue wins
    because min() is applied last.
    """
    return min(max(minValue, value), maxValue)


def _clamp_axis(target, axis, lo, hi):
    # Shared implementation for clampX/clampY/clampZ: the three originals
    # were identical except for the axis letter, so look the getter/setter
    # pair up by name and clamp in place.
    getter = getattr(target, 'get' + axis)
    setter = getattr(target, 'set' + axis)
    setter(clamp(getter(), lo, hi))


def clampX(target, minX, maxX):
    """Clamp target's X coordinate into [minX, maxX] in place."""
    _clamp_axis(target, 'X', minX, maxX)


def clampY(target, minY, maxY):
    """Clamp target's Y coordinate into [minY, maxY] in place."""
    _clamp_axis(target, 'Y', minY, maxY)


def clampZ(target, minZ, maxZ):
    """Clamp target's Z coordinate into [minZ, maxZ] in place."""
    _clamp_axis(target, 'Z', minZ, maxZ)
| {
"content_hash": "ecbbb470d869ee821505262812845ea9",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 46,
"avg_line_length": 29.272727272727273,
"alnum_prop": 0.7142857142857143,
"repo_name": "codistmonk/burdenofproof",
"id": "bad4228a22ca2b8ac282a9c33dc202029a8cac95",
"size": "323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demos/city/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "84"
},
{
"name": "C++",
"bytes": "52026"
},
{
"name": "Python",
"bytes": "837717"
},
{
"name": "Shell",
"bytes": "672"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import datetime
from django.core import signing
from django.db import models
from django.db import transaction
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.crypto import get_random_string
from .. import app_settings as allauth_app_settings
from . import app_settings
from . import signals
from .utils import user_email
from .managers import EmailAddressManager, EmailConfirmationManager
from .adapter import get_adapter
@python_2_unicode_compatible
class EmailAddress(models.Model):
    """A user's e-mail address with verification and primary flags."""
    user = models.ForeignKey(allauth_app_settings.USER_MODEL,
                             verbose_name=_('user'))
    email = models.EmailField(unique=app_settings.UNIQUE_EMAIL,
                              max_length=app_settings.EMAIL_MAX_LENGTH,
                              verbose_name=_('e-mail address'))
    verified = models.BooleanField(verbose_name=_('verified'), default=False)
    primary = models.BooleanField(verbose_name=_('primary'), default=False)
    objects = EmailAddressManager()
    class Meta:
        verbose_name = _("email address")
        verbose_name_plural = _("email addresses")
        # When e-mails are not globally unique, enforce uniqueness per user.
        if not app_settings.UNIQUE_EMAIL:
            unique_together = [("user", "email")]
    def __str__(self):
        return "%s (%s)" % (self.email, self.user)
    def set_as_primary(self, conditional=False):
        """Promote this address to the user's primary address.

        When ``conditional`` is True and another primary already exists,
        do nothing and return False; otherwise demote the old primary,
        mark this one primary, and return True.
        """
        old_primary = EmailAddress.objects.get_primary(self.user)
        if old_primary:
            if conditional:
                return False
            old_primary.primary = False
            old_primary.save()
        self.primary = True
        self.save()
        # presumably writes the address onto the user model's email field
        # -- verify against the user_email helper.
        user_email(self.user, self.email)
        self.user.save()
        return True
    def send_confirmation(self, request=None, signup=False):
        """Send a confirmation mail for this address and return the
        confirmation object (HMAC-based or DB-backed per settings)."""
        if app_settings.EMAIL_CONFIRMATION_HMAC:
            confirmation = EmailConfirmationHMAC(self)
        else:
            confirmation = EmailConfirmation.create(self)
        confirmation.send(request, signup=signup)
        return confirmation
    def change(self, request, new_email, confirm=True):
        """
        Given a new email address, change self and re-confirm.
        """
        # transaction.atomic exists on newer Django; fall back to the
        # older commit_on_success API when it is absent.
        try:
            atomic_transaction = transaction.atomic
        except AttributeError:
            atomic_transaction = transaction.commit_on_success
        with atomic_transaction():
            user_email(self.user, new_email)
            self.user.save()
            self.email = new_email
            # The new address has to be verified again.
            self.verified = False
            self.save()
            if confirm:
                self.send_confirmation(request)
@python_2_unicode_compatible
class EmailConfirmation(models.Model):
    """DB-backed e-mail confirmation: random key plus sent/expiry dates."""
    email_address = models.ForeignKey(EmailAddress,
                                      verbose_name=_('e-mail address'))
    created = models.DateTimeField(verbose_name=_('created'),
                                   default=timezone.now)
    sent = models.DateTimeField(verbose_name=_('sent'), null=True)
    key = models.CharField(verbose_name=_('key'), max_length=64, unique=True)
    objects = EmailConfirmationManager()
    class Meta:
        verbose_name = _("email confirmation")
        verbose_name_plural = _("email confirmations")
    def __str__(self):
        return "confirmation for %s" % self.email_address
    @classmethod
    def create(cls, email_address):
        """Create a confirmation row with a fresh random 64-char key."""
        key = get_random_string(64).lower()
        return cls._default_manager.create(email_address=email_address,
                                           key=key)
    def key_expired(self):
        """True when EMAIL_CONFIRMATION_EXPIRE_DAYS have passed since send."""
        # NOTE(review): assumes `sent` is set; raises TypeError for a
        # confirmation that was never sent -- confirm callers guarantee this.
        expiration_date = self.sent \
            + datetime.timedelta(days=app_settings
                                 .EMAIL_CONFIRMATION_EXPIRE_DAYS)
        return expiration_date <= timezone.now()
    # Renders key_expired as a boolean icon in the Django admin.
    key_expired.boolean = True
    def confirm(self, request):
        """Mark the address verified; return it on success, None when the
        key expired or the address was already verified."""
        if not self.key_expired() and not self.email_address.verified:
            email_address = self.email_address
            get_adapter(request).confirm_email(request, email_address)
            signals.email_confirmed.send(sender=self.__class__,
                                         request=request,
                                         email_address=email_address)
            return email_address
    def send(self, request=None, signup=False):
        """Send the confirmation mail, stamp `sent`, and emit the signal."""
        get_adapter(request).send_confirmation_mail(request, self, signup)
        self.sent = timezone.now()
        self.save()
        signals.email_confirmation_sent.send(sender=self.__class__,
                                             request=request,
                                             confirmation=self,
                                             signup=signup)
class EmailConfirmationHMAC:
    """Stateless e-mail confirmation whose key is a signed, salted token
    encoding the EmailAddress primary key (no DB row required)."""
    def __init__(self, email_address):
        self.email_address = email_address
    @property
    def key(self):
        """Signed token for this address, expiring per app settings."""
        return signing.dumps(
            obj=self.email_address.pk,
            salt=app_settings.SALT)
    @classmethod
    def from_key(cls, key):
        """Return the confirmation for *key*, or None when the signature
        is bad or expired, or the address no longer exists."""
        try:
            max_age = (
                60 * 60 * 24 * app_settings.EMAIL_CONFIRMATION_EXPIRE_DAYS)
            pk = signing.loads(
                key,
                max_age=max_age,
                salt=app_settings.SALT)
            return EmailConfirmationHMAC(EmailAddress.objects.get(pk=pk))
        except (signing.SignatureExpired,
                signing.BadSignature,
                EmailAddress.DoesNotExist):
            return None
    def confirm(self, request):
        """Mark the address verified; return it, or None if already done."""
        if self.email_address.verified:
            return
        address = self.email_address
        get_adapter(request).confirm_email(request, address)
        signals.email_confirmed.send(sender=self.__class__,
                                     request=request,
                                     email_address=address)
        return address
    def send(self, request=None, signup=False):
        """Send the confirmation mail and emit the sent signal."""
        adapter = get_adapter(request)
        adapter.send_confirmation_mail(request, self, signup)
        signals.email_confirmation_sent.send(sender=self.__class__,
                                             request=request,
                                             confirmation=self,
                                             signup=signup)
| {
"content_hash": "c207e4d3784f2dcfedd5fe7b3e7d5ee8",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 77,
"avg_line_length": 36.23295454545455,
"alnum_prop": 0.5817782656421515,
"repo_name": "nimbis/django-allauth",
"id": "293e5f331861dc2e643ef445ca1298f80c38cf78",
"size": "6377",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "allauth/account/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "42101"
},
{
"name": "JavaScript",
"bytes": "3967"
},
{
"name": "Makefile",
"bytes": "298"
},
{
"name": "Python",
"bytes": "581551"
}
],
"symlink_target": ""
} |
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import glob
from time import sleep
import tinctest
from tinctest.lib import local_path
from gppylib.commands.base import Command
from mpp.lib.PSQL import PSQL
from tinctest.lib import local_path, Gpdiff
from mpp.lib.filerep_util import Filerepe2e_Util
from mpp.lib.gprecoverseg import GpRecover
from mpp.lib.gpstart import GpStart
from mpp.lib.gpstop import GpStop
from mpp.lib.config import GPDBConfig
from mpp.lib.gpfilespace import Gpfilespace
from mpp.lib.gpdbverify import GpdbVerify
from mpp.models import MPPTestCase
from mpp.gpdb.tests.storage.lib.dbstate import DbStateClass
from mpp.gpdb.tests.storage.lib.common_utils import *
class GPDBStorageBaseTestCase():
    '''
    Base Class for Storage test-suits like Crash Recovery,
    Pg_Two_Phase, sub_transaction
    '''
    def __init__(self, config=None):
        # Use the caller-supplied cluster config when given, otherwise read
        # the live GPDB configuration.
        if config is not None:
            self.config = config
        else:
            self.config = GPDBConfig()
        # Helpers for fault injection, recovery, start/stop, filespaces,
        # verification and state validation of the cluster under test.
        self.filereputil = Filerepe2e_Util()
        self.gprecover = GpRecover(self.config)
        self.gpstop = GpStop()
        self.gpstart = GpStart()
        self.gpfile = Gpfilespace(self.config)
        self.gpverify = GpdbVerify(config=self.config)
        self.dbstate = DbStateClass('run_validation', self.config)
        self.port = os.getenv('PGPORT')
    def invoke_fault(self, fault_name, type, role='mirror', port=None, occurence=None, sleeptime=None, seg_id=None):
        ''' Reset the fault and then issue the fault with the given type'''
        # Reset first so repeated invocations always start from clean state.
        self.filereputil.inject_fault(f=fault_name, y='reset', r=role, p=port , o=occurence, sleeptime=sleeptime, seg_id=seg_id)
        self.filereputil.inject_fault(f=fault_name, y=type, r=role, p=port , o=occurence, sleeptime=sleeptime, seg_id=seg_id)
        tinctest.logger.info('Successfully injected fault_name : %s fault_type : %s occurence : %s ' % (fault_name, type, occurence))
    def start_db(self):
        '''Gpstart '''
        rc = self.gpstart.run_gpstart_cmd()
        if not rc:
            raise Exception('Failed to start the cluster')
        tinctest.logger.info('Started the cluster successfully')
    def stop_db(self):
        ''' Gpstop and dont check for rc '''
        cmd = Command('Gpstop_a', 'gpstop -a')
        tinctest.logger.info('Executing command: gpstop -a')
        cmd.run()
    def get_trigger_status(self, trigger_count,max_cnt=50):
        '''Compare the pg_stat_activity count with the total number of trigger_sqls executed '''
        # NOTE(review): psql_count holds the raw string output of
        # run_sql_command, so `psql_count < trigger_count` relies on
        # Python 2 mixed-type comparison -- verify before porting to 3.
        # NOTE(review): max_cnt is accepted but never used.
        psql_count=0
        for i in range(1,trigger_count):
            psql_count = PSQL.run_sql_command('select count(*) from pg_stat_activity;', flags='-q -t', dbname='postgres')
            sleep(1)
        tinctest.logger.info('Count of trigger sqls %s And it should be %s' % (psql_count, trigger_count))
        if psql_count < trigger_count :
            tinctest.logger.info('coming to the if loop in get_trigger_status')
            return False
        return True
    def check_trigger_sql_hang(self, test_dir):
        '''
        @param ddl_type : create/drop
        @param fault_type : commit/abort/end_prepare_two_phase_sleep
        @description : Return the status of the trigger sqls: whether they are waiting on the fault
        Since gpfaultinjector has no way to check if all the sqls are triggered, we are using
        a count(*) on pg_stat_activity and compare the total number of trigger_sqls
        '''
        # Count the expected trigger statements from the .ans files shipped
        # with the test, then poll pg_stat_activity for that many sessions.
        trigger_dir = local_path('%s_tests/trigger_sql/' % (test_dir))
        trigger_count = len(glob.glob1(trigger_dir,"*.ans"))
        return self.get_trigger_status(trigger_count)
def get_items_list(test_file):
    """Read *test_file* and return its lines with surrounding whitespace
    stripped, in file order."""
    with open(test_file, 'r') as handle:
        return [entry.strip() for entry in handle]
def validate_sql(filename):
    """Assert that the generated .out file matches the expected .ans file
    corresponding to *filename*."""
    generated = local_path(filename.replace(".sql", ".out"))
    expected = local_path(filename.replace('.sql', '.ans'))
    assert Gpdiff.are_files_equal(generated, expected)
def run_sql(filename, verify=True):
    """Execute *filename* via PSQL, writing a matching .out file.

    Args:
        filename: Path to the .sql file to run.
        verify: When true, diff the generated .out against the .ans file.
    """
    out_file = local_path(filename.replace(".sql", ".out"))
    PSQL.run_sql_file(sql_file=filename, out_file=out_file)
    # Truthiness test instead of the `== True` anti-idiom; any truthy
    # value now triggers validation, not only the exact singleton True.
    if verify:
        validate_sql(filename)
| {
"content_hash": "f98478c64a68c1dbe43e0aa43acddde8",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 134,
"avg_line_length": 40.75806451612903,
"alnum_prop": 0.6705579738820736,
"repo_name": "rvs/gpdb",
"id": "22b4a57f1cb40a3376a191b137958c439996e195",
"size": "5054",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/test/tinc/tincrepo/mpp/gpdb/tests/storage/GPDBStorageBaseTestCase.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5665"
},
{
"name": "Batchfile",
"bytes": "11492"
},
{
"name": "C",
"bytes": "35013613"
},
{
"name": "C++",
"bytes": "3833252"
},
{
"name": "CMake",
"bytes": "17118"
},
{
"name": "CSS",
"bytes": "7407"
},
{
"name": "Csound Score",
"bytes": "179"
},
{
"name": "DTrace",
"bytes": "1160"
},
{
"name": "Fortran",
"bytes": "14777"
},
{
"name": "GDB",
"bytes": "576"
},
{
"name": "Gherkin",
"bytes": "715430"
},
{
"name": "HTML",
"bytes": "169634"
},
{
"name": "Java",
"bytes": "268348"
},
{
"name": "JavaScript",
"bytes": "23969"
},
{
"name": "Lex",
"bytes": "196275"
},
{
"name": "M4",
"bytes": "102006"
},
{
"name": "Makefile",
"bytes": "420136"
},
{
"name": "PLSQL",
"bytes": "261269"
},
{
"name": "PLpgSQL",
"bytes": "5477026"
},
{
"name": "Perl",
"bytes": "3831299"
},
{
"name": "Perl6",
"bytes": "14219"
},
{
"name": "Python",
"bytes": "8653837"
},
{
"name": "Roff",
"bytes": "51338"
},
{
"name": "Ruby",
"bytes": "26724"
},
{
"name": "SQLPL",
"bytes": "3824391"
},
{
"name": "Shell",
"bytes": "527804"
},
{
"name": "XS",
"bytes": "8405"
},
{
"name": "XSLT",
"bytes": "5779"
},
{
"name": "Yacc",
"bytes": "488001"
}
],
"symlink_target": ""
} |
import sys, os.path
from optparse import OptionParser
def _parse_options():
    """Build the command-line parser and return (options, args)."""
    parser = OptionParser()
    parser.add_option("-v", "--verbosity",
        action = "store",
        dest = "verbosity",
        default = "1",
        type = "choice",
        choices = ["0", "1", "2", "3"],
        help = "Verbosity level; 0=minimal output, 1=normal output, 2=all output",
    )
    parser.add_option("--noinput",
        action = "store_false",
        dest = "interactive",
        default = True,
        help = "Tells Django to NOT prompt the user for input of any kind.",
    )
    parser.add_option("--failfast",
        action = "store_true",
        dest = "failfast",
        default = False,
        help = "Tells Django to stop running the test suite after first failed test.",
    )
    return parser.parse_args()


def _configure_django():
    """Configure Django settings for the test project and return them."""
    from django.conf import settings
    settings.configure(
        DEBUG = False,
        DATABASES = {
            "default": {
                "ENGINE": "django.db.backends.sqlite3",
            }
        },
        ROOT_URLCONF = "urls",
        INSTALLED_APPS = (
            "django.contrib.staticfiles",
            "optimizations",
            "test_optimizations",
        ),
        MIDDLEWARE_CLASSES = (
            "django.middleware.common.CommonMiddleware",
            "django.contrib.sessions.middleware.SessionMiddleware",
            "django.contrib.auth.middleware.AuthenticationMiddleware",
            "django.contrib.messages.middleware.MessageMiddleware",
        ),
        STATIC_URL = "/static/",
        STATIC_ROOT = os.path.join(os.path.dirname(__file__), "static"),
        MEDIA_URL = "/media/",
        MEDIA_ROOT = os.path.join(os.path.dirname(__file__), "media"),
        USE_TZ = True,
        TEST_RUNNER = "django.test.runner.DiscoverRunner",
    )
    # Run Django setup (1.7+); older versions have no django.setup().
    import django
    try:
        django.setup()
    except AttributeError:
        pass  # This is Django < 1.7
    return settings


def main():
    """Entry point: parse options, configure Django, run the test suite.

    Exits with the failure count as the process status when tests fail.
    """
    options, args = _parse_options()
    settings = _configure_django()
    # Configure the test runner.
    from django.test.utils import get_runner
    from django.core.management import call_command
    call_command("collectstatic", interactive=False)
    TestRunner = get_runner(settings)
    test_runner = TestRunner(
        verbosity = int(options.verbosity),
        interactive = options.interactive,
        failfast = options.failfast,
    )
    # Run the tests.
    failures = test_runner.run_tests(["test_optimizations"])
    if failures:
        sys.exit(failures)
# Script entry point: python runtests.py [options]
if __name__ == "__main__":
    main()
| {
"content_hash": "d1070e102786ceb6c680b928bb7e0bab",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 86,
"avg_line_length": 32.1,
"alnum_prop": 0.5794392523364486,
"repo_name": "etianen/django-optimizations",
"id": "f65aaa739f3050c5d0cb8dcc15e715254140ed2e",
"size": "2590",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/tests/runtests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "56"
},
{
"name": "JavaScript",
"bytes": "60"
},
{
"name": "Python",
"bytes": "77970"
}
],
"symlink_target": ""
} |
import os
import git
import logging
import yaml
from pecan import conf # noqa
logger = logging.getLogger(__name__)
# Template of the expected data nesting used by the validators:
# {'resources': {<rtype>: {<rid>: {...}}}}
RESOURCES_STRUCT = {'resources': {'rtype': {'key': {}}}}
class YAMLDBException(Exception):
    """Raised for malformed YAML files or an invalid resource structure."""
    pass
class YAMLBackend(object):
    def __init__(self, git_repo_url, git_ref, sub_dir,
                 clone_path, cache_path):
        """ Class to read and validate resources from YAML
        files stored in a GIT repository. The main data
        structure as well as resources structure must follow
        specific constraints.
        This Class also maintains a cache file to avoid full
        re-load and validation at init when ref hash has not
        changed.

        :param git_repo_url: The URI of the GIT repository
        :param git_ref: The GIT repository refs such as
            refs/zuul/master/Z3a46b05b4574472bbf093ff5562cba42
            or refs/heads/master
        :param sub_dir: The path from the GIT root to YAML files
        :param clone_path: The path where to clone the GIT repository
        :param cache_path: The path to the cached file
        """
        self.git_repo_url = git_repo_url
        self.git_ref = git_ref
        self.clone_path = clone_path
        self.cache_path = cache_path
        # NOTE: historical naming -- this expands to "<cache_path>__hash"
        # (double underscore); kept for compatibility with existing caches.
        self.cache_path_hash = "%s_%s" % (cache_path, '_hash')
        self.db_path = os.path.join(self.clone_path, sub_dir)
        self.rids = {}
        self.refresh()

    def _get_repo_hash(self):
        """Return the short hash of the clone's current HEAD."""
        repo = git.Git(self.clone_path)
        repo_hash = repo.execute(['git', '--no-pager', 'log', '-1',
                                  '--pretty=%h', 'HEAD'])
        return repo_hash

    def _get_cache_hash(self):
        """Return the repo hash recorded when the cache was last written."""
        # open() instead of the Python-2-only file() builtin; the context
        # manager guarantees the handle is closed.
        with open(self.cache_path_hash) as f:
            return f.read().strip()

    def _update_cache(self):
        """Write the loaded data and the current repo hash to the cache."""
        repo_hash = self._get_repo_hash()
        with open(self.cache_path, 'w') as f:
            yaml.dump(self.data, f)
        with open(self.cache_path_hash, 'w') as f:
            f.write(repo_hash)
        logger.info("Cache file has been updated.")

    def _load_from_cache(self):
        """Populate self.data from the cache when it matches current HEAD;
        leave self.data as None otherwise."""
        if not os.path.isfile(self.cache_path_hash):
            logger.info("No DB cache file found.")
        else:
            repo_hash = self._get_repo_hash()
            cached_repo_hash = self._get_cache_hash()
            if cached_repo_hash == repo_hash:
                with open(self.cache_path) as f:
                    self.data = yaml.safe_load(f)
                logger.info("Load data from the cache.")
            else:
                logger.info("DB cache is outdated.")

    def _update_git_clone(self):
        """Clone or update the repository and check out the requested ref."""
        repo = git.Git(self.clone_path)
        repo.init()
        try:
            repo.execute(['git', 'remote', 'add',
                          'origin', self.git_repo_url])
        except Exception:
            # The clone already has an origin remote; repoint it at the
            # configured URL.
            logger.info("Re-using the previous repo path %s" % self.clone_path)
            repo.execute(['git', 'remote', 'remove', 'origin'])
            repo.execute(['git', 'remote', 'add',
                          'origin', self.git_repo_url])
            logger.info("Update the previous remote origin to %s." % (
                self.git_repo_url))
        if self.git_ref != 'master' and not self.git_ref.startswith('refs/'):
            if self.git_ref == "master^1":
                # Keep that for compatibility SF < 2.4.0
                ref = 'FETCH_HEAD^1'
            else:
                # Here git_ref is a commit SHA or SHA^1
                ref = self.git_ref
            repo.execute(['git', 'fetch', 'origin', 'master'])
            repo.execute(['git', 'checkout', ref])
        else:
            repo.execute(['git', 'fetch', '-f', 'origin',
                          '%s:refs/remotes/origin/myref' % self.git_ref])
            repo.execute(['git', 'checkout', 'origin/myref'])
        logger.info("Updated GIT repo %s at ref %s." % (self.git_repo_url,
                                                        self.git_ref))

    def _load_db(self):
        """Read every YAML file under db_path, validating and merging each
        one into self.data."""
        def check_ext(f):
            return f.endswith('.yaml') or f.endswith('.yml')
        logger.info("Load data from the YAML files.")
        self.rids = {}
        yamlfiles = [f for f in os.listdir(self.db_path) if check_ext(f)]
        for f in yamlfiles:
            logger.info("Reading %s ..." % f)
            try:
                with open(os.path.join(self.db_path, f)) as yf:
                    yaml_data = yaml.safe_load(yf)
            except yaml.YAMLError:
                # Narrowed from a bare `except:`: only malformed YAML maps
                # to this message; I/O errors now propagate unchanged.
                raise YAMLDBException(
                    "YAML format corrupted in file %s" % (
                        os.path.join(self.db_path, f)))
            if not self.data:
                self.data = self.validate(yaml_data, self.rids)
            else:
                data_to_append = self.validate(yaml_data, self.rids)
                for rtype, resources in data_to_append['resources'].items():
                    if rtype not in self.data['resources']:
                        self.data['resources'][rtype] = {}
                    self.data['resources'][rtype].update(resources)

    @staticmethod
    def _validate_base_struct(data):
        """Check that *data* follows the RESOURCES_STRUCT nesting."""
        try:
            assert isinstance(data, type(RESOURCES_STRUCT))
            assert isinstance(data['resources'],
                              type(RESOURCES_STRUCT['resources']))
        except (AssertionError, KeyError):
            raise YAMLDBException(
                "The main resource data structure is invalid")
        try:
            for rtype, resources in data['resources'].items():
                assert isinstance(
                    rtype, type(RESOURCES_STRUCT['resources'].keys()[0]))
                assert isinstance(
                    resources, type(RESOURCES_STRUCT['resources']['rtype']))
        except AssertionError:
            raise YAMLDBException(
                "Resource type %s structure is invalid" % rtype)
        try:
            for rtype, resources in data['resources'].items():
                for rid, resource in resources.items():
                    assert isinstance(rid, str)
                    assert isinstance(
                        resource,
                        type(RESOURCES_STRUCT['resources']['rtype']['key']))
        except AssertionError:
            raise YAMLDBException(
                "Resource %s of type %s is invalid" % (resource, rtype))

    @staticmethod
    def _validate_rid_unicity(data, rids):
        """Reject resource IDs already seen in *rids* (across files)."""
        # Verify at YAML load time that duplicated resources key
        # are not present. To avoid overlapping of resources.
        # https://gist.github.com/pypt/94d747fe5180851196eb implements a
        # solution but seems difficult as it does not support the
        # safe_loader and usage of that loader is important to avoid
        # loading malicious yaml serialized objects.
        # Inside a single yaml file a reviewer should take care to
        # the duplicated keys issue and also look at the logs for
        # checking not expected detected changes.
        #
        # Nevertheless between two or more yaml files this check can be
        # implemented.
        for rtype, resources in data['resources'].items():
            for rid, resource in resources.items():
                rids.setdefault(rtype, {})
                if rid not in rids[rtype]:
                    rids[rtype][rid] = None
                else:
                    raise YAMLDBException(
                        "Duplicated resource ID detected for "
                        "resource type: %s id: %s" % (rtype, rid))

    def refresh(self):
        """ Reload of the YAML files.
        """
        self.data = None
        self._update_git_clone()
        self._load_from_cache()
        # Load from files. Cache is not up to date.
        if not self.data:
            self._load_db()
            self._update_cache()

    @staticmethod
    def validate(data, rids):
        """ Validate the resource data structure.
        """
        YAMLBackend._validate_base_struct(data)
        YAMLBackend._validate_rid_unicity(data, rids)
        return data

    def get_data(self):
        """ Return the full data structure.
        """
        return self.data
| {
"content_hash": "d846eca592576ae297ca0ea87aa0ec81",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 79,
"avg_line_length": 40.01990049751244,
"alnum_prop": 0.5435106911984088,
"repo_name": "enovance/managesf",
"id": "0f1835a5f927cb49e5e28d108a8a262cf5d89520",
"size": "8629",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "managesf/model/yamlbkd/yamlbackend.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "283851"
},
{
"name": "Shell",
"bytes": "831"
}
],
"symlink_target": ""
} |
import abc
import six
from st2common.runners.utils import get_logger_for_python_runner_action
@six.add_metaclass(abc.ABCMeta)
class Action(object):
    """
    Base action class other Python actions should inherit from.
    """
    # Optional human-readable description, overridden by subclasses.
    description = None
    def __init__(self, config=None, action_service=None):
        """
        :param config: Action config.
        :type config: ``dict``

        :param action_service: ActionService object.
        :type action_service: :class:`ActionService`
        """
        self.config = config or {}
        self.action_service = action_service
        # Logger is namespaced by the concrete action class name.
        self.logger = get_logger_for_python_runner_action(action_name=self.__class__.__name__)
    @abc.abstractmethod
    def run(self, **kwargs):
        # Subclasses implement the action logic here.
        pass
| {
"content_hash": "0bb1a522e05b7b625260a43ff1f09c1a",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 94,
"avg_line_length": 25.233333333333334,
"alnum_prop": 0.6354029062087186,
"repo_name": "tonybaloney/st2",
"id": "d58cca7ae64c347d34f2a194a8abdb09bab6bcca",
"size": "1537",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "st2common/st2common/runners/base_action.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "Makefile",
"bytes": "46066"
},
{
"name": "PowerShell",
"bytes": "299"
},
{
"name": "Python",
"bytes": "4278891"
},
{
"name": "Shell",
"bytes": "47687"
},
{
"name": "Slash",
"bytes": "677"
}
],
"symlink_target": ""
} |
import pyfuzzy_toolbox.features.selection as selection
import os
# Resolve the ARFF fixture relative to this file so the tests work
# regardless of the current working directory.
path_name = os.path.dirname(os.path.realpath(__file__))
arff_file_test = path_name + '/cornell_movies_test.arff'
def test_features_selection_10_fold_crossvalidation_seed_1_min_folds_10_CfsSubsetEval():
    """Selected attribute names with min_folds=10 match the known baseline."""
    attributes = selection.select_attributes(arff_file_test, 10, 1, True, 10, search='BestFirst', evaluation='CfsSubsetEval')
    expected_names = [
        'positive_to_negative_ratio_of_adjectives_count_and_bigrams_with_adjectives',
        'positive_to_negative_ratio_of_adjectives_sum',
        'positive_adjectives_sum_and_bigrams_with_adjectives',
        'positive_to_negative_ratio_of_adjectives_sum_and_bigrams_with_adjectives',
        'positive_to_negative_ratio_of_unigrams_and_bigrams_sum',
        'sum_ratio_of_negative_unigrams_bigrams_and_trigrams',
    ]
    assert [attribute['name'] for attribute in attributes] == expected_names
def test_features_selection_10_fold_crossvalidation_seed_1_min_folds_9_CfsSubsetEval():
    """Selected attribute names with min_folds=9 match the known baseline."""
    attributes = selection.select_attributes(arff_file_test, 10, 1, True, 9, search='BestFirst', evaluation='CfsSubsetEval')
    expected_names = [
        'positive_to_negative_ratio_of_adjectives_count_and_bigrams_with_adjectives',
        'positive_to_negative_ratio_of_adjectives_sum',
        'positive_adjectives_sum_and_bigrams_with_adjectives',
        'positive_to_negative_ratio_of_adjectives_sum_and_bigrams_with_adjectives',
        'sum_ratio_of_positive_adjectives_and_bigrams_with_adjectives',
        'positive_to_negative_ratio_of_unigrams_and_bigrams_sum',
        'sum_ratio_of_negative_unigrams_bigrams_and_trigrams',
    ]
    assert [attribute['name'] for attribute in attributes] == expected_names
| {
"content_hash": "b9723de4b5b88b206b1db918708da965",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 131,
"avg_line_length": 72.71428571428571,
"alnum_prop": 0.7696463654223968,
"repo_name": "matheuscas/pyfuzzy_toolbox",
"id": "2719eb250b5aa510fa3dbdec3f4fb0d6b6f2fe1e",
"size": "2036",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_feature_selection.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1531"
},
{
"name": "Python",
"bytes": "213512"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from collections import OrderedDict
import pygion
from pygion import task, Fspace, R, Region, RW
# FIXME: Need a better way to determine task IDs.
# Regent-side "hello" task: takes (int64, float64) and returns an int64.
hello = pygion.extern_task(
    task_id=10000,
    argument_types=[pygion.int64, pygion.float64],
    return_type=pygion.int64,
    calling_convention='regent')
# Regent-side "saxpy" task: mutates the region in place (RW privilege).
saxpy = pygion.extern_task(
    task_id=10001,
    argument_types=[Region, pygion.float64],
    privileges=[RW],
    calling_convention='regent')
@task(privileges=[R])
def check(r):
    """Assert every x value equals the expected SAXPY result (1.5*1 + 2)."""
    # assumes r.x supports numpy-style elementwise compare with .all()
    # -- TODO confirm against pygion field accessor semantics.
    assert (r.x == 3.5).all()
    print('results validated successfully')
# This task needs an explicit ID so that Regent knows what to call.
@task(task_id=2)
def main():
    """Driver task: call the Regent hello and saxpy tasks, then validate."""
    print('hello from Python')
    hello_result = hello(1234, 3.14)
    print('Python got result from Regent task: %s' % hello_result.get())
    print('creating a field space with two fields')
    # Note: Need to use OrderedDict so that the field ordering matches Regent.
    fields = Fspace(OrderedDict([('x', pygion.float64), ('y', pygion.float64)]))
    print('creating a region with 12 elements')
    region = Region([12], fields)
    pygion.fill(region, 'x', 1)
    pygion.fill(region, 'y', 2)
    alpha = 1.5
    print('calling SAXPY task in Regent')
    saxpy(region, alpha)
    check(region)
| {
"content_hash": "0e42b1a4e7b0ef8bab66630bce7fda89",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 78,
"avg_line_length": 26.53191489361702,
"alnum_prop": 0.6639935846030474,
"repo_name": "StanfordLegion/legion",
"id": "2e978e27c4fd54819c7e43d46c16e97bbc34d383",
"size": "1857",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable",
"path": "language/tests/python/run_pass/python_main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "335761"
},
{
"name": "C++",
"bytes": "17156793"
},
{
"name": "CMake",
"bytes": "240564"
},
{
"name": "Cuda",
"bytes": "29542"
},
{
"name": "Dockerfile",
"bytes": "612"
},
{
"name": "Fortran",
"bytes": "346250"
},
{
"name": "HTML",
"bytes": "3653"
},
{
"name": "JavaScript",
"bytes": "94778"
},
{
"name": "Makefile",
"bytes": "119231"
},
{
"name": "Perl",
"bytes": "145756"
},
{
"name": "Python",
"bytes": "1661733"
},
{
"name": "Raku",
"bytes": "34306"
},
{
"name": "Rouge",
"bytes": "2303312"
},
{
"name": "Rust",
"bytes": "222951"
},
{
"name": "Shell",
"bytes": "12892"
},
{
"name": "Terra",
"bytes": "1709732"
}
],
"symlink_target": ""
} |
# Appears auto-generated: build one pyaf model-control configuration for the
# ozone dataset with the Logit transform / MovingAverage trend / NoCycle / AR
# combination.
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Logit'] , ['MovingAverage'] , ['NoCycle'] , ['AR'] ); | {
"content_hash": "ae7e330c42538633402895fef41fb5f6",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 76,
"avg_line_length": 37.25,
"alnum_prop": 0.697986577181208,
"repo_name": "antoinecarme/pyaf",
"id": "f7e5bcfa3a2e15ea6b2e59c52778032929d9a115",
"size": "149",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_MovingAverage_NoCycle_AR.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
# Column keys of the EHFPI export. This list is parallel to fieldDes below
# (same length and order); fieldDic pairs them one-to-one.
field = ['ehfpiAcc', 'geneSymbol', 'geneDescription', 'previousName', 'synonyms', 'entrezId',
         'uniprotId', 'proteinName', 'ensemblGeneId', 'targetOrganism', 'drosophilaGene','humanHomolog', 'go',
         'pathway', 'isVTP', 'resources', 'geneNote', 'fullName', 'abbreviation', 'aliases', 'strain',
         'species', 'genus', 'family', 'group', 'kingdom', 'title', 'author', 'pubmedId', 'year', 'journal',
         'abstract', 'doi', 'scope', 'assayType', 'reagent', 'phenotype', 'bioModelDescription',
         'confirmatoryScreenDescription', 'primaryScreenDescription', 'hitsNumber', 'confirmedHitsNumber',
         'primaryHitsNumber', 'screenNote']
# Human-readable labels for the keys in `field`, in the same order.
fieldDes = ['EHFPI Accession', 'Gene Symbol', 'Gene Description', 'Previous Name', 'Gene Synonyms',
            'Entrez Gene ID', 'UniProt ID', 'Protein Name', 'Ensembl Gene ID',
            'Target Organism', 'Drosophila Gene','Human Homolog', 'GO', 'Pathway', 'isVTP', 'Resources', 'Gene Note',
            'Pathogen Full Name', 'Pathogen Abbreviation', 'Pathogen Aliases', 'Pathogen(Strain)',
            'Pathogen(Species)', 'Pathogen(Genus)', 'Pathogen(Family)', 'Pathogen(Group)', 'Pathogen(Kingdom)',
            'Title', 'Author', 'Pubmed ID', 'Year', 'Journal', 'abstract', 'doi', 'Scope', 'Assay Type ',
            'Reagent', 'Phenotype', 'BioModel Description', 'confirmatory Screen Description',
            'primary Screen Description', 'Hits Number', 'ConfirmedHits Number', 'PrimaryHits Number',
            'Screen Note']
# Key -> label mapping. This literal duplicates the pairing of `field` and
# `fieldDes` above (it equals dict(zip(field, fieldDes))); keep all three
# definitions in sync when adding or renaming a column.
fieldDic = {'ehfpiAcc': 'EHFPI Accession',
            'geneSymbol': 'Gene Symbol',
            'geneDescription': 'Gene Description',
            'previousName': 'Previous Name',
            'synonyms': 'Gene Synonyms',
            'entrezId': 'Entrez Gene ID',
            'uniprotId': 'UniProt ID',
            'proteinName': 'Protein Name',
            'ensemblGeneId': 'Ensembl Gene ID',
            'targetOrganism': 'Target Organism',
            'drosophilaGene':'Drosophila Gene',
            'humanHomolog': 'Human Homolog',
            'go': 'GO',
            'pathway': 'Pathway',
            'isVTP': 'isVTP',
            'resources': 'Resources',
            'geneNote': 'Gene Note',
            'fullName': 'Pathogen Full Name',
            'abbreviation': 'Pathogen Abbreviation',
            'aliases': 'Pathogen Aliases',
            'strain': 'Pathogen(Strain)',
            'species': 'Pathogen(Species)',
            'genus': 'Pathogen(Genus)',
            'family': 'Pathogen(Family)',
            'group': 'Pathogen(Group)',
            'kingdom': 'Pathogen(Kingdom)',
            'title': 'Title',
            'author': 'Author',
            'pubmedId': 'Pubmed ID',
            'year': 'Year',
            'journal': 'Journal',
            'abstract': 'abstract',
            'doi': 'doi',
            'scope': 'Scope',
            'assayType': 'Assay Type ',
            'reagent': 'Reagent',
            'phenotype': 'Phenotype',
            'bioModelDescription': 'BioModel Description',
            'confirmatoryScreenDescription': 'confirmatory Screen Description',
            'primaryScreenDescription': 'primary Screen Description',
            'hitsNumber': 'Hits Number',
            'confirmedHitsNumber': 'ConfirmedHits Number',
            'primaryHitsNumber': 'PrimaryHits Number',
            'screenNote': 'Screen Note'
            } | {
"content_hash": "9b020ce63fc85a8a8c4d7f7cf270d32a",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 129,
"avg_line_length": 64.44444444444444,
"alnum_prop": 0.4682266009852217,
"repo_name": "polojacky/ehfpi",
"id": "c379d4b9076f9e8abe99c065c2a5a6d53d35b231",
"size": "4092",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ehf/ehf/commonVar.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "429195"
},
{
"name": "Go",
"bytes": "7075"
},
{
"name": "JavaScript",
"bytes": "1950204"
},
{
"name": "PHP",
"bytes": "52571"
},
{
"name": "Python",
"bytes": "1537261"
},
{
"name": "Ruby",
"bytes": "879"
},
{
"name": "Stata",
"bytes": "9220"
}
],
"symlink_target": ""
} |
"""
Django settings for CloudConfigWebserver project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'nx36-u337lp#51h4_q$j98eemxdxl3(3dq9t-6@^a6tjkn73bf'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
# Deliberately empty: no Django apps are installed for this server.
INSTALLED_APPS = (
)
# Only CommonMiddleware is enabled; CSRF/clickjacking/security middleware
# are commented out below.
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    # 'django.middleware.csrf.CsrfViewMiddleware',
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # 'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'webserver.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
            ],
        },
    },
]
WSGI_APPLICATION = 'webserver.wsgi.application'
# SQLite database stored next to the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = False
USE_L10N = False
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
| {
"content_hash": "7534a29f4dde4ccc0fd649b872e986f3",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 71,
"avg_line_length": 24.924050632911392,
"alnum_prop": 0.6835957338750634,
"repo_name": "remohammadi/coreos-cloud-bootstrapper",
"id": "95e7dc39f06f8b0a18fb8aebdbf6da1360357e70",
"size": "1969",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bootstrapper-docker/CloudConfigWebserver/webserver/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "395"
},
{
"name": "Python",
"bytes": "3502"
},
{
"name": "Shell",
"bytes": "7784"
}
],
"symlink_target": ""
} |
import sys
import serial
# import struct
import time
import math
import colorsys
import Queue
import threading
# Open the DMX output device.
# NOTE(review): opened in text mode ('w') although raw bytearray frames are
# written below - confirm binary mode ('wb') is not required on this platform.
ser = open("/dev/dmx0",'w')
nullstr = u'00 ' * 513 # must start with null byte
Ustr = u'00 ' * 513 # must start with null byte
# 513-byte DMX frame: slot 0 is the start code (null), slots 1-512 are channels.
buf = bytearray.fromhex(Ustr)
# Ramp channels 4-7 from 0 to 254 forever, writing one full frame per step.
while(True):
    for i in range(255):
        buf[4] = i
        buf[5] = i
        buf[6] = i
        buf[7] = i
        # and send it
        ser.write(buf)
        ser.flush()
        #print "wrote val %d " % i
        #time.sleep(0.03)
        sys.stdout.flush()
    print "255 frames" + repr(len(buf))
    print repr(buf[4])
    sys.stdout.flush()
| {
"content_hash": "d0bed4309d75cd8f51651d2ebbd51307",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 51,
"avg_line_length": 17.10810810810811,
"alnum_prop": 0.5671406003159558,
"repo_name": "headrotor/aurora-server",
"id": "997eaddc0922dc88e9e33053ced67e54418bbafc",
"size": "918",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moddmx.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "25431"
},
{
"name": "JavaScript",
"bytes": "22739"
},
{
"name": "Python",
"bytes": "65289"
},
{
"name": "Shell",
"bytes": "125"
}
],
"symlink_target": ""
} |
import sys, os
sys.path.append(os.pardir) # 親ディレクトリのファイルをインポートするための設定
import numpy as np
import matplotlib.pyplot as plt
from dataset.mnist import load_mnist
from simple_convnet import SimpleConvNet
from common.trainer import Trainer
# Load the MNIST dataset (as 2D images rather than flattened vectors)
(x_train, t_train), (x_test, t_test) = load_mnist(flatten=False)
# Reduce the dataset if processing takes too long
# x_train, t_train = x_train[:5000], t_train[:5000]
# x_test, t_test = x_test[:1000], t_test[:1000]
max_epochs = 20
network = SimpleConvNet(input_dim=(1, 28, 28),
                        conv_param={'filter_num': 30, 'filter_size': 5, 'pad': 0, 'stride': 1},
                        hidden_size=100, output_size=10, weight_init_std=0.01)
trainer = Trainer(network, x_train, t_train, x_test, t_test,
                  epochs=max_epochs, mini_batch_size=100,
                  optimizer='Adam', optimizer_param={'lr': 0.001},
                  evaluate_sample_num_per_epoch=1000)
trainer.train()
# Save the trained parameters next to this script
network.save_params(os.path.dirname(os.path.abspath(__file__)) + "/params.pkl")
print("Saved Network Parameters!")
# Plot train/test accuracy per epoch
# (the `markers` dict is declared but the plots below pass markers literally)
markers = {'train': 'o', 'test': 's'}
x = np.arange(max_epochs)
plt.plot(x, trainer.train_acc_list, marker='o', label='train', markevery=2)
plt.plot(x, trainer.test_acc_list, marker='s', label='test', markevery=2)
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.ylim(0, 1.0)
plt.legend(loc='lower right')
plt.show()
| {
"content_hash": "bc522fe86aa47f47290331332f7e8b46",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 95,
"avg_line_length": 33.75609756097561,
"alnum_prop": 0.6575144508670521,
"repo_name": "kgsn1763/deep-learning-from-scratch",
"id": "a0afa1e38ac36ff2e72448bbdad8f47d1f0312ac",
"size": "1553",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ch07/train_convnet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "95615"
}
],
"symlink_target": ""
} |
class Private:
    """Restrict instance attributes to a fixed set of declared names.

    Values live in a private dict; assigning any name outside the declared
    set raises TypeError. Name-mangled internals bypass the restriction so
    __init__ can bootstrap the instance.
    """
    def __init__(self, names):
        # Declared attribute names and their backing store.
        self.__names = names
        self.__data = {}
    def __getattr__(self, name):
        # Invoked only when normal lookup fails, i.e. for managed names.
        if name in self.__names:
            try:
                return self.__data[name]
            except KeyError:
                # Bug fix: a declared-but-unset attribute previously leaked
                # KeyError, which broke hasattr()/getattr(obj, name, default).
                # The attribute protocol requires AttributeError here.
                raise AttributeError(name)
        raise AttributeError(name)
    def __setattr__(self, name, value):
        # Mangled internals (_Private__names/_Private__data) go straight to
        # __dict__ so construction works before the restriction applies.
        if name.startswith("_Private"):
            self.__dict__[name] = value
            return
        if name in self.__names:
            self.__data[name] = value
            return
        raise TypeError("cannot set the attribute %r" % (name,))
class Person(Private):
    """A person record restricted to a fixed set of personal-data fields."""
    def __init__(self, parent = None):
        allowed = [
            "first_name", "last_name", "id", "age",
            "city", "addr", "parent",
        ]
        Private.__init__(self, allowed)
        self.parent = parent
    def __str__(self):
        # "First Last" with each part normalized to title case.
        first = self.first_name.lower().title()
        last = self.last_name.lower().title()
        return "%s %s" % (first, last)
    def new_child(self):
        # A child keeps a back-reference to this person as its parent.
        return Person(self)
| {
"content_hash": "63b178bd2b9953eb6db698295179a29e",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 98,
"avg_line_length": 30.032258064516128,
"alnum_prop": 0.5241675617615468,
"repo_name": "janusnic/21v-python",
"id": "5649417874881e8c754b86312f30d69804b0ccb7",
"size": "961",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unit_09/person.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "369"
},
{
"name": "Python",
"bytes": "990972"
},
{
"name": "SQLPL",
"bytes": "147"
}
],
"symlink_target": ""
} |
import os
import platform
import subprocess
from os.path import join
import six
# 3rd party libs
from nose.plugins.skip import SkipTest
from git import *
# PyGitup imports
from tests import basepath, write_file, init_master
# Name of this integration test; also used as the branch and directory name.
test_name = 'bundler'
# Path of the cloned test repository (with trailing separator).
repo_path = join(basepath, test_name + os.sep)
def setup():
    """Create a master repo on a 'bundler' branch and clone it for the test."""
    master_path, master = init_master(test_name)
    # Work on a dedicated branch of the master repo.
    master.git.checkout(b=test_name)
    # Commit a minimal Gemfile so bundler has something to check.
    gemfile_path = join(master_path, 'Gemfile')
    write_file(gemfile_path, "source 'https://rubygems.org'\ngem 'colored'")
    master.index.add([gemfile_path])
    master.index.commit(test_name)
    # Clone the branch into the test directory and enable PyGitUp's
    # bundler check for that clone.
    clone_path = join(basepath, test_name)
    master.clone(clone_path, b=test_name)
    cloned = Repo(clone_path, odbt=GitCmdObjectDB)
    cloned.git.config('git-up.bundler.check', 'true')
    assert cloned.working_dir == clone_path
def test_bundler():
    """ Run bundler integration """
    # On Windows subprocess needs shell=True to resolve .bat/.cmd wrappers.
    shell = True if platform.system() == 'Windows' else False
    if os.environ.get('TRAVIS', False):
        raise SkipTest('Skip this test on Travis CI :(')
    # Helper methods
    def is_installed(prog):
        # Probe for an executable by running "<prog> --version" and discarding
        # output; OSError means the binary could not be launched at all.
        dev_null = open(os.devnull, 'wb')
        try:
            return_value = subprocess.call([prog, '--version'], shell=shell,
                                           stdout=dev_null, stderr=dev_null)
            return return_value == 0
        except OSError:
            return False
    def get_output(cmd):
        return str(subprocess.check_output(cmd, shell=shell))
    # Check for ruby and bundler
    if not (is_installed('ruby') and is_installed('gem')
            and 'bundler' in get_output(['gem', 'list'])):
        # Ruby not installed, skip test
        raise SkipTest('Ruby not installed, skipped Bundler integration test')
    # Run PyGitUp inside the repo prepared by setup(); import is deferred,
    # presumably so collection doesn't require PyGitUp when skipping.
    os.chdir(repo_path)
    from PyGitUp.gitup import GitUp
    gitup = GitUp(testing=True)
    gitup.run()
| {
"content_hash": "d6f04ab431e2bf0952a7b80886321f92",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 78,
"avg_line_length": 26.23611111111111,
"alnum_prop": 0.6363155108523028,
"repo_name": "christer155/PyGitUp",
"id": "72abdcf5a95086db110a06b79aacd01e864cdf7c",
"size": "1906",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_bundler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62253"
},
{
"name": "Ruby",
"bytes": "1081"
}
],
"symlink_target": ""
} |
import mongoengine as me
from st2common import log as logging
from st2common.models.db import MongoDBAccess
from st2common.models.db import stormbase
from st2common.constants.types import ResourceType
# Public API of this module.
__all__ = [
    'ActionAliasDB'
]
LOG = logging.getLogger(__name__)
# Separator between pack name and resource name in references.
PACK_SEPARATOR = '.'
class ActionAliasDB(stormbase.StormBaseDB, stormbase.ContentPackResourceMixin,
                    stormbase.UIDFieldMixin):
    """
    Database entity that represents an Alias for an action.

    Attributes:
        pack: Pack to which this alias belongs to.
        name: Alias name.
        ref: Alias reference (pack + name).
        enabled: A flag indicating whether this alias is enabled in the system.
        action_ref: Reference of an action this alias belongs to.
        formats: Alias format strings.
    """
    RESOURCE_TYPE = ResourceType.ACTION
    UID_FIELDS = ['pack', 'name']
    ref = me.StringField(required=True)
    pack = me.StringField(
        required=True,
        help_text='Name of the content pack.')
    enabled = me.BooleanField(
        required=True, default=True,
        help_text='A flag indicating whether the action alias is enabled.')
    action_ref = me.StringField(
        required=True,
        help_text='Reference of the Action map this alias.')
    formats = me.ListField(
        field=me.StringField(),
        help_text='Possible parameter formats that an alias supports.')
    meta = {
        'indexes': ['name']
    }
    def __init__(self, *args, **values):
        super(ActionAliasDB, self).__init__(*args, **values)
        # Derive the pack-qualified reference and UID from pack + name so
        # they always stay consistent with the mixin-provided fields.
        self.ref = self.get_reference().ref
        self.uid = self.get_uid()
# specialized access objects
actionalias_access = MongoDBAccess(ActionAliasDB)
# NOTE(review): presumably consumed by the persistence/registration layer -
# verify against callers before renaming.
MODELS = [ActionAliasDB]
| {
"content_hash": "5f94034c1c2c33b6dbb05e71b2040289",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 79,
"avg_line_length": 28.403225806451612,
"alnum_prop": 0.6604202157864849,
"repo_name": "alfasin/st2",
"id": "1757cc408a95caa3cccee1318a446bdc773bf571",
"size": "2541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "st2common/st2common/models/db/actionalias.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "Makefile",
"bytes": "36110"
},
{
"name": "PowerShell",
"bytes": "299"
},
{
"name": "Python",
"bytes": "2907491"
},
{
"name": "Shell",
"bytes": "16363"
},
{
"name": "Slash",
"bytes": "677"
}
],
"symlink_target": ""
} |
import psychic_disco.util as util
class FakeLambdaConsole(object):
    """Fake driver standing in for the AWS Lambda console API."""
    def _open_relative_json(self, json_path):
        # Resolve the fixture path relative to this module, then parse it.
        fixture = util.relative_path(__file__, json_path)
        return util.open_json_file_as_dict(fixture)
    def list_functions(self):
        return self._open_relative_json("list_functions.json")
    def get_function(self, **kwargs):
        return self._open_relative_json("get_function.json")
    def update_function_code(self, **kwargs):
        # Verify the exact arguments the code under test is expected to send.
        expected = {
            'FunctionName': 'os-listdir',
            'S3Bucket': "mybucket",
            'S3Key': 'mykey',
        }
        for key, value in expected.items():
            assert kwargs[key] == value
    def create_function(self, **kwargs):
        assert kwargs['FunctionName'] == 'os-listdir'
        assert kwargs['Handler'] == 'os.listdir'
        assert kwargs['Runtime'] == 'python2.7'
        assert kwargs['Role'] == '#1'
        assert kwargs['Code'] == {
            "S3Bucket": "mybucket",
            "S3Key": "mykey"
        }
| {
"content_hash": "aeaf3b1fdc0f365be04ed50827e4b46c",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 62,
"avg_line_length": 34.48275862068966,
"alnum_prop": 0.589,
"repo_name": "robertdfrench/psychic-disco",
"id": "54127a7e038fe5d7a359081077d5ef3e7eddfb28",
"size": "1000",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "psychic_disco/tests/test_lambda_function/fake_gateway.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1504"
},
{
"name": "Python",
"bytes": "37702"
}
],
"symlink_target": ""
} |
from unittest import mock
import graphene
from .....checkout import base_calculations
from .....checkout.fetch import fetch_checkout_info, fetch_checkout_lines
from .....checkout.utils import (
add_variant_to_checkout,
add_voucher_to_checkout,
calculate_checkout_quantity,
invalidate_checkout_prices,
)
from .....plugins.manager import get_plugins_manager
from .....warehouse.models import Reservation
from ....core.utils import to_global_id_or_none
from ....tests.utils import get_graphql_content
from ...mutations.utils import update_checkout_shipping_method_if_invalid
MUTATION_CHECKOUT_LINE_DELETE = """
mutation checkoutLineDelete($id: ID, $lineId: ID!) {
checkoutLineDelete(id: $id, lineId: $lineId) {
checkout {
token
lines {
quantity
variant {
id
}
}
}
errors {
field
message
}
}
}
"""
@mock.patch(
    "saleor.graphql.checkout.mutations.checkout_line_delete."
    "update_checkout_shipping_method_if_invalid",
    wraps=update_checkout_shipping_method_if_invalid,
)
@mock.patch(
    "saleor.graphql.checkout.mutations.checkout_line_delete."
    "invalidate_checkout_prices",
    wraps=invalidate_checkout_prices,
)
def test_checkout_line_delete(
    mocked_invalidate_checkout_prices,
    mocked_update_shipping_method,
    user_api_client,
    checkout_line_with_reservation_in_many_stocks,
):
    """Deleting a checkout line removes it, releases its stock reservations,
    revalidates the shipping method and invalidates cached prices."""
    # given: a single line (quantity 3) backed by two stock reservations
    assert Reservation.objects.count() == 2
    checkout = checkout_line_with_reservation_in_many_stocks.checkout
    previous_last_change = checkout.last_change
    lines, _ = fetch_checkout_lines(checkout)
    assert calculate_checkout_quantity(lines) == 3
    assert checkout.lines.count() == 1
    line = checkout.lines.first()
    assert line.quantity == 3
    line_id = graphene.Node.to_global_id("CheckoutLine", line.pk)
    variables = {"id": to_global_id_or_none(checkout), "lineId": line_id}
    # when: the line is deleted through the GraphQL mutation
    response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINE_DELETE, variables)
    content = get_graphql_content(response)
    data = content["data"]["checkoutLineDelete"]
    assert not data["errors"]
    # then: the checkout is empty and all reservations are released
    checkout.refresh_from_db()
    lines, _ = fetch_checkout_lines(checkout)
    assert checkout.lines.count() == 0
    assert calculate_checkout_quantity(lines) == 0
    assert Reservation.objects.count() == 0
    manager = get_plugins_manager()
    checkout_info = fetch_checkout_info(checkout, lines, [], manager)
    mocked_update_shipping_method.assert_called_once_with(checkout_info, lines)
    assert checkout.last_change != previous_last_change
    assert mocked_invalidate_checkout_prices.call_count == 1
def test_checkout_lines_delete_with_not_applicable_voucher(
    user_api_client, checkout_with_item, voucher, channel_USD
):
    """Deleting the only line detaches a voucher that no longer meets its
    minimum-spend requirement."""
    # given: a voucher whose min spend equals the checkout's current subtotal
    manager = get_plugins_manager()
    lines, _ = fetch_checkout_lines(checkout_with_item)
    checkout_info = fetch_checkout_info(checkout_with_item, lines, [], manager)
    subtotal = base_calculations.base_checkout_subtotal(
        lines,
        checkout_info.channel,
        checkout_info.checkout.currency,
    )
    voucher.channel_listings.filter(channel=channel_USD).update(
        min_spent_amount=subtotal.amount
    )
    checkout_info = fetch_checkout_info(checkout_with_item, lines, [], manager)
    add_voucher_to_checkout(manager, checkout_info, lines, voucher)
    assert checkout_with_item.voucher_code == voucher.code
    # when: the only line is removed
    line = checkout_with_item.lines.first()
    line_id = graphene.Node.to_global_id("CheckoutLine", line.pk)
    variables = {"id": to_global_id_or_none(checkout_with_item), "lineId": line_id}
    response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINE_DELETE, variables)
    content = get_graphql_content(response)
    data = content["data"]["checkoutLineDelete"]
    assert not data["errors"]
    # then: the voucher is no longer applicable and is dropped
    checkout_with_item.refresh_from_db()
    assert checkout_with_item.lines.count() == 0
    assert checkout_with_item.voucher_code is None
def test_checkout_line_delete_remove_shipping_if_removed_product_with_shipping(
    user_api_client, checkout_with_item, digital_content, address, shipping_method
):
    """Removing the last physical line clears the shipping method when only a
    digital (non-shippable) line remains."""
    checkout = checkout_with_item
    digital_variant = digital_content.product_variant
    checkout.shipping_address = address
    checkout.shipping_method = shipping_method
    checkout.save()
    checkout_info = fetch_checkout_info(checkout, [], [], get_plugins_manager())
    add_variant_to_checkout(checkout_info, digital_variant, 1)
    # NOTE(review): assumes .first() returns the physical fixture line rather
    # than the digital one just added - confirm line ordering.
    line = checkout.lines.first()
    line_id = graphene.Node.to_global_id("CheckoutLine", line.pk)
    variables = {"id": to_global_id_or_none(checkout), "lineId": line_id}
    response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINE_DELETE, variables)
    content = get_graphql_content(response)
    data = content["data"]["checkoutLineDelete"]
    assert not data["errors"]
    checkout.refresh_from_db()
    assert checkout.lines.count() == 1
    assert not checkout.shipping_method
| {
"content_hash": "80155315df82c14342369a7d42f7565a",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 85,
"avg_line_length": 36.48571428571429,
"alnum_prop": 0.6836335160532498,
"repo_name": "mociepka/saleor",
"id": "b1320c07067c1a179d19466fb75f1ecf388bce72",
"size": "5108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saleor/graphql/checkout/tests/mutations/test_checkout_line_delete.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "2228"
},
{
"name": "HTML",
"bytes": "249248"
},
{
"name": "Procfile",
"bytes": "290"
},
{
"name": "Python",
"bytes": "12686831"
},
{
"name": "Shell",
"bytes": "439"
}
],
"symlink_target": ""
} |
"""
Logplex instrumentation.
Usage:
lplex <message>... [--proc=<proc>] [--token=<token>]
lplex -h | --help
Options:
-h --help Show this screen.
--version Show version.
"""
import os
import json
from datetime import datetime, timedelta
from logplex import Logplex
from docopt import docopt
# Default Logplex credentials/endpoint, taken from the environment
# (None when unset).
LOG_TOKEN = os.environ.get('LOG_TOKEN')
LOGPLEX_URL = os.environ.get('LOGPLEX_URL')
def dispatch_cli(args):
    """Send the CLI message to Logplex.

    Args:
        args: docopt argument dict for the ``lplex`` command.
    """
    message = ' '.join(args.get('<message>', []))
    proc = args.get('--proc') or 'buildpack'
    # Bug fix: docopt always includes the '--token' key (None when the option
    # is absent), so dict.get's default never fired and LOG_TOKEN was never
    # used. Fall back explicitly, mirroring the '--proc' handling above.
    token = args.get('--token') or LOG_TOKEN
    lp = Logplex(token=token, url=LOGPLEX_URL)
    lp.procid = proc
    lp.puts(message)
def main():
    """CLI entry point: parse arguments, dispatch, then exit."""
    arguments = docopt(__doc__, version='Logplex')
    # The previous `try: ... except Exception: raise` was a no-op wrapper;
    # let exceptions propagate naturally.
    dispatch_cli(arguments)
    exit()
if __name__ == '__main__':
    main()
| {
"content_hash": "55bd031ac76aa776424f2a57003c15b9",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 54,
"avg_line_length": 18.41304347826087,
"alnum_prop": 0.615112160566706,
"repo_name": "kennethreitz/lplex",
"id": "eb6f2f95e9b47065167daf768924d305de468c39",
"size": "870",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lp_cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "684371"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
	"""Build the Tangible template for the broken-taloscope loot item.

	Appears to be an auto-generated scaffold; hand edits belong between the
	MODIFICATIONS markers only.
	"""
	result = Tangible()
	result.template = "object/tangible/loot/tool/shared_taloscope_broken.iff"
	result.attribute_template_id = -1
	result.stfName("item_n","taloscope_broken")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
	return result | {
"content_hash": "63f1a6d15b42acfaf902dabb500642a0",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 74,
"avg_line_length": 23.692307692307693,
"alnum_prop": 0.698051948051948,
"repo_name": "anhstudios/swganh",
"id": "b08932cc59563ae76705bab96685c895da8906d5",
"size": "453",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/loot/tool/shared_taloscope_broken.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals, print_function
# Default side length of the Go board (standard full-size board).
BOARD_DIMENSION = 19
# Inclusive bounds for configurable board sizes.
MIN_BOARD_DIMENSION = 5
MAX_BOARD_DIMENSION = 19
| {
"content_hash": "b6be2e7cdfe6f38c1b48d73e324c5646",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 72,
"avg_line_length": 28.8,
"alnum_prop": 0.7708333333333334,
"repo_name": "vail130/tenzen",
"id": "3bc489840e72909bb7ea061137c4b58c8bd23704",
"size": "144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tenzen/constants.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "239"
},
{
"name": "Python",
"bytes": "15669"
}
],
"symlink_target": ""
} |
'''module for connectors'''
# Declare this package as a pkg_resources-style namespace package so other
# distributions can contribute modules under the same package name.
__import__('pkg_resources').declare_namespace(__name__)
| {
"content_hash": "6ba7a59731a28dbe9884e70b67913ac1",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 55,
"avg_line_length": 42,
"alnum_prop": 0.6785714285714286,
"repo_name": "tomncooper/heron",
"id": "6cba8a20d5a6d05a556419017c5d8087c0c1da9a",
"size": "869",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "heronpy/connectors/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "14063"
},
{
"name": "C++",
"bytes": "1722746"
},
{
"name": "CSS",
"bytes": "77709"
},
{
"name": "HCL",
"bytes": "5314"
},
{
"name": "HTML",
"bytes": "39228"
},
{
"name": "Java",
"bytes": "4744099"
},
{
"name": "JavaScript",
"bytes": "1107129"
},
{
"name": "M4",
"bytes": "18741"
},
{
"name": "Makefile",
"bytes": "1046"
},
{
"name": "Objective-C",
"bytes": "2143"
},
{
"name": "Perl",
"bytes": "9298"
},
{
"name": "Python",
"bytes": "1692443"
},
{
"name": "Ruby",
"bytes": "1930"
},
{
"name": "Scala",
"bytes": "95609"
},
{
"name": "Shell",
"bytes": "195923"
},
{
"name": "Smarty",
"bytes": "528"
}
],
"symlink_target": ""
} |
from subprocess import call
# NOTE(review): BUILD_DIR and FONTa are defined but unused below; text2image
# hard-codes the font name - confirm before removing.
BUILD_DIR='build'
FONTa='Fake Receipt'
# Language code of the traineddata being produced.
LANG='hb'
# Common basename for all intermediate training artifacts.
OUTPUTBASE = LANG + '.' + FONTa
def call_shell(command):
    """Run a whitespace-split shell command, then echo the command line."""
    call(command.split())
    print(command)
def text2image(text_file):
    """Render the training text into a TIFF/box pair with the target font."""
    # NOTE(review): the font is hard-coded instead of using FONTa - confirm.
    cmd = ('text2image --text=' + text_file + ' --fonts_dir ..').split()
    cmd.append('--outputbase=' + OUTPUTBASE)
    cmd.append('--font=Fake Receipt')
    call(cmd)
def training():
    """Run tesseract in box-training mode over the rendered image."""
    call(['tesseract', OUTPUTBASE + '.tif', OUTPUTBASE, 'box.train.stderr'])
def unicharset():
    """Extract the character set from the generated box file."""
    call(['unicharset_extractor', OUTPUTBASE + '.box'])
def clustering():
    """Cluster features with mftraining using the shared font_properties."""
    args = [
        'mftraining',
        '-F', '../font_properties',
        '-U', 'unicharset',
        OUTPUTBASE + '.tr',
    ]
    call(args)
def cntraining():
    """Run character-normalization training on the .tr file."""
    call(['cntraining', OUTPUTBASE + '.tr'])
def cp_with_prefix(filename, prefix):
    """Copy *filename* to a sibling named '<prefix>.<filename>'."""
    call_shell('cp {0} {1}.{0}'.format(filename, prefix))
def prepare_for_combine():
    """Prefix each clustering artifact with the language code for combining."""
    for artifact in ('unicharset', 'shapetable', 'normproto',
                     'inttemp', 'pffmtable'):
        cp_with_prefix(artifact, LANG)
def combine():
    """Merge the prefixed components into a single traineddata bundle."""
    call(['combine_tessdata', LANG + '.'])
def copy_combined():
    """Install the combined traineddata file into ../tessdata."""
    bundle = LANG + '.traineddata'
    call_shell('cp ' + bundle + ' ../tessdata/' + bundle)
# Training pipeline: render the text, box-train, extract the charset,
# cluster features, then assemble and install the traineddata bundle.
text2image('../training_text.txt')
training()
unicharset()
clustering()
cntraining()
prepare_for_combine()
combine()
copy_combined()
| {
"content_hash": "e10f978b62dc6ff0c0d2f8246d8bff0b",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 80,
"avg_line_length": 21.013333333333332,
"alnum_prop": 0.6135786802030457,
"repo_name": "stryku/hb",
"id": "337ff8134f46bd676c7f196d684a6f64af602b98",
"size": "1576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "image_processing/tesseract/trainer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37338"
},
{
"name": "Shell",
"bytes": "2130"
}
],
"symlink_target": ""
} |
import proto # type: ignore
from google.ads.googleads.v10.enums.types import (
response_content_type as gage_response_content_type,
)
from google.ads.googleads.v10.resources.types import (
shared_set as gagr_shared_set,
)
from google.protobuf import field_mask_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
# proto-plus module descriptor binding this file's messages to the
# google.ads.googleads.v10.services proto package (appears auto-generated).
__protobuf__ = proto.module(
    package="google.ads.googleads.v10.services",
    marshal="google.ads.googleads.v10",
    manifest={
        "MutateSharedSetsRequest",
        "SharedSetOperation",
        "MutateSharedSetsResponse",
        "MutateSharedSetResult",
    },
)
class MutateSharedSetsRequest(proto.Message):
    r"""Request message for
    [SharedSetService.MutateSharedSets][google.ads.googleads.v10.services.SharedSetService.MutateSharedSets].

    Attributes:
        customer_id (str):
            Required. The ID of the customer whose shared
            sets are being modified.
        operations (Sequence[google.ads.googleads.v10.services.types.SharedSetOperation]):
            Required. The list of operations to perform
            on individual shared sets.
        partial_failure (bool):
            If true, successful operations will be
            carried out and invalid operations will return
            errors. If false, all operations will be carried
            out in one transaction if and only if they are
            all valid. Default is false.
        validate_only (bool):
            If true, the request is validated but not
            executed. Only errors are returned, not results.
        response_content_type (google.ads.googleads.v10.enums.types.ResponseContentTypeEnum.ResponseContentType):
            The response content type setting. Determines
            whether the mutable resource or just the
            resource name should be returned post mutation.
    """
    # NOTE(review): appears auto-generated proto-plus code; the number=
    # values are protobuf wire field numbers - do not change by hand.
    customer_id = proto.Field(
        proto.STRING,
        number=1,
    )
    operations = proto.RepeatedField(
        proto.MESSAGE,
        number=2,
        message="SharedSetOperation",
    )
    partial_failure = proto.Field(
        proto.BOOL,
        number=3,
    )
    validate_only = proto.Field(
        proto.BOOL,
        number=4,
    )
    response_content_type = proto.Field(
        proto.ENUM,
        number=5,
        enum=gage_response_content_type.ResponseContentTypeEnum.ResponseContentType,
    )
class SharedSetOperation(proto.Message):
    r"""A single operation (create, update, remove) on an shared set.

    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            FieldMask that determines which resource
            fields are modified in an update.
        create (google.ads.googleads.v10.resources.types.SharedSet):
            Create operation: No resource name is
            expected for the new shared set.
            This field is a member of `oneof`_ ``operation``.
        update (google.ads.googleads.v10.resources.types.SharedSet):
            Update operation: The shared set is expected
            to have a valid resource name.
            This field is a member of `oneof`_ ``operation``.
        remove (str):
            Remove operation: A resource name for the removed shared set
            is expected, in this format:
            ``customers/{customer_id}/sharedSets/{shared_set_id}``
            This field is a member of `oneof`_ ``operation``.
    """
    update_mask = proto.Field(
        proto.MESSAGE,
        number=4,
        message=field_mask_pb2.FieldMask,
    )
    # The oneof="operation" members below are mutually exclusive.
    create = proto.Field(
        proto.MESSAGE,
        number=1,
        oneof="operation",
        message=gagr_shared_set.SharedSet,
    )
    update = proto.Field(
        proto.MESSAGE,
        number=2,
        oneof="operation",
        message=gagr_shared_set.SharedSet,
    )
    remove = proto.Field(
        proto.STRING,
        number=3,
        oneof="operation",
    )
class MutateSharedSetsResponse(proto.Message):
    r"""Response message for a shared set mutate.
    Attributes:
        partial_failure_error (google.rpc.status_pb2.Status):
            Errors that pertain to operation failures in the partial
            failure mode. Returned only when partial_failure = true and
            all errors occur inside the operations. If any errors occur
            outside the operations (e.g. auth errors), we return an RPC
            level error.
        results (Sequence[google.ads.googleads.v10.services.types.MutateSharedSetResult]):
            All results for the mutate.
    """
    # NOTE: ``number=`` values are protobuf wire-format tags; do not change.
    partial_failure_error = proto.Field(
        proto.MESSAGE,
        number=3,
        message=status_pb2.Status,
    )
    # Forward reference by name: MutateSharedSetResult is defined below
    # in this module.
    results = proto.RepeatedField(
        proto.MESSAGE,
        number=2,
        message="MutateSharedSetResult",
    )
class MutateSharedSetResult(proto.Message):
    r"""The result for the shared set mutate.
    Attributes:
        resource_name (str):
            Returned for successful operations.
        shared_set (google.ads.googleads.v10.resources.types.SharedSet):
            The mutated shared set with only mutable fields after
            mutate. The field will only be returned when
            response_content_type is set to "MUTABLE_RESOURCE".
    """
    # NOTE: ``number=`` values are protobuf wire-format tags; do not change.
    resource_name = proto.Field(
        proto.STRING,
        number=1,
    )
    shared_set = proto.Field(
        proto.MESSAGE,
        number=2,
        message=gagr_shared_set.SharedSet,
    )
# Export every message type registered in this module's proto-plus manifest.
__all__ = tuple(sorted(__protobuf__.manifest))
| {
"content_hash": "b1a7d3c582d88a26711e052048d7a956",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 113,
"avg_line_length": 32.270718232044196,
"alnum_prop": 0.6397877075843178,
"repo_name": "googleads/google-ads-python",
"id": "8680ba4773a5655cd10f92efa62637669ef5cea2",
"size": "6441",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/ads/googleads/v10/services/types/shared_set_service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23399881"
}
],
"symlink_target": ""
} |
# NOTE(review): __version__ is conventionally a string (e.g. "1.0"); this
# float form would break consumers that expect str — confirm before changing.
__version__ = 1.0
import random
import sys
import os
import io
import unittest
import logging
import logging.config
import yaml
import scipy as sp
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from ctypes import *
##############self defined functions#####################################
def hello(name="jiagui"):
    """Yield the characters of *name* one at a time.

    Args:
        name: String to iterate over; defaults to "jiagui".

    Yields:
        str: Each successive character of *name* (nothing for "").
    """
    # "yield from" replaces the original index loop
    # (for i in range(len(name)): yield name[i]) with identical behavior.
    yield from name
def includePath(flags):
    '''
    Collect the directory of every file listed in "cscope.files" and
    append each unique directory to *flags* as a clang ``-I <dir>`` pair.

    Args:
        flags: List of compiler-flag strings; extended in place.

    Returns:
        The same *flags* list, with one ``-I``/directory pair appended per
        unique directory found (pair order is unspecified, as before).
    '''
    paths = set()
    # For DC: "cscope.files", when present, holds one file path per line
    # (absolute path plus file name).
    if os.path.exists("cscope.files"):
        # Context manager closes the handle; the original leaked it.
        with open("cscope.files") as path_file:
            for filename in path_file:
                # strip() drops the trailing newline; the set dedupes, so
                # the original "if dirname not in paths" check is redundant.
                paths.add(os.path.dirname(filename.strip()))
    for path in paths:
        flags.append('-I')
        flags.append(path)
    return flags
class HelloPy(unittest.TestCase):
@unittest.skip("setUp")
    def setUp(self):
        """Shared fixture: self.seq = [0..9]. Currently disabled via @unittest.skip."""
        self.seq = list(range(10))
@unittest.skip("ctype test")
    def test_ctypes(self):
        """Scratch: drive a vendor SMBus DLL via ctypes (skipped; needs CDLL('x'))."""
        SMB_MAX_DATA_SIZE = 5
        ARRAY5 = c_ubyte * SMB_MAX_DATA_SIZE
        # Mirrors the C struct layout expected by the DLL.
        class SMB_REQUEST(Structure):
            _fields_ = [
                ("Address", c_ubyte),
                ("Command", c_ubyte),
                ("BlockLength", c_ubyte),
                ("Data", ARRAY5)]
        smbus_read_byte = CDLL('x').SmBusReadByte
        smbus_read_byte.argtypes = [c_void_p,POINTER(SMB_REQUEST)]
        smbus_read_byte.restype = c_int
        open_smbus = CDLL('x').OpenSmbus
        open_smbus.argtypes = []
        open_smbus.restype = c_void_p
        handle = open_smbus()
        print( 'handle = %08Xh' % handle)
        smb_request = SMB_REQUEST(1,2,5)
        print('returned =',smbus_read_byte(handle,byref(smb_request)))
        print('Address =',smb_request.Address)
        print('Command =',smb_request.Command)
        print('BlockLength =',smb_request.BlockLength)
        for i,b in enumerate(smb_request.Data):
            print ('Data[%d] = %02Xh' % (i,b))
@unittest.skip("done test_ctagsForE")
    def test_ctagsForE(self):
        """Scratch: regex for 'type'/'extend' declarations (ctags-for-E experiment)."""
        import re
        #p = re.compile(r'^\s*type\s+\w+\s*:.*\n\s*(\w+)\s*')
        #p = re.compile(r'^\s*type\s+\w+\s*:\s*[[].*,\n\s*(\w+)\s*')
        #p = re.compile(r'^\s*type\s+\w+\s*:\s*[[](?:.*,\n){2}\s*(\w+)\s*')
        #p = re.compile(r'^\s*type\s+\w+\s*:(?:.*,\n){1}\s*(\w+)\s*')
        string2= """type prach_rab_kind_t : [RANDOM,
                                              PS16_32,
                                              SGH];"""
        # Captures the keyword and the identifier after the 2nd comma.
        p = re.compile(r'^\s*(type|extend)\s+\w+\s*:(?:.*?,){2}\s*(\w+)\s*')
        string= "extend prach_rab_kind_t : [RANDOM, PS16_32, PS32_32];"
        m=p.search(string)
        print(m.groups())
@unittest.skip("done test_stringLen")
    def test_stringLen(self):
        """Scratch: len() of a string of nine 0x11 control characters (prints 9)."""
        piBitmapContent = '\x11\x11\x11\x11\x11\x11\x11\x11\x11'
        print(len(piBitmapContent))
@unittest.skip("done test_plotBasic")
    def test_plotBasic(self):
        """Scratch: basic matplotlib subplots (skipped; opens an interactive window)."""
        x=np.arange(1,5,0.2)
        y=x
        x2=x
        y2=x**2
        x3=x
        y3=x**3-3
        #select current figure
        plt.figure(1)
        plt.subplot(2,1,1)
        lines=plt.plot(x,y,'r--',x2,y2,'k^')
        #select current axis
        plt.subplot(2,1,2)
        lines=plt.plot(x3,y3,'gs')
        #check line2d properties
        #print(plt.setp(lines))
        plt.title('basic plot')
        # NOTE(review): plt.legend expects an iterable of labels, e.g.
        # plt.legend(['line1','line2','line3']) — confirm intent.
        plt.legend('line1','line2','line3')
        plt.axis([0,6,0,150])
        #plt.cla()
        #clear current figure
        #plt.clf()
        plt.show()
        #logging.config.dictConfig(yaml.load(open('logging.dict','r')))
        #logger = logging.getLogger('simpleExample')
        #logger.debug(x)
        #logger.debug(y)
        #logger.debug(y.size)
@unittest.skip("done test_sciPyBasic")
    def test_sciPyBasic(self):
        """Scratch: scipy mgrid and poly1d basics (needs local 'logging.dict')."""
        logging.config.dictConfig(yaml.load(open('logging.dict','r')))
        logger = logging.getLogger('simpleExample')
        x=sp.mgrid[0:5,0:5]
        logger.debug(x)
        x=sp.poly1d([3,4,5])
        logger.debug(x)
        xx=x*x
        logger.debug(xx)
@unittest.skip("done test_numpyDate")
    def test_numpyDate(self):
        """Scratch: numpy datetime64 construction from strings and arrays."""
        logging.config.dictConfig(yaml.load(open('logging.dict','r')))
        logger = logging.getLogger('simpleExample')
        date= np.datetime64('2013-06-15')
        logger.debug(date)
        date= np.datetime64('2013-06','D')
        logger.debug(date)
        date= np.datetime64('2013-06-12T03:02')
        logger.debug(date)
        date= np.array(['2013-06-12T03:02','2013-08-12T03:02','2013-06-12T04:02'],dtype='datetime64');
        logger.debug(date)
        date= np.array(['2013-06-12T03:02','2013-08-12T03:02:24.456','2013-06-12T04:02'],dtype='datetime64');
        logger.debug(date)
@unittest.skip("done test_numpyMask")
    def test_numpyMask(self):
        """Scratch: tour of numpy.ma masked-array constructors and masked_* helpers."""
        import numpy.ma as ma
        logging.config.dictConfig(yaml.load(open('logging.dict','r')))
        logger = logging.getLogger('simpleExample')
        x=np.array([1,2,3,-1,5])
        mx= ma.masked_array(x,mask=[0,0,0,1,0])
        logger.debug(mx.mean())
        y= ma.array([1,2,3],mask=[0,1,0])
        logger.debug(y)
        z= ma.masked_array([1,2,1.e20,3],1.e20)
        logger.debug(z)
        x= ma.array([1,2,3])
        logger.debug(x.view(ma.MaskedArray))
        x= np.arange(10).reshape(2,5)
        logger.debug(x)
        mx=np.ma.asarray(x)
        logger.debug(mx)
        logger.debug(type(mx))
        x= np.ma.array([1., -1, np.nan, np.inf], mask=[1]+[0]*3)
        logger.debug(x)
        mf=np.ma.fix_invalid(x)
        logger.debug(mf)
        x= np.arange(4)
        logger.debug(x)
        mx=np.ma.masked_equal(x,2)
        logger.debug(mx)
        x= np.arange(4)
        logger.debug(x)
        mx=np.ma.masked_greater(x,2)
        logger.debug(mx)
        x= np.arange(4)
        logger.debug(x)
        mx=np.ma.masked_greater_equal(x,2)
        logger.debug(mx)
        x= np.arange(4)
        logger.debug(x)
        mx=np.ma.masked_less(x,2)
        logger.debug(mx)
        x= np.arange(4)
        logger.debug(x)
        mx=np.ma.masked_inside(x,1,3)
        logger.debug(mx)
        x= np.arange(4)
        logger.debug(x)
        mx=np.ma.masked_values(x,value=2.00,atol=0,rtol=0.00)
        logger.debug(mx)
        x= np.arange(4)
        logger.debug(x)
        mx=np.ma.masked_where(x<3,x)
        logger.debug(mx)
@unittest.skip("done test_numpyIter")
    def test_numpyIter(self):
        """Scratch: np.nditer iteration orders, index flags, and in-place writes."""
        logging.config.dictConfig(yaml.load(open('logging.dict','r')))
        logger = logging.getLogger('simpleExample')
        #single element indexing
        x=np.arange(6).reshape(2,3)
        logger.debug('x[1,2]:%d',x[1,2])
        logger.debug('x original order')
        for xi in np.nditer(x):
            logger.debug(xi)
        logger.debug('external_loop, order="F"')
        for xi in np.nditer(x, flags=['external_loop'],order='F'):
            logger.debug(xi)
        logger.debug('external_loop, order="F",buffered')
        for xi in np.nditer(x, flags=['external_loop','buffered'],order='F'):
            logger.debug(xi)
        it = np.nditer(x, flags=['c_index'])
        logger.debug('x <c_index>')
        while not it.finished:
            logger.debug('%d <%d>',it[0],it.index)
            it.iternext()
        it = np.nditer(x, flags=['f_index'])
        logger.debug('x <f_index>')
        while not it.finished:
            logger.debug('%d <%d>',it[0],it.index)
            it.iternext()
        logger.debug('x original order, modify value x[1,2] op_flags=["readwrite"]')
        for xi in np.nditer(x, op_flags=['readwrite']):
            xi[...]=2*xi
            logger.debug(xi)
        logger.debug('x original order,external loop')
        for xi in np.nditer(x, flags=['external_loop'] ):
            logger.debug(xi)
        logging.config.dictConfig(yaml.load(open('logging.dict','r')))
        logger = logging.getLogger('simpleExample')
        #single element indexing
        x=np.arange(6).reshape(2,3)
        logger.debug('x original order')
        for xi in np.nditer(x):
            logger.debug(xi)
        logger.debug('x.T original order')
        for xi in np.nditer(x.T):
            logger.debug(xi)
        logger.debug('x.T C order')
        for xi in np.nditer(x.T, order='C'):
            logger.debug(xi)
        logger.debug('x.T K order')
        for xi in np.nditer(x.T, order='K'):
            logger.debug(xi)
        logger.debug('x.T F order')
        for xi in np.nditer(x.T, order='F'):
            logger.debug(xi)
        for xi in np.nditer(x.T.copy(order='C')):
            logger.debug(xi)
@unittest.skip("done test_numpyBitendien")
    def test_numpyBitendien(self):
        """Scratch: byte-order (endianness) handling with >i2/<i2 dtypes and byteswap."""
        logging.config.dictConfig(yaml.load(open('logging.dict','r')))
        logger = logging.getLogger('simpleExample')
        big_end_str = chr(0)+chr(1)+chr(3)+chr(2)
        #big_end_arr = np.ndarray(shape=(2,),dtype='>i2',buffer=io.BytesIO(big_end_str.encode()))
        big_end_arr = np.ndarray(shape=(2,),dtype='>i2', buffer=big_end_str.encode())
        logger.debug(big_end_str)
        logger.debug(big_end_arr)
        logger.debug(big_end_arr[0])
        wrong_end_dtype_arr = np.ndarray(shape=(2,),dtype='<i2', buffer=big_end_str.encode())
        logger.debug(wrong_end_dtype_arr[0])
        logger.debug(wrong_end_dtype_arr)
        # NOTE(review): ndarray.newbyteorder/tostring are removed in modern
        # numpy (use view(dtype.newbyteorder()) / tobytes()) — confirm version.
        fixed_end_dtype_arr = wrong_end_dtype_arr.newbyteorder()
        logger.debug(fixed_end_dtype_arr)
        logger.debug(fixed_end_dtype_arr.tostring())
        fixed_end_mem_arr=wrong_end_dtype_arr.byteswap()
        logger.debug(fixed_end_mem_arr)
        logger.debug(fixed_end_mem_arr.tostring())
        swapped_end_arr = big_end_arr.byteswap().newbyteorder()
        logger.debug(swapped_end_arr)
        logger.debug(swapped_end_arr.tostring())
@unittest.skip("done test_numpyStructuredArray")
    def test_numpyStructuredArray(self):
        """Scratch: the several ways to declare numpy structured dtypes."""
        logging.config.dictConfig(yaml.load(open('logging.dict','r')))
        logger = logging.getLogger('simpleExample')
        x= np.zeros(3,dtype='3i1,f4,(2,3)f8')
        logger.debug(x)
        logger.debug(x.dtype.names)
        logger.debug(x.dtype)
        logger.debug(x.dtype.fields)
        x= np.zeros(3,dtype=('i4',[('r','u1'),('g','u1'),('b','u1'),('a','u1')]))
        logger.debug(x)
        x= np.zeros(3,dtype=[('x','f4'),('y',np.float32),('value','f4',(2,2))])
        logger.debug(x)
        x= np.zeros(3,dtype={'names':['col1','col2'],'formats':['i4','f4']})
        logger.debug(x)
        x= np.zeros(3,dtype={'col1':('i1',0,'title 1'),'col2':('f4',1,'title 2')})
        logger.debug(x)
@unittest.skip("done test_numpyBroadcasting")
    def test_numpyBroadcasting(self):
        """Scratch: broadcasting shapes — (4,1)+(5,) broadcasts to (4,5)."""
        logging.config.dictConfig(yaml.load(open('logging.dict','r')))
        logger = logging.getLogger('simpleExample')
        x= np.arange(4)
        xx= x.reshape(4,1)
        y= np.ones(5)
        z= np.ones((3,4))
        logger.debug(x.shape)
        logger.debug(xx.shape)
        logger.debug(y.shape)
        logger.debug(z.shape)
        logger.debug(xx+y)
@unittest.skip("done test_numpyVariableIndices")
    def test_numpyVariableIndices(self):
        """Scratch: indexing with tuples, slice objects, and Ellipsis."""
        logging.config.dictConfig(yaml.load(open('logging.dict','r')))
        logger = logging.getLogger('simpleExample')
        z = np.arange(81).reshape(3,3,3,3)
        indices=(1,1,1,1)
        logger.debug(z[indices])
        indices=(1,1,1,slice(0,2))
        logger.debug(slice(0,2))
        logger.debug(z[indices])
        indices=(1,Ellipsis,1)
        logger.debug(z[indices])
@unittest.skip("done test_numpyNewaxis")
    def test_numpyNewaxis(self):
        """Scratch: np.newaxis to build column/row vectors and an outer sum."""
        logging.config.dictConfig(yaml.load(open('logging.dict','r')))
        logger = logging.getLogger('simpleExample')
        x = np.arange(5)
        logger.debug(x)
        x1=x[:, np.newaxis]
        x2=x[np.newaxis, :]
        x3=x1+x2
        logger.debug(x)
        logger.debug(x.shape)
        logger.debug(x1)
        logger.debug(x1.shape)
        logger.debug(x2)
        logger.debug(x2.shape)
        logger.debug(x3)
        logger.debug(x3.shape)
@unittest.skip("done test_numpyBoolean")
    def test_numpyBoolean(self):
        """Scratch: boolean-mask indexing on a 5x7 array."""
        logging.config.dictConfig(yaml.load(open('logging.dict','r')))
        logger = logging.getLogger('simpleExample')
        y=np.arange(35).reshape(5,7)
        b = y>20
        logger.debug(b)
        logger.debug(y[b])
        logger.debug(b[:,5])
        logger.debug(b[2,:])
        logger.debug(y[b[:,2]])
        logger.debug(y[b[2,:]])
        logger.debug(y[np.array([0,2,4]),1:3])
        logger.debug(y[b[:,5],1:3])
        print("sjg")
        # dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
        # print(dt['name'])
        # print(dt['grades'])
        # x = np.array([('Sarach', (8.0,7.0)), ('John', (8.0,5.0)), ],dtype = dt)
        # print( x[1])
@unittest.skip("done test_numpyIndexArray")
    def test_numpyIndexArray(self):
        """Scratch: fancy (integer-array) indexing, including negative indices."""
        logging.config.dictConfig(yaml.load(open('logging.dict','r')))
        logger = logging.getLogger('simpleExample')
        #single element indexing
        x=np.arange(10)
        x=x*2
        logger.debug(x[np.array([3,3,2,1])])
        logger.debug(x[np.array([3,-3,2,1])])
        logger.debug(x[np.array([[2,-3],[2,1]])])
        y=np.arange(35).reshape(5,7)
        logger.debug(y)
        logger.debug(y[np.array([0,2,4]),np.array([1,2,3])])
        logger.debug(y[np.array([0,2,4]),2])
@unittest.skip("done test_numpyIndexAccess")
    def test_numpyIndexAccess(self):
        """Scratch: field access on a structured array by field name."""
        logging.config.dictConfig(yaml.load(open('logging.dict','r')))
        logger = logging.getLogger('simpleExample')
        #single element indexing
        x=np.zeros((2,2), dtype=[('a', np.int32),('b', np.float32, (3,3))])
        logger.debug(x['a'])
        logger.debug(x['b'])
@unittest.skip("done test_numpyIndexBasic")
    def test_numpyIndexBasic(self):
        """Scratch: basic slicing (views) and reshaping via assignment to .shape."""
        #basic slicing return a view of data
        logging.config.dictConfig(yaml.load(open('logging.dict','r')))
        logger = logging.getLogger('simpleExample')
        #single element indexing
        x=np.arange(10)
        logger.debug(x[2])
        logger.debug(x.shape)
        x.shape = (2,5)
        logger.debug(x[1,2])
        logger.debug(x.shape)
        x.shape = (1,10)
        logger.debug(x[0,5])
        logger.debug(x.shape)
        x.shape = (10,1)
        logger.debug(x[5,0])
        logger.debug(x.shape)
        x.shape = (2,5)
        logger.debug(x[1])
        logger.debug(x[:,1])
        logger.debug(x[1,:])
        #x=np.arange(10)
        x.shape=(10,)
        logger.debug(x[:5])
        logger.debug(x[1:5])
        logger.debug(x[1:9:2])
        logger.debug(x[:-1])
        logger.debug(x[1:-1:3])
        logger.debug(x[::3])
        # NOTE(review): x[(2,3,5)] on a 1-D array is a 3-tuple index and
        # raises IndexError on modern numpy (use x[[2,3,5]]) — confirm.
        logger.debug(x[(2,3,5)])
@unittest.skip("done test_numpyGenfromtxt") # has error on input string format. need further correct
    def test_numpyGenfromtxt(self):
        """Scratch: np.genfromtxt with '|' delimiter on a captured table (known broken)."""
        logging.config.dictConfig(yaml.load(open('logging.dict','r')))
        logger = logging.getLogger('simpleExample')
        data = '''
        #----------|-------------|-------------|------------------------- \n
        # line     |   I         |   Q         | Radio frame position     \n
        #          | REFM  MEAS  | REFM  MEAS  | frame  slot slot_position \n
        #----------|-------------|-------------|------------------------- \n
            1280   | -300  -300  |  240   518  |    0    0   1280  \n
            1281   | -300  -300  | -492  -214  |    0    0   1281  \n
            1282   |  -66   -66  | -1806 -2084 |    0    0   1282  \n
            1283   |  342   620  |  666   666  |    0    0   1283  \n
            1284   |  168   168  | -360   -82  |    0    0   1284  \n
            1285   | -1092 -814  | -1632 -1632 |    0    0   1285  \n
            1286   | -726  -448  |  -66   -66  |    0    0   1286  \n
            1287   |  666   666  |  726   448  |    0    0   1287  \n
            1288   |  300   300  | -176   102  |    0    0   1288  \n
        '''
        # The table above is immediately discarded; only the CSV below is parsed.
        data = "1,2,3\n4,5,6"
        x = np.genfromtxt(io.BytesIO(data.encode()), delimiter=',')
        ndtype = [('line',np.int16),
                  ('I_REFM',np.int16),
                  ('I_MEAS',np.int16),
                  ('Q_REFM',np.int16),
                  ('Q_MEAS',np.int16),
                  ('frame',np.int16),
                  ('slot',np.int16),
                  ('slot_position',np.int16)]
        ndtype = [('line',np.int16),
                  ('I',np.int16),
                  ('Q',np.int16),
                  ('frame_slot_position',np.int16) ]
        #data = np.genfromtxt('SPECMAN_TEST_P7_HS_EL2_01_1_M01_ANT0.cmp',
        data = np.genfromtxt(io.BytesIO(data.encode()),
                             dtype = ndtype,
                             comments = '#',
                             delimiter = '|',
                             autostrip = True)
        logger.debug(data)
        #names="BFN, CFN, Slot, SymbolBits,Amp_I, Amp_Q, Power, Ch_power, Type"
        ##convertfunc = lambda x: float(x.encode("utf-8").strip("%"))/100
        #x = np.genfromtxt("SPECMAN_TEST_MBMS_MCCH_MICH_SETUP_1_M01_ANT7_CC20_CCPCH1079_short.bits",
        #        comments="#",
        #        delimiter=None,
        #        names=names,
        #        skip_header=1)
        #logger.debug(x)
        #logger.debug(x['BFN'])
        #names="BFN, CFN, Slot, SymbolBits,Amp_I, Amp_Q, Power, Ch_power, Type"
        #xmask = np.genfromtxt("SPECMAN_TEST_MBMS_MCCH_MICH_SETUP_1_M01_ANT7_CC20_CCPCH1079_short.bits", usemask=True,skip_header=1)
        #logger.debug(xmask)
@unittest.skip("done test_numpyIOConvert")
    def test_numpyIOConvert(self):
        """Scratch: genfromtxt converters turning hex strings into ints."""
        data = "0x7ab , 0xfff\n 0x555,0xefa"
        #data = "0x7ab \n 0x555"
        ndtype = [ ('UlTpc', np.dtype(('S6', 1)))]
        #autostrip = True,
        #converters={'UlTpcV': convertHex2Int})
        convertfunc = lambda x: int(x,16)
        x = np.genfromtxt(io.BytesIO(data.encode()),names=['UlTpcV','UlTpc'], dtype=[np.dtype(('S6', 1)), np.dtype(('S6', 1))],converters = {'UlTpcV': convertfunc,'UlTpc': convertfunc},delimiter=',')
        #x = np.genfromtxt(io.BytesIO(data.encode()),names='a', dtype=np.dtype(('S6', 1)), converters = {'a': convertfunc},delimiter=',')
        #x = np.genfromtxt(io.BytesIO(data.encode()),dtype=[('a', np.dtype(('S6', 1)))], converters = {'a': convertfunc},delimiter=',')
        print(x)
@unittest.skip("done test_numpyIO")
    def test_numpyIO(self):
        """Scratch: extensive np.genfromtxt tour — delimiters, headers, names,
        usecols, missing/filling values (parts need external .bits files)."""
        logging.config.dictConfig(yaml.load(open('logging.dict','r')))
        logger = logging.getLogger('simpleExample')
        data = "1,2,3\n4,5,6"
        x = np.genfromtxt(io.BytesIO(data.encode()), delimiter=',')
        logger.debug(x)
        data = "  1  2  3\n  4  5 67\n890123  4"
        x = np.genfromtxt(io.BytesIO(data.encode()), delimiter=3)
        logger.debug(x)
        data = "12345678912\n  4  7 9\n  4567 9"
        x = np.genfromtxt(io.BytesIO(data.encode()), delimiter=(4,3,2))
        logger.debug(x)
        data = """#
        #comments only
        1, 10 , 2\n
        #skip me
        3, 22, 4
        """
        x = np.genfromtxt(io.BytesIO(data.encode()), comments="#",delimiter=',')
        logger.debug(x)
        data = "\n".join(str(i) for i in range(10))
        logger.debug(data)
        x = np.genfromtxt(io.BytesIO(data.encode()), comments="#",delimiter=',')
        logger.debug(x)
        x = np.genfromtxt(io.BytesIO(data.encode()), comments="#",delimiter=',', skip_header=2,skip_footer=2)
        logger.debug(x)
        data = """#
        #comments only
        1 2 3 4 5 \n
        #skip me
        10 20 30 40 50 \n
        """
        x = np.genfromtxt(io.BytesIO(data.encode()), comments="#",delimiter=None)
        logger.debug(x)
        data = """#
        #comments only
        1 2 3 4 5 \n
        #skip me
        10 20 30 40 50 \n
        """
        x = np.genfromtxt(io.BytesIO(data.encode()), comments="#",delimiter=None,names="a,b,c,d,e", usecols=("a,c"))
        logger.debug(x)
        data = """#
        #comments only
        1 2 3 4 5 \n
        #skip me
        10 20 30 40 50 \n
        """
        x = np.genfromtxt(io.BytesIO(data.encode()), comments="#",delimiter=None, usecols=(1,3))
        logger.debug(x)
        data = """#
        #comments only
        1 2 3 4 5 \n
        #skip me
        10 20 30 40 50 \n
        """
        x = np.genfromtxt(io.BytesIO(data.encode()), comments="#",delimiter=None, usecols=(0,-1))
        logger.debug(x)
        data = """#
        #name from dtypes list of pairs
        1013 3.0 512 --\n
        """
        x = np.genfromtxt(io.BytesIO(data.encode()), comments="#",delimiter=None,dtype=[("a", np.int16), ("b", np.float32), ("c", np.int16), ("d",str)])
        logger.debug(x)
        data = """#name from dtypes another form
        1013 3.0 51 \n
        """
        ndtypes=[('a', np.int16),('b',np.float32),('c',np.int8)]
        x = np.genfromtxt(io.BytesIO(data.encode()), comments="#",delimiter=None, dtype=ndtypes )
        logger.debug(x['a'])
        logger.debug(x)
        data = """#override default names from ndtypes
        1013 3.0 51 \n
        """
        names = ["A","B","C"]
        ndtypes=[('a', np.int16),('b',np.float32),('c',np.int8)]
        x = np.genfromtxt(io.BytesIO(data.encode()), comments="#",delimiter=None, dtype=ndtypes, names=names)
        logger.debug(x['A'])
        logger.debug(x)
        data = """#names from files
        #a b c\n
        1013 3.0 512 \n
        13 33.0 22 \n
        """
        x = np.genfromtxt(io.BytesIO(data.encode()), comments="#",delimiter=None, names=True, skip_header=1)
        logger.debug(x['a'])
        logger.debug(x)
        data = """#nan general
        #a b c\n
        1, 2.3%, 45.\n6, 78.9%, 0
        """
        x = np.genfromtxt(io.BytesIO(data.encode()), comments="#",delimiter=None, names=True, skip_header=1)
        logger.debug(x)
        #data = """#nan converted case
        ##a b c\n
        #1, 2.3%, 45.\n6, 78.9%, 0
        #"""
        #convertfunc = lambda x: float(x.encode("utf-8").strip("%"))/100
        #x = np.genfromtxt(io.BytesIO(data), comments="#",delimiter=',', names=True, skip_header=1, converters={'b':convertfunc})
        #logger.debug(x)
        names="BFN, CFN, Slot, SymbolBits,Amp_I, Amp_Q, Power, Ch_power, Type"
        #convertfunc = lambda x: float(x.encode("utf-8").strip("%"))/100
        x = np.genfromtxt("SPECMAN_TEST_MBMS_MCCH_MICH_SETUP_1_M01_ANT7_CC20_CCPCH1079_short.bits",
                comments="#",
                delimiter=None,
                names=names,
                skip_header=1)
        logger.debug(x)
        logger.debug(x['BFN'])
        names="BFN, CFN, Slot, SymbolBits,Amp_I, Amp_Q, Power, Ch_power, Type"
        xmask = np.genfromtxt("SPECMAN_TEST_MBMS_MCCH_MICH_SETUP_1_M01_ANT7_CC20_CCPCH1079_short.bits", usemask=True,skip_header=1)
        logger.debug(xmask)
        names="BFN, CFN, Slot, SymbolBits,Amp_I, Amp_Q, Power, Ch_power, Type"
        x = np.genfromtxt("SPECMAN_TEST_MBMS_MCCH_MICH_SETUP_1_M01_ANT7_CC20_CCPCH1079_short.bits",
                comments="#",
                delimiter=None,
                names=names,
                missing_values={4:"N/A", 7:"N/A",8:"N/A"},
                filling_values={4:0, 7:0, 8:0},
                skip_header=1)
        logger.debug(x)
        logger.debug(x['BFN'])
        data = "N/A, 2, 3\n4, ,???"
        kwargs = dict(delimiter=",",
                      dtype=int,
                      names="a,b,c",
                      missing_values={0:"N/A", 'b':" ", 2:"???"},
                      filling_values={0:0, 'b':0, 2:-999})
        x=np.genfromtxt(io.BytesIO(data.encode()), **kwargs)
        logger.debug(x)
@unittest.skip("done test_numpyCreateArray")
    def test_numpyCreateArray(self):
        """Scratch: array creation (from Python structures and intrinsics) and
        the main ndarray attributes."""
        import numpy as np
        import logging
        import logging.config
        import yaml
        logging.config.dictConfig(yaml.load(open('logging.dict','r')))
        logger = logging.getLogger('simpleExample')
        x = np.float32(-1.0)
        logger.info(x)
        #create arrays
        #convert from python structures
        x = np.array([2,3,4,5])
        logger.info(x)
        x = np.array((2,3,4,5))
        logger.info(x)
        x = np.array([[1.0,2.0,3.0],(2,3+2j,4,5)])
        logger.info(x)
        #intrinsic
        x = np.zeros((2,3))
        logger.info(x)
        x = np.ones((2,3))
        logger.info(x)
        x = np.linspace(1,10,6)
        logger.info(x)
        x = np.arange(1,10,2)
        logger.info(x)
        x = np.indices((4,4))
        logger.info(x)
        logger.info(x.T)
        logger.info(x.data)
        logger.info(x.dtype)
        logger.info(x.flags)
        logger.info(x.flat)
        logger.info(x.imag)
        logger.info(x.real)
        logger.info(x.size)
        logger.info(x.itemsize)
        logger.info(x.nbytes)
        logger.info(x.ndim)
        logger.info(x.shape)
        logger.info(x.strides)
        logger.info(x.ctypes)
        logger.info(x.base)
        #logger.debug('debug message'
        #logger.warn('warn message')
        #logger.error('error message')
        #logger.critical('critical message')
@unittest.skip("done test_yaml")
    def test_yaml(self):
        """Scratch: walk through YAML syntax (sequences, mappings, anchors,
        scalars) by loading example documents.

        SECURITY NOTE(review): yaml.load without an explicit Loader can
        execute arbitrary Python tags; prefer yaml.safe_load for untrusted
        input (fine here since all documents are local literals).
        """
        import logging
        import logging.config
        import yaml
        logging.config.dictConfig(yaml.load(open('logging.dict','r')))
        logger = logging.getLogger('simpleExample')
        document = """
        - Mark McGwire
        - Sammy Sosa
        - Ken Griffey
        """
        data = yaml.load(document)
        logger.info(data)
        document = """
        -
          Mark McGwire
        -
          Sammy Sosa
        -
          Ken Griffey
        """
        data = yaml.load(document)
        logger.info(data)
        document = """
        -
          time: 20:03:20
          player: Sammy Sosa
          action: strike (miss)
        """
        data = yaml.load(document)
        logger.info(data)
        document = """
        american:
          - Boston Red Sox
          - Detroit Tigers
          - New York Yankees
        national:
          - New York Mets
          - Chicago Cubs
        """
        data = yaml.load(document)
        logger.info(data['american'])
        logger.info(data)
        document = """
        -
          name: Mark McGwire
          hr: 65
          avg: 0.278
        -
          name: Sammy Sosa
          hr: 63
          avg: 0.288
        """
        data = yaml.load(document)
        logger.info(data)
        document = """
        - [name        , hr, avg  ]
        - [Mark McGwire, 65, 0.278]
        - [Sammy Sosa  , 63, 0.288]
        """
        data = yaml.load(document)
        logger.info(data)
        document = """
        Mark McGwire: {hr: 65, avg: 0.278}
        Sammy Sosa: {
            hr: 63,
            avg: 0.288
          }
        """
        data = yaml.load(document)
        logger.info(data)
        # Comments
        document = """
        hr: # 1998 hr ranking
          - Mark McGwire #hello1
          - Sammy Sosa
        rbi:
          # 1998 rbi ranking
          - Sammy Sosa #hello2
          - Ken Griffey
          #hello3
        """
        data = yaml.load(document)
        logger.info(data)
        # Anchors and aliases
        document = """
        hr:
          - Mark McGwire
          # Following node labeled SS
          - &SS Sammy Sosa
        rbi:
          - *SS # Subsequent occurrence
          - Ken Griffey
        """
        data = yaml.load(document)
        logger.info(data)
        # Inline nested mapping
        document = """
        # products purchased
        - item    : Super Hoop
          quantity: 1
        - item    : Basketball
          quantity: 4
        - item    : Big Shoes
          quantity: 1
        """
        data = yaml.load(document)
        logger.info(data)
        document = """
        # products purchased
        -
          item    : Super Hoop
          quantity: 1
        -
          item    : Basketball
          quantity: 4
        -
          item    : Big Shoes
          quantity: 1
        """
        data = yaml.load(document)
        logger.info(data)
        # Literal scalars
        document = """
        #--- | # ASCII art
        |
          \//||\/||
          // ||  ||__
        """
        data = yaml.load(document)
        logger.info(data)
        document = """
        plain:
          This unquoted scalar
          spans many lines.
        quoted: "So does this
          quoted scalar.\n"
        """
        data = yaml.load(document)
        print(data['plain'])
        print(data['quoted'])
        logger.info(data)
        document = """
        canonical: 12345
        decimal: +12_345
        sexagesimal: 3:25:45
        octal: 014
        hexadecimal: 0xC
        """
        data = yaml.load(document)
        logger.info(data)
        document = """
        canonical: 1.23015e+3
        exponential: 12.3015e+02
        sexagesimal: 20:30.15
        fixed: 1_230.15
        negative infinity: -.inf
        not a number: .NaN
        """
        data = yaml.load(document)
        logger.info(data)
        document = """
        null: ~
        true: boolean
        false: boolean
        string: '12345'
        """
        data = yaml.load(document)
        logger.info(data)
        data = yaml.load(open("yamlExample2.yaml",'r'))
        logger.info(data)
        # Folded scalars
        document = """
        folded:
          >
            Mark McGwire's
            year was crippled
            by a knee injury.
        literal:
          |
            Mark McGwire's
            year was crippled
            by a knee injury.
        """
        data = yaml.load(document)
        logger.info(data)
        document = """
        |
          Mark McGwire's
          year was crippled
          by a knee injury.
        """
        data = yaml.load(document)
        logger.info('|')
        logger.info(data)
        # Preserved indented block in a folded scalar
        document = """
        >
         Sammy Sosa completed another
         fine season with great stats.
           63 Home Runs
           0.288 Batting Average
         What a year!
        """
        data = yaml.load(document)
        logger.info(data)
        document = """
        name: Mark McGwire
        accomplishment:
          >
            Mark set a major league
            home run record in 1998.
        stats: |
          65 Home Runs
          0.278 Batting Average
        """
        data = yaml.load(document)
        logger.info(data)
        data = yaml.load(open("yamlExample3.yaml",'r'))
        logger.info(data)
        # 'application' code
        #logger.debug('debug message')
        #logger.warn('warn message')
        #logger.error('error message')
        #logger.critical('critical message')
@unittest.skip("done test_argparse")
    def test_argparse(self):
        """Scratch: minimal argparse parser (parses sys.argv as-is)."""
        import argparse
        parser = argparse.ArgumentParser()
        parser.parse_args()
@unittest.skip("done test_loggingMultiProcess")
    def test_loggingMultiProcess(self):
        """Scratch: delegate to a local loggingMultiProcess module (skipped)."""
        import loggingMultiProcess
        loggingMultiProcess.main()
@unittest.skip("done test_dealWithHandlersThatBlock")
    def test_dealWithHandlersThatBlock(self):
        """Scratch: QueueHandler/QueueListener pattern for non-blocking logging."""
        import logging
        import logging.handlers
        import queue
        que = queue.Queue(-1) # no limit on size
        queue_handler = logging.handlers.QueueHandler(que)
        handler = logging.StreamHandler()
        listener = logging.handlers.QueueListener(que, handler)
        root = logging.getLogger()
        root.addHandler(queue_handler)
        formatter = logging.Formatter('%(threadName)s: %(message)s')
        handler.setFormatter(formatter)
        listener.start()
        # The log output will display the thread which generated
        # the event (the main thread) rather than the internal
        # thread which monitors the internal queue. This is what
        # you want to happen.
        root.warning('Look out!')
        listener.stop()
@unittest.skip("done test_configServer")
    def test_configServer(self):
        """Scratch: logging config listener on port 9999; loops until Ctrl+C."""
        import logging
        import logging.config
        import time
        import os
        #read initial config file
        logging.config.fileConfig('logging.conf')
        #createe and start listener on port 9999
        t = logging.config.listen(9999)
        t.start()
        logger = logging.getLogger('simpleExample')
        try:
            #loop through logging calls to see the difference
            #new configurations make ,until Ctrl+C to terminate
            while True:
                logger.debug('debug message')
                logger.info('info message')
                # NOTE(review): logger.warn is a deprecated alias of warning.
                logger.warn('warn message')
                logger.error('error message')
                logger.critical('critical message')
                time.sleep(5)
        except KeyboardInterrupt:
            #clean up
            logging.config.stopListening()
            t.join()
@unittest.skip("done test_loggingToMultipleDestination")
    def test_loggingToMultipleDestination(self):
        """Scratch: route records to a root log file plus per-logger file/console handlers."""
        import logging
        logging.basicConfig(
                level = logging.DEBUG,
                format = '%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
                datefmt = '%m-%d %H:%M',
                filename = 'myapp.log',
                filemode = 'w')
        #create a logger with 'simple_example'
        logger = logging.getLogger('simple_example')
        logger.setLevel(logging.DEBUG)
        #create file handler which logs even debug message
        fh = logging.FileHandler('spam.log', mode = 'w')
        fh.setLevel(logging.DEBUG)
        #create consle handler which logs higher level message
        ch = logging.StreamHandler()
        ch.setLevel(logging.INFO)
        #create formatter and add it to the handlers
        ff = logging.Formatter('%(asctime)s-%(name)s-%(levelname)s-%(message)s')
        cf = logging.Formatter('%(name)s-%(levelname)s-%(message)s')
        fh.setFormatter(ff)
        ch.setFormatter(cf)
        #add handlers to the logger
        logger.addHandler(fh)
        logger.addHandler(ch)
        #add the console handler to root logger
        logging.getLogger('').addHandler(ch)
        logger1 = logging.getLogger('myapp.area1')
        logger2 = logging.getLogger('myapp.area2')
        logging.info('hi jiagui')
        logger.info("don't forget me")
        logger1.debug('Quick jiagui')
        logger1.info('where are you ,jiagui')
        # NOTE(review): logger.warn is a deprecated alias of logger.warning.
        logger2.warn('there is a hole in the minddle of road')
        logger2.error('oh, god, jiagui fall in to the river')
@unittest.skip("done test_multipleHandlerAndFormatters")
    def test_multipleHandlerAndFormatters(self):
        """Scratch: one logger, two handlers (file=DEBUG, console=ERROR), shared formatter."""
        import logging
        #create a logger with 'simple_example'
        logger = logging.getLogger('simple_example')
        logger.setLevel(logging.DEBUG)
        #create file handler which logs even debug message
        fh = logging.FileHandler('spam.log', mode = 'w')
        fh.setLevel(logging.DEBUG)
        #create consle handler which logs higher level message
        ch = logging.StreamHandler()
        ch.setLevel(logging.ERROR)
        #create formatter and add it to the handlers
        formatter = logging.Formatter('%(asctime)s-%(name)s-%(levelname)s-%(message)s')
        fh.setFormatter(formatter)
        ch.setFormatter(formatter)
        #add handlers to the logger
        logger.addHandler(fh)
        logger.addHandler(ch)
        #application code
        logger.debug('debug message')
        logger.info('info message')
        logger.warn('warn message')
        logger.error('error message')
        logger.critical('critical message')
@unittest.skip("done test_usingLoggingInMultipleModule")
    def test_usingLoggingInMultipleModule(self):
        """Scratch: cross-module logging via a local 'auxiliary' module (skipped)."""
        import logging
        import auxiliary
        #create a logger with 'spam_application'
        logger = logging.getLogger('spam_application')
        logger.setLevel(logging.DEBUG)
        #create file handler which logs even debug message
        fh = logging.FileHandler('spam.log', mode = 'w')
        fh.setLevel(logging.DEBUG)
        #create consle handler which logs higher level message
        ch = logging.StreamHandler()
        ch.setLevel(logging.ERROR)
        #create formatter and add it to the handlers
        formatter = logging.Formatter('%(asctime)s-%(name)s-%(levelname)s-%(message)s')
        fh.setFormatter(formatter)
        ch.setFormatter(formatter)
        #add handlers to the logger
        logger.addHandler(fh)
        logger.addHandler(ch)
        logger.info('creating an instance of auxiliary.Auxiliary')
        a = auxiliary.Auxiliary()
        logger.info('created an instance of auxiliary.Auxiliary')
        logger.info('calling auxiliary.Auxiliary.do_something()')
        a.do_something()
        logger.info('finished auxiliary.Auxiliary.do_something()')
        logger.info('calling auxiliary.some_function()')
        auxiliary.some_function()
        logger.info('finished auxiliary.some_function()')
@unittest.skip("done test_loggingDict")
    def test_loggingDict(self):
        """Scratch: configure logging from a YAML dict ('logging.dict' file)."""
        import logging
        import logging.config
        import yaml
        # SECURITY NOTE(review): yaml.load without Loader= is unsafe on
        # untrusted input; prefer yaml.safe_load.
        logging.config.dictConfig(yaml.load(open('logging.dict','r')))
        logger = logging.getLogger('simpleExample')
        # 'application' code
        logger.debug('debug message')
        logger.info('info message')
        logger.warn('warn message')
        logger.error('error message')
        logger.critical('critical message')
@unittest.skip("done test_loggingConfig")
    def test_loggingConfig(self):
        """Scratch: configure logging from an INI-style 'logging.conf' file."""
        import logging
        import logging.config
        logging.config.fileConfig('logging.conf')
        logger = logging.getLogger('simpleExample')
        # 'application' code
        logger.debug('debug message')
        logger.info('info message')
        logger.warn('warn message')
        logger.error('error message')
        logger.critical('critical message')
@unittest.skip("done test_loggingAdvance")
    def test_loggingAdvance(self):
        """Scratch: programmatic logger + StreamHandler + Formatter setup."""
        import logging
        # create logger
        logger = logging.getLogger('simple_example')
        logger.setLevel(logging.DEBUG)
        # create console handler and set level to debug
        ch = logging.StreamHandler()
        ch.setLevel(logging.DEBUG)
        # create formatter
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        # add formatter to ch
        ch.setFormatter(formatter)
        # add ch to logger
        logger.addHandler(ch)
        # 'application' code
        logger.debug('debug message')
        logger.info('info message')
        logger.warn('warn message')
        logger.error('error message')
        logger.critical('critical message')
@unittest.skip("done test_loggingBasic")
def test_loggingBasic(self):
    """Exercise logging.basicConfig writing (mode 'w') to 'example.log'."""
    import logging
    # Bug fix: the original datefmt '%m/%d%Y/%I:%M:%S %p\t' ran the day and
    # year together and put a separator in the wrong place.  Backslash
    # continuations are unnecessary inside the call parentheses.
    logging.basicConfig(
        filename='example.log',
        format=('%(asctime)s:'
                '%(levelname)s:'
                '%(message)s'),
        datefmt='%m/%d/%Y %I:%M:%S %p',
        level=logging.DEBUG,
        filemode='w')
    logging.debug('This message should go to the log file')
    logging.info('logging info in exmple.log file')
    logging.warning('and logging warning in exmple.log file too')
@unittest.skip("TBD BinaryTestCase")
def test_binaryDataService(self):
    """Print this platform's native byte order ('little' or 'big')."""
    import sys
    byte_order = sys.byteorder
    print(byte_order)
@unittest.skip("done test_reAdvance")
def test_reAdvance(self):
    """Scratch tour of regex lookahead/lookbehind assertions, printed for
    manual inspection."""
    import re
    #/^(?:[^f]+|f(?!oo))*$/; # Matches strings not containing 'foo'.
    #/^(?:(?!PATTERN).)*$/; # Matches strings not containing PATTERN
    # str1 contains 'REF'; str2 does not.  NOTE: str2 is never used below.
    str1="SPECMAN_TEST_W10_HS_MIN_BIT_RATE_08_1_M01_ANT0_REF_CC20_CPICH0.bits"
    str2="SPECMAN_TEST_W10_HS_MIN_BIT_RATE_08_1_M01_ANT0_CC20_DPCH59.bits"
    # Tempered lookahead: every character is checked not to start 'REF',
    # so the whole-string match fails against str1 (prints None).
    p = re.compile(r'(^(?:(?!REF).)*$)', re.IGNORECASE)
    m2 = p.search(str1)
    print("(m2):",m2)
    # '.*(?!REF)' is ineffective: after the greedy '.*' backtracks, the
    # lookahead can always succeed somewhere, so str1 still matches.
    p3 = re.compile(r'(SPECMAN).*(?!REF).*\.bits')
    m3 = p3.search(str1)
    print("(m3):",m3)
    # Positive lookahead: requires 'REF' to occur before '.bits'.
    p4 = re.compile(r'SPECMAN.*(?=REF).*\.bits')
    m4 = p4.search(str1)
    print("(m4):",m4)
    # Positive lookbehind: the match must start immediately after 'REF'.
    p5 = re.compile(r'(?<=REF).*\.bits')
    m5 = p5.search(str1)
    print("(m5):",m5)
    # Negative lookbehind: matches at the first position not preceded by
    # 'REF' — position 0, so this grabs the whole string.
    p6 = re.compile(r'(?<!REF).*')
    m6 = p6.search(str1)
    print("(m6):",m6)
@unittest.skip("done test_re")
def test_re(self):
    """Scratch tour of re: match vs. search, findall/finditer, verbose
    patterns and nested capture groups."""
    import re
    p = re.compile(r'(sjg)', re.IGNORECASE)
    print("p:",type(p))
    # match() only tries at position 0 of the string...
    m1 = p.match("sjg hello sjg")
    print("(m1):",type(m1))
    # ...so this one yields None (pattern not at the start).
    m2 = p.match("hello sjg")
    print("(m2):",type(m2))
    # search() scans the whole string for the first occurrence.
    s = p.search("sjg hello sjg")
    print("(s):",type(s))
    fa = p.findall("sjg hello sjg")  # list of captured substrings
    print("(fa):",type(fa))
    fi = p.finditer("sjg hello sjg")  # iterator of Match objects
    print("(fi):",type(fi))
    p = re.compile(r'[0-9]+')
    m3 = p.match('te0mp2osjg9')  # None: no digit at position 0
    if m3:
        print("(m3):",type(m3))
        print("m3.span()",m3.span())
        print("m3.group()",m3.group())
    m4 = p.match('0temp2osjg9')  # matches the leading '0'
    if m4:
        print("(m4):",type(m4))
        print("m4.span()",m4.span())
        print("m4.group()",m4.group())
    m5 = p.search('temp2osjg9')  # finds the first digit run anywhere
    if m5:
        print("(m5):",type(m5))
        print("m5.span()",m5.span())
        print("m5.group()",m5.group())
    l2 = p.findall('temp2osjg9')
    if l2:
        print("(l2):",type(l2))
    i2 = p.finditer('temp2osjg9')
    if i2:
        print("(i2):",type(i2))
        for match in i2:
            print(match.group(), match.span())
    # re.VERBOSE lets the pattern itself carry whitespace and comments.
    charref = re.compile(r"""
    &[#] # Start of a numeric entity reference
    (
    0[0-7]+ # Octal form
    | [0-9]+ # Decimal form
    | x[0-9a-fA-F]+ # Hexadecimal form
    )
    ; # Trailing semicolon
    """, re.VERBOSE)
    # Groups number by opening paren: 0=whole match, 1='abc', 2='b'.
    p = re.compile('(a(b)c)d')
    m6 = p.match('abcd')
    print("m6 groups(): ", m6.groups(), "group(0):", m6.group(0), "group(1): ", m6.group(1), "group(2): ", m6.group(2))
@unittest.skip("done test_stringLib")
def test_stringLib(self):
    """Scratch tour of the string module constants and str.format features."""
    import string
    # Predefined character-class constants.
    print(string.ascii_letters)
    print(string.ascii_lowercase)
    print(string.ascii_uppercase)
    print(string.digits)
    print(string.hexdigits)
    print(string.octdigits)
    print(string.punctuation)
    print(string.printable)
    print(string.whitespace)
    #keyArg = [father='shen yonghseng', mother = 'xiezhongyu', brother = 'xujun']
    dicArg = {'father':'shen yongsheng', 'mother':'xiezhongyu', 'brother':'xujun'}
    # **dicArg expands the dict into keyword arguments for format().
    print("father:{father},mother:{mother},brother:{brother}".format(**dicArg))
    # code...
    # Mixing positional ({0}, {1}) and keyword ({name}) replacement fields.
    print("hello {0}, my question is :{1}. \
answer is :my wife's name is {name}".format(\
    "jiagui",\
    "what's your wife's name",\
    name="qiuli"))
    c = 3+4j
    # A replacement field can drill into attributes of its argument...
    print("complex {0}, it is real part is {0.real},it is imaginary part is {0.imag}".format(c))
    Arr = [0,1,2]
    # ...and into items by index.
    print("array is {0},it is first element is {0[0]},it is second element is {0[1]}, it is last element is {0[2]}".format(Arr))
    # NOTE(review): '<' left-aligns (pads right) and '>' right-aligns; the
    # two label strings below are swapped relative to that behavior.
    print("{:<50}".format("right alignment"))
    print("{:>50}".format("left alignment"))
    print("{:^50}".format("center alignment"))
    print("{:*^50}".format("center alignment and fill two side with *"))
    # Sign options: '+' always shows the sign, ' ' reserves a leading space.
    print("{:f} {:+f} {: f} {:+f} {: f} {:-f}".format(3.14, 3.14, 3.14, -3.14, -3.14, -3.14))
    print("{:x} {:x}".format(314, -314))
    # Same value in four bases; '#' adds the 0x/0o/0b prefix.
    print("dec:{0:d} hex:{0:x} oct:{0:o} bin:{0:b}".format(123))
    print("dec:{0:d} hex:{0:#x} oct:{0:#o} bin:{0:#b}".format(123))
    print("repr() shows quotes: {0!r}; str() doesn't: {1!s}, ascii() show:{2!a}".format('test1', 'test2','test3'))
    # ',' enables thousands grouping; '%' scales by 100 and appends a sign.
    print("{0:,}".format(1234566))
    print("{0:,.2%}".format(100000/3))
@unittest.skip("done test_basic1")
def test_basic1(self):
    """Scratch exercises: generators, zip iteration, working-directory
    changes, chained comparisons and the core built-in container types."""
    ##################################################
    def hello(data):
        # Generator yielding the items of *data* one by one.
        for index in range(0, len(data)):
            yield data[index]
    for char in hello('jiagui'):
        print(char)
    ##################################################
    xvec = [10, 20, 30]
    yvec = [7, 5, 6]
    for x, y in zip(xvec, yvec):
        print(x, y)
    ##################################################
    import os
    print(os.getcwd())
    # Bug fix: os.system('cd ..') ran 'cd' in a throw-away child shell, so
    # this process's working directory never changed and both prints showed
    # the same path.  os.chdir changes it for real; restore afterwards so
    # the rest of the test run is unaffected.
    original_cwd = os.getcwd()
    os.chdir('..')
    print(os.getcwd())
    os.chdir(original_cwd)
    #################################################
    year = 1901
    month = 12
    day = 31
    hour = 5
    minute = 13
    second = 22
    # Chained comparisons read like the mathematical notation.
    if 1900 < year < 2100 and 1 <= month <= 12 \
       and 1 <= day <= 31 and 0 <= hour < 24 \
       and 0 <= minute < 60 and 0 <= second < 60:  # Looks like a valid date
        print(year, month, day, hour, minute, second)
    #################################################
    # Adjacent string literals are concatenated at compile time.
    print("hello"
          "jiagui")
    sjg = "sjg"
    print(id(sjg))
    print(type(sjg))
    sjg2 = (1, 2, 3)
    print(type(sjg2))
    print((sjg2))
    print(id(sjg2))
    sjg3 = [1, 2, 3]
    print(type(sjg3))
    print((sjg3))
    print(id(sjg3))
    sjg4 = {1, 2, 3}
    print(type(sjg4))
    print((sjg4))
    print(id(sjg4))
    sjg5 = {'a': 1, 'b': 2, 'c': 3}
    print(type(sjg5))
    print((sjg5))
    #print(id(sjg5))
@unittest.skip("done test_basic2")
def test_basic2(self):
    """Scratch exercises: list methods, stacks, deques, comprehensions,
    sets, dicts, enumerate and Python's scoping rules."""
    ##################################################
    L = [1, 2, 3, 4]
    print(L)
    L.append(5)
    print(L)
    list2 = [10, 11, 12]
    print(list2)
    L.extend(list2)
    print(L)
    L.insert(0, 0)
    print(L)
    L.insert(6, 6)
    print(L)
    L.remove(10)
    print(L)
    L2 = L.pop(len(L) - 2)
    print(L2)
    ind = L.index(5)
    print(ind)
    L.insert(5, 5)
    print(L)
    cnt = L.count(5)
    print(cnt)
    L.reverse()
    print(L)
    L.sort()
    print(L)
    ##################################################
    # A list already behaves as a stack (append/pop at the right end).
    stack = [3, 4, 5]
    stack.append(6)
    stack.append(7)
    print(stack)
    ##################################################
    # deque gives O(1) pops from the left end, unlike list.pop(0).
    from collections import deque
    queue = deque(["jiagui", "qiuli", "baba", "Mama"])
    queue.append("gege")
    queue.append("jiejie")
    print(queue)
    person = queue.popleft()
    print(person)
    ##################################################
    squares = []
    for x in range(10):
        squares.append(x**2)
    print(squares)
    # Same result, built with a comprehension.
    squares2 = [x**2 for x in range(10)]
    print(squares2)
    ##################################################
    vec = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    # Nested comprehension flattens the matrix row by row.
    L3 = [num for elem in vec for num in elem]
    print(L3)
    del L3
    ##################################################
    basket = {'apple', 'orange', 'pear', 'apple'}  # duplicates collapse
    print(basket)
    print('apple' in basket)
    ##################################################
    a = set('abracadabra')
    print('a ', a)
    b = set('alasbcdm')
    print('b ', b)
    # Set algebra: difference, union, intersection, symmetric difference.
    c = a - b
    print('a-b ', c)
    c = a | b
    print('a|b ', c)
    c = a & b
    print('a&b ', c)
    c = a ^ b
    print('a^b ', c)
    ##################################################
    tel = {'jack': 4909, 'sam': 3323, 'Mick': 222}
    print(tel)
    print(tel['jack'])
    del tel['jack']
    print(tel)
    print('sam' in tel)
    print('jack' in tel)
    tel_l = list(tel.keys())
    print('tel.keys()', tel.keys())
    print(tel_l)
    knights = {'gallahad': 'the pure', 'robin': 'the brave'}
    for k, v in knights.items():
        print(k, v)
    for i, v in enumerate(['tic', 'tac', 'toe']):
        print(i, v)
    ##################################################
    import os
    # Bug fix: os.system("pwd") returns the child's exit status (an int),
    # and os.path.abspath(int) raises TypeError.  os.getcwd() returns the
    # working directory as a string directly.
    abs_path = os.path.abspath(os.getcwd())
    print(abs_path)
    print(type(abs_path))
    ##################################################
    def scope_test():
        def do_local():
            # Rebinding here creates a name local to do_local only.
            spam = "local spam"
        #def do_nonlocal():
        #    nonlocal spam
        #    spam = "nonlocal spam"
        def do_global():
            # 'global' rebinds the module-level name, not scope_test's.
            global spam
            spam = "global spam"
        spam = "test spam"
        do_local()
        print("After local assignment:", spam)
        #do_nonlocal()
        #print("After nonlocal assignment:", spam)
        do_global()
        print("After global assignment:", spam)
    scope_test()
    # The module-level 'spam' created by do_global() is visible here.
    print("In global scope:", spam)
@unittest.skip("done test_basic3")
def test_basic3(self):
    """Scratch exercises: truthiness, while-loop Fibonacci, mutating a list
    while iterating a copy, range stepping, and function docstrings."""
    ##################################################
    the_world_is_flat=1
    if the_world_is_flat:
        print("be careful not fall off!")
    ##################################################
    # Tuple assignment makes the Fibonacci update atomic.
    a,b=0,1
    while b<10:
        print('b is:',b,)
        a,b=b,a+b
    ##################################################
    #x=int(input("please enter an integer: "))
    #if x<0:
    #    x=0
    #    print('Negative changed to 0')
    #elif x==0:
    #    print('zero')
    #elif x==1:
    #    print('one')
    #else:
    #    print('more')
    ##################################################
    a=['cat','dog','desktop']
    # Iterate over a copy (a[:]) so inserting into 'a' cannot loop forever.
    for x in a[:]:
        print(x, len(x))
        if len(x)>6 :
            a.insert(0,x)
    print(a)
    ##################################################
    # Counting down: the stop value (1) is exclusive, so this prints 10..2.
    for i in range(10,1,-1):
        print(i)
    ##################################################
    a=list(range(1,10))
    print(a)
    for i in a[:]:
        print(i)
    ##################################################
    def ask_ok(prompt, retries=4, complaint='yes or not, please'):
        """Ask ok
        yes, ye, y--> yes
        nope,nop,no,n --> no
        """
        # Loops until a recognized answer; raises after 'retries' bad ones.
        while True:
            ok=input(prompt)
            if ok in ('y','ye','yes'):
                return True
            elif ok in ('n','no','nop','nope'):
                return False
            retries=retries-1
            if retries<0:
                raise IOError('refusenik user')
            print(complaint)
    # Only the docstring is printed; ask_ok itself is never called here
    # (calling it would block on input()).
    print(ask_ok.__doc__)
    ##################################################
    def my_function():
        """Do nothing, but doc
        No,really ,it dose not do anything.
        """
        pass
    print(my_function.__doc__)
@unittest.skip("done test_MyComplex")
def test_MyComplex(self):
    """Patch MyComplex.foo with a MagicMock whose side_effect maps the
    argument through a lookup table, then exercise both methods."""
    lookup = {'a': 'sideEffectA', 'b': 'sideEffectB', 'c': 'sideEffectC'}
    mocked_foo = MagicMock(return_value=1234)
    # side_effect wins over return_value when it returns something.
    mocked_foo.side_effect = lambda arg: lookup[arg]
    MyComplex.foo = mocked_foo
    instance = MyComplex(1, 2)
    print(instance.foo1("overide parent Complex"))
    print(instance.foo('a'))
@unittest.skip("done test_shuffleMock")
def test_shuffleMock(self):
    """Replace random.shuffle with a MagicMock and print the stubbed value."""
    # make sure the shuffled sequence does not lose any elements
    random.shuffle = MagicMock(return_value=3)
    stubbed_result = random.shuffle(self.seq)
    print(stubbed_result)
@unittest.skip("done test_shuffle")
def test_shuffle(self):
    """random.shuffle keeps every element and rejects immutable input."""
    # make sure the shuffled sequence does not lose any elements
    random.shuffle(self.seq)
    self.seq.sort()
    expected = list(range(10))
    self.assertEqual(self.seq, expected)
    # should raise an exception for an immutable sequence
    with self.assertRaises(TypeError):
        random.shuffle((1, 2, 3))
@unittest.skip("done test_choice")
def test_choice(self):
    """random.choice must return an element of the sequence."""
    picked = random.choice(self.seq)
    self.assertTrue(picked in self.seq)
@unittest.skip("done test_sample")
def test_sample(self):
    """random.sample rejects oversized requests; every pick must come
    from the population."""
    with self.assertRaises(ValueError):
        random.sample(self.seq, 20)
    sampled = random.sample(self.seq, 5)
    for member in sampled:
        self.assertTrue(member in self.seq)
@unittest.skip("demonstrating skipping")
def test_nothing(self):
    # Never executed: the skip decorator suppresses the unconditional failure.
    self.fail("shouldn't happen")
# Bug fix: comparing __version__ (typically a string like "1.0") to the
# float 3.0 raises TypeError on Python 3 while the class body is being
# evaluated.  Compare major/minor as an integer tuple instead.
# NOTE(review): assumes __version__'s leading components are numeric —
# confirm against the library that defines it.
@unittest.skipIf(tuple(int(p) for p in str(__version__).split('.')[:2]) < (3, 0),
                 "not supported in this library version")
def test_format(self):
    # Tests that work for only a certain version of the library.
    pass
@unittest.skip("done test_fail")
@unittest.expectedFailure
def test_fail(self):
    # Intentionally failing assertion; with the outer skip removed,
    # expectedFailure would record it as an expected failure, not an error.
    self.assertEqual(1, 0, "broken")
@unittest.skip("done hello test")
def test_hello(self):
    """Poke at the writable attributes and introspection dunders of the
    module-level function 'hello'."""
    # Functions are objects: arbitrary attributes can be attached and show
    # up in hello.__dict__ below.
    hello.a = 'aa' #aaa
    hello.b = 'ba' #bbb
    print(hello.__doc__)
    # __name__ is writable; this renames the function object itself.
    hello.__name__ = "helloJiaugi"
    print(hello.__name__)
    print(hello.__module__)
    print(hello.__defaults__)
    print(hello.__code__)
    print(hello.__globals__)
    print(hello.__dict__)
    print(hello.__closure__)
    print(hello.__annotations__)
    print(hello.__kwdefaults__)
if __name__ == '__main__':
    # Build the suite explicitly (instead of unittest.main()) so the
    # runner's verbosity can be controlled.
    loader = unittest.TestLoader()
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(loader.loadTestsFromTestCase(HelloPy))
| {
"content_hash": "c203f473a1a9c0c252ad98f3731501b1",
"timestamp": "",
"source": "github",
"line_count": 1754,
"max_line_length": 199,
"avg_line_length": 30.14994298745724,
"alnum_prop": 0.5302460147873608,
"repo_name": "shennjia/weblte",
"id": "c6dbcdb0efcd0b11b15f845229f09ab2ae2ba73a",
"size": "52905",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test_xlsxwriter/test_py.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "911"
},
{
"name": "Python",
"bytes": "239591"
}
],
"symlink_target": ""
} |
from io import BytesIO
import PIL.Image
from django.core.files.images import ImageFile
from wagtail.images import get_image_model
Image = get_image_model()
def get_test_image_file(filename='test.png', colour='white', size=(640, 480)):
    """Return a Django ImageFile wrapping an in-memory PNG of the given
    colour and size, for use as test fixture data."""
    buf = BytesIO()
    PIL.Image.new('RGBA', size, colour).save(buf, 'PNG')
    return ImageFile(buf, name=filename)
def get_test_image_file_jpeg(filename='test.jpg', colour='white', size=(640, 480)):
    """Return a Django ImageFile wrapping an in-memory JPEG of the given
    colour and size (RGB — JPEG has no alpha channel)."""
    buf = BytesIO()
    PIL.Image.new('RGB', size, colour).save(buf, 'JPEG')
    return ImageFile(buf, name=filename)
def get_test_image_file_webp(filename='test.webp', colour='white', size=(640, 480)):
    """Return a Django ImageFile wrapping an in-memory WebP image of the
    given colour and size."""
    buf = BytesIO()
    PIL.Image.new('RGB', size, colour).save(buf, 'WEBP')
    return ImageFile(buf, name=filename)
| {
"content_hash": "7c1d6499a60f679350d7633c7defeddb",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 84,
"avg_line_length": 27.689655172413794,
"alnum_prop": 0.6699875466998755,
"repo_name": "timorieber/wagtail",
"id": "2c48b1266d117e06d1113f3bce30c6c91760be04",
"size": "803",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "wagtail/images/tests/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "185324"
},
{
"name": "Dockerfile",
"bytes": "703"
},
{
"name": "HTML",
"bytes": "383475"
},
{
"name": "JavaScript",
"bytes": "267615"
},
{
"name": "Makefile",
"bytes": "992"
},
{
"name": "Python",
"bytes": "3711005"
},
{
"name": "Shell",
"bytes": "8867"
}
],
"symlink_target": ""
} |
"""
This module started out as largely a copy paste from the stdlib's
optparse module with the features removed that we do not need from
optparse because we implement them in Click on a higher level (for
instance type handling, help formatting and a lot more).
The plan is to remove more and more from here over time.
The reason this is a different module and not optparse from the stdlib
is that there are differences in 2.x and 3.x about the error messages
generated and optparse in the stdlib uses gettext for no good reason
and might cause us issues.
Click uses parts of optparse written by Gregory P. Ward and maintained
by the Python Software Foundation. This is limited to code in parser.py.
Copyright 2001-2006 Gregory P. Ward. All rights reserved.
Copyright 2002-2006 Python Software Foundation. All rights reserved.
"""
# This code uses parts of optparse written by Gregory P. Ward and
# maintained by the Python Software Foundation.
# Copyright 2001-2006 Gregory P. Ward
# Copyright 2002-2006 Python Software Foundation
from collections import deque
from .exceptions import BadArgumentUsage
from .exceptions import BadOptionUsage
from .exceptions import NoSuchOption
from .exceptions import UsageError
# Sentinel value that indicates an option was passed as a flag without a
# value but is not a flag option. Option.consume_value uses this to
# prompt or use the flag_value.
_flag_needs_value = object()
def _unpack_args(args, nargs_spec):
    """Given an iterable of arguments and an iterable of nargs specifications,
    it returns a tuple with all the unpacked arguments at the first index
    and all remaining arguments as the second.
    The nargs specification is the number of arguments that should be consumed
    or `-1` to indicate that this position should eat up all the remainders.
    Missing items are filled with `None`.
    """
    args = deque(args)
    nargs_spec = deque(nargs_spec)
    rv = []
    spos = None
    def _fetch(c):
        # Before the wildcard (nargs=-1) is seen we consume from the left;
        # after it, both deques are consumed from the right so the wildcard
        # can absorb whatever is left in the middle.
        try:
            if spos is None:
                return c.popleft()
            else:
                return c.pop()
        except IndexError:
            return None
    while nargs_spec:
        nargs = _fetch(nargs_spec)
        if nargs == 1:
            rv.append(_fetch(args))
        elif nargs > 1:
            x = [_fetch(args) for _ in range(nargs)]
            # If we're reversed, we're pulling in the arguments in reverse,
            # so we need to turn them around.
            if spos is not None:
                x.reverse()
            rv.append(tuple(x))
        elif nargs < 0:
            if spos is not None:
                raise TypeError("Cannot have two nargs < 0")
            # Remember the wildcard's position and reserve its slot.
            spos = len(rv)
            rv.append(None)
    # spos is the position of the wildcard (star). If it's not `None`,
    # we fill it with the remainder.
    if spos is not None:
        rv[spos] = tuple(args)
        args = []
        # Entries after the star were collected right-to-left; restore order.
        rv[spos + 1 :] = reversed(rv[spos + 1 :])
    return tuple(rv), list(args)
def split_opt(opt):
    """Split an option string into its ``(prefix, name)`` parts.

    A string starting with an alphanumeric character has no prefix at all;
    a doubled prefix character (``--foo``) yields a two-character prefix.
    """
    prefix_char = opt[:1]
    if prefix_char.isalnum():
        return "", opt
    prefix_width = 2 if opt[1:2] == prefix_char else 1
    return opt[:prefix_width], opt[prefix_width:]
def normalize_opt(opt, ctx):
    """Run the context's ``token_normalize_func`` over the name part of
    *opt*, keeping the prefix untouched.  Without a context (or without a
    normalize function) the option is returned unchanged."""
    if ctx is None or ctx.token_normalize_func is None:
        return opt
    prefix, name = split_opt(opt)
    normalized = ctx.token_normalize_func(name)
    return f"{prefix}{normalized}"
def split_arg_string(string):
    """Split an argument string as with :func:`shlex.split`, but don't
    fail if the string is incomplete. Ignores a missing closing quote or
    incomplete escape sequence and uses the partial token as-is.
    .. code-block:: python
        split_arg_string("example 'my file")
        ["example", "my file"]
        split_arg_string("example my\\")
        ["example", "my"]
    :param string: String to split.
    """
    import shlex
    lexer = shlex.shlex(string, posix=True)
    lexer.whitespace_split = True
    lexer.commenters = ""
    tokens = []
    try:
        for token in lexer:
            tokens.append(token)
    except ValueError:
        # End of string was reached inside a quote or escape; shlex keeps
        # the partial token in .token (the offending char is in .state).
        tokens.append(lexer.token)
    return tokens
class Option:
    """Internal description of a single option: its spellings, destination
    and the action used to store parsed values."""

    def __init__(self, opts, dest, action=None, nargs=1, const=None, obj=None):
        self._short_opts = []
        self._long_opts = []
        self.prefixes = set()

        for opt in opts:
            prefix, value = split_opt(opt)
            if not prefix:
                raise ValueError(f"Invalid start character for option ({opt})")
            # Track the single prefix character; long options also record
            # their full (possibly doubled) prefix.
            self.prefixes.add(prefix[0])
            if len(prefix) == 1 and len(value) == 1:
                self._short_opts.append(opt)
            else:
                self._long_opts.append(opt)
                self.prefixes.add(prefix)

        self.dest = dest
        self.action = action if action is not None else "store"
        self.nargs = nargs
        self.const = const
        self.obj = obj

    @property
    def takes_value(self):
        # Only plain stores and appends consume a value from the args.
        return self.action in ("store", "append")

    def process(self, value, state):
        """Record *value* into *state* according to this option's action."""
        action = self.action
        if action == "store":
            state.opts[self.dest] = value
        elif action == "store_const":
            state.opts[self.dest] = self.const
        elif action == "append":
            state.opts.setdefault(self.dest, []).append(value)
        elif action == "append_const":
            state.opts.setdefault(self.dest, []).append(self.const)
        elif action == "count":
            state.opts[self.dest] = state.opts.get(self.dest, 0) + 1
        else:
            raise ValueError(f"unknown action '{action}'")
        state.order.append(self.obj)
class Argument:
    """Internal description of a positional argument."""

    def __init__(self, dest, nargs=1, obj=None):
        self.dest = dest
        self.nargs = nargs
        self.obj = obj

    def process(self, value, state):
        """Record *value* into *state*, validating multi-value tuples."""
        if self.nargs > 1:
            # A multi-value argument arrives as a tuple possibly padded
            # with None: all-None means it was absent entirely, while a
            # partial fill is a usage error.
            missing = sum(1 for item in value if item is None)
            if missing == len(value):
                value = None
            elif missing != 0:
                raise BadArgumentUsage(
                    f"argument {self.dest} takes {self.nargs} values"
                )

        if self.nargs == -1 and self.obj.envvar is not None:
            # Variadic argument backed by an envvar: drop the CLI value so
            # the envvar lookup takes over.
            value = None

        state.opts[self.dest] = value
        state.order.append(self.obj)
class ParsingState:
    """Mutable accumulator for one parse run.

    ``opts``  maps dest name -> collected value;
    ``largs`` collects leftover (non-option) arguments;
    ``rargs`` holds the remaining arguments still to be processed;
    ``order`` records option/argument objects in processing order.
    """

    def __init__(self, rargs):
        self.opts = {}
        self.largs = []
        self.rargs = rargs
        self.order = []
class OptionParser:
    """The option parser is an internal class that is ultimately used to
    parse options and arguments. It's modelled after optparse and brings
    a similar but vastly simplified API. It should generally not be used
    directly as the high level Click classes wrap it for you.
    It's not nearly as extensible as optparse or argparse as it does not
    implement features that are implemented on a higher level (such as
    types or defaults).
    :param ctx: optionally the :class:`~click.Context` where this parser
        should go with.
    """
    def __init__(self, ctx=None):
        #: The :class:`~click.Context` for this parser.  This might be
        #: `None` for some advanced use cases.
        self.ctx = ctx
        #: This controls how the parser deals with interspersed arguments.
        #: If this is set to `False`, the parser will stop on the first
        #: non-option.  Click uses this to implement nested subcommands
        #: safely.
        self.allow_interspersed_args = True
        #: This tells the parser how to deal with unknown options.  By
        #: default it will error out (which is sensible), but there is a
        #: second mode where it will ignore it and continue processing
        #: after shifting all the unknown options into the resulting args.
        self.ignore_unknown_options = False
        if ctx is not None:
            self.allow_interspersed_args = ctx.allow_interspersed_args
            self.ignore_unknown_options = ctx.ignore_unknown_options
        self._short_opt = {}
        self._long_opt = {}
        self._opt_prefixes = {"-", "--"}
        self._args = []
    def add_option(self, opts, dest, action=None, nargs=1, const=None, obj=None):
        """Adds a new option named `dest` to the parser.  The destination
        is not inferred (unlike with optparse) and needs to be explicitly
        provided.  Action can be any of ``store``, ``store_const``,
        ``append``, ``append_const`` or ``count``.
        The `obj` can be used to identify the option in the order list
        that is returned from the parser.
        """
        if obj is None:
            obj = dest
        opts = [normalize_opt(opt, self.ctx) for opt in opts]
        option = Option(opts, dest, action=action, nargs=nargs, const=const, obj=obj)
        self._opt_prefixes.update(option.prefixes)
        for opt in option._short_opts:
            self._short_opt[opt] = option
        for opt in option._long_opts:
            self._long_opt[opt] = option
    def add_argument(self, dest, nargs=1, obj=None):
        """Adds a positional argument named `dest` to the parser.
        The `obj` can be used to identify the option in the order list
        that is returned from the parser.
        """
        if obj is None:
            obj = dest
        self._args.append(Argument(dest=dest, nargs=nargs, obj=obj))
    def parse_args(self, args):
        """Parses positional arguments and returns ``(values, args, order)``
        for the parsed options and arguments as well as the leftover
        arguments if there are any.  The order is a list of objects as they
        appear on the command line.  If arguments appear multiple times they
        will be memorized multiple times as well.
        """
        state = ParsingState(args)
        try:
            self._process_args_for_options(state)
            self._process_args_for_args(state)
        except UsageError:
            # A resilient context swallows usage errors so completion and
            # similar tooling can work with partial input.
            if self.ctx is None or not self.ctx.resilient_parsing:
                raise
        return state.opts, state.largs, state.order
    def _process_args_for_args(self, state):
        # Distribute whatever is left over the declared positional
        # arguments according to their nargs specs.
        pargs, args = _unpack_args(
            state.largs + state.rargs, [x.nargs for x in self._args]
        )
        for idx, arg in enumerate(self._args):
            arg.process(pargs[idx], state)
        state.largs = args
        state.rargs = []
    def _process_args_for_options(self, state):
        # Consume options from the front of rargs, shifting non-options
        # into largs (when interspersed args are allowed).
        while state.rargs:
            arg = state.rargs.pop(0)
            arglen = len(arg)
            # Double dashes always handled explicitly regardless of what
            # prefixes are valid.
            if arg == "--":
                return
            elif arg[:1] in self._opt_prefixes and arglen > 1:
                self._process_opts(arg, state)
            elif self.allow_interspersed_args:
                state.largs.append(arg)
            else:
                state.rargs.insert(0, arg)
                return
        # Say this is the original argument list:
        # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
        #                            ^
        # (we are about to process arg(i)).
        #
        # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
        # [arg0, ..., arg(i-1)] (any options and their arguments will have
        # been removed from largs).
        #
        # The while loop will usually consume 1 or more arguments per pass.
        # If it consumes 1 (eg. arg is an option that takes no arguments),
        # then after _process_arg() is done the situation is:
        #
        #   largs = subset of [arg0, ..., arg(i)]
        #   rargs = [arg(i+1), ..., arg(N-1)]
        #
        # If allow_interspersed_args is false, largs will always be
        # *empty* -- still a subset of [arg0, ..., arg(i-1)], but
        # not a very interesting subset!
    def _match_long_opt(self, opt, explicit_value, state):
        # Resolve a long option, suggesting close matches when unknown.
        if opt not in self._long_opt:
            from difflib import get_close_matches
            possibilities = get_close_matches(opt, self._long_opt)
            raise NoSuchOption(opt, possibilities=possibilities, ctx=self.ctx)
        option = self._long_opt[opt]
        if option.takes_value:
            # At this point it's safe to modify rargs by injecting the
            # explicit value, because no exception is raised in this
            # branch.  This means that the inserted value will be fully
            # consumed.
            if explicit_value is not None:
                state.rargs.insert(0, explicit_value)
            value = self._get_value_from_state(opt, option, state)
        elif explicit_value is not None:
            raise BadOptionUsage(opt, f"{opt} option does not take a value")
        else:
            value = None
        option.process(value, state)
    def _match_short_opt(self, arg, state):
        # Walk the characters of a short-option cluster like "-abc",
        # matching each one individually.
        stop = False
        i = 1
        prefix = arg[0]
        unknown_options = []
        for ch in arg[1:]:
            opt = normalize_opt(f"{prefix}{ch}", self.ctx)
            option = self._short_opt.get(opt)
            i += 1
            if not option:
                if self.ignore_unknown_options:
                    unknown_options.append(ch)
                    continue
                raise NoSuchOption(opt, ctx=self.ctx)
            if option.takes_value:
                # Any characters left in arg?  Pretend they're the
                # next arg, and stop consuming characters of arg.
                if i < len(arg):
                    state.rargs.insert(0, arg[i:])
                    stop = True
                value = self._get_value_from_state(opt, option, state)
            else:
                value = None
            option.process(value, state)
            if stop:
                break
        # If we got any unknown options we re-combinate the string of the
        # remaining options and re-attach the prefix, then report that
        # to the state as new larg.  This way there is basic combinatorics
        # that can be achieved while still ignoring unknown arguments.
        if self.ignore_unknown_options and unknown_options:
            state.largs.append(f"{prefix}{''.join(unknown_options)}")
    def _get_value_from_state(self, option_name, option, state):
        # Pull this option's value(s) off the front of state.rargs,
        # honoring the flag-without-value sentinel where allowed.
        nargs = option.nargs
        if len(state.rargs) < nargs:
            if option.obj._flag_needs_value:
                # Option allows omitting the value.
                value = _flag_needs_value
            else:
                n_str = "an argument" if nargs == 1 else f"{nargs} arguments"
                raise BadOptionUsage(
                    option_name, f"{option_name} option requires {n_str}."
                )
        elif nargs == 1:
            next_rarg = state.rargs[0]
            if (
                option.obj._flag_needs_value
                and isinstance(next_rarg, str)
                and next_rarg[:1] in self._opt_prefixes
                and len(next_rarg) > 1
            ):
                # The next arg looks like the start of an option, don't
                # use it as the value if omitting the value is allowed.
                value = _flag_needs_value
            else:
                value = state.rargs.pop(0)
        else:
            value = tuple(state.rargs[:nargs])
            del state.rargs[:nargs]
        return value
    def _process_opts(self, arg, state):
        # Dispatch one option token: try long matching first, then fall
        # back to the short-option cluster logic.
        explicit_value = None
        # Long option handling happens in two parts.  The first part is
        # supporting explicitly attached values.  In any case, we will try
        # to long match the option first.
        if "=" in arg:
            long_opt, explicit_value = arg.split("=", 1)
        else:
            long_opt = arg
        norm_long_opt = normalize_opt(long_opt, self.ctx)
        # At this point we will match the (assumed) long option through
        # the long option matching code.  Note that this allows options
        # like "-foo" to be matched as long options.
        try:
            self._match_long_opt(norm_long_opt, explicit_value, state)
        except NoSuchOption:
            # At this point the long option matching failed, and we need
            # to try with short options.  However there is a special rule
            # which says, that if we have a two character options prefix
            # (applies to "--foo" for instance), we do not dispatch to the
            # short option code and will instead raise the no option
            # error.
            if arg[:2] not in self._opt_prefixes:
                return self._match_short_opt(arg, state)
            if not self.ignore_unknown_options:
                raise
            state.largs.append(arg)
| {
"content_hash": "8ad68f17882eff5fd54041b7c3b84ee8",
"timestamp": "",
"source": "github",
"line_count": 468,
"max_line_length": 85,
"avg_line_length": 36.05769230769231,
"alnum_prop": 0.5839407407407408,
"repo_name": "mitsuhiko/click",
"id": "d730e010669445e9a5c0a58a2e13116391d32787",
"size": "16875",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/click/parser.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1842"
},
{
"name": "Makefile",
"bytes": "252"
},
{
"name": "Python",
"bytes": "306395"
}
],
"symlink_target": ""
} |
import os
import sys
sys.path.insert(0, os.path.abspath('../'))
# -- Project information -----------------------------------------------------
project = 'libcloudforensics'
copyright = '2020, Google'
author = 'Google'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'recommonmark',
    'sphinx.ext.autodoc',
    'sphinx.ext.napoleon',
    'sphinx_autodoc_typehints',
]
# set_type_checking_flag = True
# Document that holds the root toctree.
master_doc = 'index'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Map source-file extensions to the parser Sphinx should use.
source_suffix = {
    '.rst': 'restructuredtext',
    '.txt': 'markdown',
    '.md': 'markdown',
}
from recommonmark.transform import AutoStructify
# NOTE(review): github_doc_root is not referenced anywhere in this file —
# presumably intended for recommonmark's url_resolver; confirm before removal.
github_doc_root = 'https://github.com/google/libcloudforensics/tree/master/doc/'
def setup(app):
    """Sphinx extension hook: configure recommonmark and register the
    AutoStructify transform."""
    recommonmark_settings = {
        'enable_auto_doc_ref': False,
    }
    app.add_config_value('recommonmark_config', recommonmark_settings, True)
    app.add_transform(AutoStructify)
| {
"content_hash": "2f6f9439595380923781d95700af69ac",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 80,
"avg_line_length": 28.78125,
"alnum_prop": 0.6536373507057546,
"repo_name": "google/cloud-forensics-utils",
"id": "66c99eaaa208ba5b2535baf97f47a2a62c7e5df2",
"size": "2395",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "843359"
},
{
"name": "Shell",
"bytes": "3622"
}
],
"symlink_target": ""
} |
from nhl_schedule_translate import app
if __name__ == '__main__':
    # Development entry point only: enable Flask's interactive debugger /
    # auto-reloader before starting the server.
    app.debug = True
app.run() | {
"content_hash": "3dec71ba10e883a1bc653a61ba452db1",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 38,
"avg_line_length": 20.2,
"alnum_prop": 0.6039603960396039,
"repo_name": "glebb/Nhl-Schedule-translate-2013-2014",
"id": "733b2004afdbb97537ee723c3f86255777b98adc",
"size": "101",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dev_server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15895"
},
{
"name": "JavaScript",
"bytes": "2272"
},
{
"name": "Python",
"bytes": "7793"
}
],
"symlink_target": ""
} |
from telemetry.page import page as page_module
from telemetry import story
class ToughCanvasCasesPage(page_module.Page):
  """Base page for the tough-canvas-cases story set: waits for the document
  to finish loading, then records a 5-second 'CanvasAnimation' interaction
  (the demos drive their own animation)."""

  def __init__(self, url, page_set):
    super(ToughCanvasCasesPage, self).__init__(url=url, page_set=page_set)
    self.archive_data_file = 'data/tough_canvas_cases.json'

  def RunNavigateSteps(self, action_runner):
    super(ToughCanvasCasesPage, self).RunNavigateSteps(action_runner)
    # The canvas demos are self-driven; just wait for the load to complete.
    action_runner.WaitForJavaScriptCondition(
        "document.readyState == 'complete'")

  def RunPageInteractions(self, action_runner):
    with action_runner.CreateInteraction('CanvasAnimation'):
      action_runner.Wait(5)
class MicrosofFirefliesPage(ToughCanvasCasesPage):
  """Microsoft 'Fireflies' IE testdrive demo page.

  NOTE(review): class name is missing the 't' in 'Microsoft'; kept as-is
  because renaming would break external references."""

  def __init__(self, page_set):
    super(MicrosofFirefliesPage, self).__init__(
      # pylint: disable=line-too-long
      url='http://ie.microsoft.com/testdrive/Performance/Fireflies/Default.html',
      page_set=page_set)
class ToughCanvasCasesPageSet(story.StorySet):
  """
  Description: Self-driven Canvas2D animation examples
  """

  def __init__(self):
    super(ToughCanvasCasesPageSet, self).__init__(
        archive_data_file='data/tough_canvas_cases.json',
        cloud_storage_bucket=story.PARTNER_BUCKET)

    # Crashes on Galaxy Nexus. crbug.com/314131
    # self.AddStory(MicrosofFirefliesPage(self))

    # Failing on Nexus 5 (http://crbug.com/364248):
    # 'http://geoapis.appspot.com/agdnZW9hcGlzchMLEgtFeGFtcGxlQ29kZRjh1wIM',

    # One generic ToughCanvasCasesPage story per URL, added in this order.
    page_urls = (
        'http://runway.countlessprojects.com/prototype/performance_test.html',
        # pylint: disable=line-too-long
        'http://ie.microsoft.com/testdrive/Performance/FishIETank/Default.html',
        'http://ie.microsoft.com/testdrive/Performance/SpeedReading/Default.html',
        'http://www.kevs3d.co.uk/dev/canvask3d/k3d_test.html',
        'http://www.megidish.net/awjs/',
        'http://themaninblue.com/experiment/AnimationBenchmark/canvas/',
        'http://mix10k.visitmix.com/Entry/Details/169',
        'http://www.craftymind.com/factory/guimark2/HTML5ChartingTest.html',
        'http://www.chiptune.com/starfield/starfield.html',
        'http://jarrodoverson.com/static/demos/particleSystem/',
        'http://www.effectgames.com/demos/canvascycle/',
        'http://spielzeugz.de/html5/liquid-particles.html',
        'http://hakim.se/experiments/html5/magnetic/02/',
        'http://ie.microsoft.com/testdrive/Performance/LetItSnow/',
        'http://ie.microsoft.com/testdrive/Graphics/WorkerFountains/Default.html',
        'http://ie.microsoft.com/testdrive/Graphics/TweetMap/Default.html',
        'http://ie.microsoft.com/testdrive/Graphics/VideoCity/Default.html',
        'http://ie.microsoft.com/testdrive/Performance/AsteroidBelt/Default.html',
        'http://www.smashcat.org/av/canvas_test/',
        # pylint: disable=line-too-long
        'file://tough_canvas_cases/canvas2d_balls_common/bouncing_balls.html?ball=image_with_shadow&back=image',
        # pylint: disable=line-too-long
        'file://tough_canvas_cases/canvas2d_balls_common/bouncing_balls.html?ball=text&back=white&ball_count=15',
        'file://tough_canvas_cases/canvas-font-cycler.html',
        'file://tough_canvas_cases/canvas-animation-no-clear.html',
        'file://tough_canvas_cases/canvas_toBlob.html',
        'file://../../../chrome/test/data/perf/canvas_bench/many_images.html',
        'file://tough_canvas_cases/rendering_throughput/canvas_arcs.html',
        'file://tough_canvas_cases/rendering_throughput/canvas_lines.html',
        'file://tough_canvas_cases/rendering_throughput/put_get_image_data.html',
        'file://tough_canvas_cases/rendering_throughput/fill_shapes.html',
        'file://tough_canvas_cases/rendering_throughput/stroke_shapes.html',
        'file://tough_canvas_cases/rendering_throughput/bouncing_clipped_rectangles.html',
        'file://tough_canvas_cases/rendering_throughput/bouncing_gradient_circles.html',
        'file://tough_canvas_cases/rendering_throughput/bouncing_svg_images.html',
        'file://tough_canvas_cases/rendering_throughput/bouncing_png_images.html',
    )
    for page_url in page_urls:
      self.AddStory(ToughCanvasCasesPage(page_url, self))
| {
"content_hash": "104f88999fd2004ead334e25e1243326",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 111,
"avg_line_length": 46.68181818181818,
"alnum_prop": 0.7156767283349562,
"repo_name": "Samsung/ChromiumGStreamerBackend",
"id": "28ce0bb8158ccb82cf0d4b72cd7513612d09b51a",
"size": "4270",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tools/perf/page_sets/tough_canvas_cases.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from core.himesis import Himesis
import uuid
class HState2CProcDef(Himesis):
    def __init__(self):
        """
        Creates the himesis graph representing the DSLTrans rule State2CProcDef.

        The graph is assembled with hard-coded vertex indices, so the order
        of the add_node() calls below is significant and must not change:
          0-2   : MatchModel, ApplyModel and the paired_with relation node.
          3-6   : match classes State -> Transition -> EntryPoint ->
                  StateMachine.
          7-18  : apply classes LocalDef, ProcDef, Name (x4), ConditionSet,
                  Inst, Name (x4).
          19-33 : association nodes (directLink_S / directLink_T) and one
                  backward_link, wired up by the add_edges() call.
        The "equations" attribute at the end assigns attribute values to
        apply-side nodes, referencing the same indices.
        """
        # Flag this instance as compiled now
        self.is_compiled = True
        super(HState2CProcDef, self).__init__(name='HState2CProcDef', num_nodes=0, edges=[])
        # Set the graph attributes
        self["mm__"] = ['HimesisMM']
        self["name"] = """State2CProcDef"""
        self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'State2CProcDef')
        # match model. We only support one match model
        self.add_node()
        self.vs[0]["mm__"] = """MatchModel"""
        # apply model node
        self.add_node()
        self.vs[1]["mm__"] = """ApplyModel"""
        # paired with relation between match and apply models
        self.add_node()
        self.vs[2]["mm__"] = """paired_with"""
        self.vs[2]["attr1"] = """State2CProcDef"""
        # match class State() node
        self.add_node()
        self.vs[3]["mm__"] = """State"""
        self.vs[3]["attr1"] = """+"""
        # match class Transition() node
        self.add_node()
        self.vs[4]["mm__"] = """Transition"""
        self.vs[4]["attr1"] = """1"""
        # match class EntryPoint() node
        self.add_node()
        self.vs[5]["mm__"] = """EntryPoint"""
        self.vs[5]["attr1"] = """1"""
        # match class StateMachine() node
        self.add_node()
        self.vs[6]["mm__"] = """StateMachine"""
        self.vs[6]["attr1"] = """1"""
        # apply class LocalDef() node
        self.add_node()
        self.vs[7]["mm__"] = """LocalDef"""
        self.vs[7]["attr1"] = """1"""
        # apply class ProcDef() node
        self.add_node()
        self.vs[8]["mm__"] = """ProcDef"""
        self.vs[8]["attr1"] = """1"""
        # apply class Name() node
        self.add_node()
        self.vs[9]["mm__"] = """Name"""
        self.vs[9]["attr1"] = """1"""
        # apply class Name() node
        self.add_node()
        self.vs[10]["mm__"] = """Name"""
        self.vs[10]["attr1"] = """1"""
        # apply class Name() node
        self.add_node()
        self.vs[11]["mm__"] = """Name"""
        self.vs[11]["attr1"] = """1"""
        # apply class Name() node
        self.add_node()
        self.vs[12]["mm__"] = """Name"""
        self.vs[12]["attr1"] = """1"""
        # apply class ConditionSet() node
        self.add_node()
        self.vs[13]["mm__"] = """ConditionSet"""
        self.vs[13]["attr1"] = """1"""
        # apply class Inst() node
        self.add_node()
        self.vs[14]["mm__"] = """Inst"""
        self.vs[14]["attr1"] = """1"""
        # apply class Name() node
        self.add_node()
        self.vs[15]["mm__"] = """Name"""
        self.vs[15]["attr1"] = """1"""
        # apply class Name() node
        self.add_node()
        self.vs[16]["mm__"] = """Name"""
        self.vs[16]["attr1"] = """1"""
        # apply class Name() node
        self.add_node()
        self.vs[17]["mm__"] = """Name"""
        self.vs[17]["attr1"] = """1"""
        # apply class Name() node
        self.add_node()
        self.vs[18]["mm__"] = """Name"""
        self.vs[18]["attr1"] = """1"""
        # match association State--initialTransition-->Transition node
        self.add_node()
        self.vs[19]["attr1"] = """initialTransition"""
        self.vs[19]["mm__"] = """directLink_S"""
        # match association Transition--dest-->EntryPoint node
        self.add_node()
        self.vs[20]["attr1"] = """dest"""
        self.vs[20]["mm__"] = """directLink_S"""
        # match association EntryPoint--owningStateMachine-->StateMachine node
        self.add_node()
        self.vs[21]["attr1"] = """owningStateMachine"""
        self.vs[21]["mm__"] = """directLink_S"""
        # apply association LocalDef--def-->ProcDef node
        self.add_node()
        self.vs[22]["attr1"] = """def"""
        self.vs[22]["mm__"] = """directLink_T"""
        # apply association ProcDef--channelNames-->Name node
        self.add_node()
        self.vs[23]["attr1"] = """channelNames"""
        self.vs[23]["mm__"] = """directLink_T"""
        # apply association ProcDef--channelNames-->Name node
        self.add_node()
        self.vs[24]["attr1"] = """channelNames"""
        self.vs[24]["mm__"] = """directLink_T"""
        # apply association ProcDef--channelNames-->Name node
        self.add_node()
        self.vs[25]["attr1"] = """channelNames"""
        self.vs[25]["mm__"] = """directLink_T"""
        # apply association ProcDef--channelNames-->Name node
        self.add_node()
        self.vs[26]["attr1"] = """channelNames"""
        self.vs[26]["mm__"] = """directLink_T"""
        # apply association ProcDef--p-->ConditionSet node
        self.add_node()
        self.vs[27]["attr1"] = """p"""
        self.vs[27]["mm__"] = """directLink_T"""
        # apply association ConditionSet--alternative-->Inst node
        self.add_node()
        self.vs[28]["attr1"] = """alternative"""
        self.vs[28]["mm__"] = """directLink_T"""
        # apply association Inst--channelNames-->Name node
        self.add_node()
        self.vs[29]["attr1"] = """channelNames"""
        self.vs[29]["mm__"] = """directLink_T"""
        # apply association Inst--channelNames-->Name node
        self.add_node()
        self.vs[30]["attr1"] = """channelNames"""
        self.vs[30]["mm__"] = """directLink_T"""
        # apply association Inst--channelNames-->Name node
        self.add_node()
        self.vs[31]["attr1"] = """channelNames"""
        self.vs[31]["mm__"] = """directLink_T"""
        # apply association Inst--channelNames-->Name node
        self.add_node()
        self.vs[32]["attr1"] = """channelNames"""
        self.vs[32]["mm__"] = """directLink_T"""
        # backward association State---->LocalDef node
        self.add_node()
        self.vs[33]["mm__"] = """backward_link"""
        # Add the edges (source_index, target_index); association nodes sit
        # between the class nodes they connect.
        self.add_edges([
            (0,3), # matchmodel -> match_class State()
            (0,4), # matchmodel -> match_class Transition()
            (0,5), # matchmodel -> match_class EntryPoint()
            (0,6), # matchmodel -> match_class StateMachine()
            (1,7), # applymodel -> -> apply_class LocalDef()
            (1,8), # applymodel -> -> apply_class ProcDef()
            (1,9), # applymodel -> -> apply_class Name()
            (1,10), # applymodel -> -> apply_class Name()
            (1,11), # applymodel -> -> apply_class Name()
            (1,12), # applymodel -> -> apply_class Name()
            (1,13), # applymodel -> -> apply_class ConditionSet()
            (1,14), # applymodel -> -> apply_class Inst()
            (1,15), # applymodel -> -> apply_class Name()
            (1,16), # applymodel -> -> apply_class Name()
            (1,17), # applymodel -> -> apply_class Name()
            (1,18), # applymodel -> -> apply_class Name()
            (3,19), # match_class State() -> association initialTransition
            (19,4), # association initialTransition -> match_class Transition()
            (4,20), # match_class Transition() -> association dest
            (20,5), # association dest -> match_class EntryPoint()
            (5,21), # match_class EntryPoint() -> association owningStateMachine
            (21,6), # association owningStateMachine -> match_class StateMachine()
            (7,22), # apply_class LocalDef() -> association def
            (22,8), # association def -> apply_class ProcDef()
            (8,23), # apply_class ProcDef() -> association channelNames
            (23,9), # association channelNames -> apply_class Name()
            (8,24), # apply_class ProcDef() -> association channelNames
            (24,10), # association channelNames -> apply_class Name()
            (8,25), # apply_class ProcDef() -> association channelNames
            (25,11), # association channelNames -> apply_class Name()
            (8,26), # apply_class ProcDef() -> association channelNames
            (26,12), # association channelNames -> apply_class Name()
            (8,27), # apply_class ProcDef() -> association p
            (27,13), # association p -> apply_class ConditionSet()
            (13,28), # apply_class ConditionSet() -> association alternative
            (28,14), # association alternative -> apply_class Inst()
            (14,29), # apply_class Inst() -> association channelNames
            (29,15), # association channelNames -> apply_class Name()
            (14,30), # apply_class Inst() -> association channelNames
            (30,16), # association channelNames -> apply_class Name()
            (14,31), # apply_class Inst() -> association channelNames
            (31,17), # association channelNames -> apply_class Name()
            (14,32), # apply_class Inst() -> association channelNames
            (32,18), # association channelNames -> apply_class Name()
            (7,33), # apply_class LocalDef() -> backward_association
            (33,3), # backward_association -> apply_class State()
            (0,2), # matchmodel -> pairedwith
            (2,1) # pairedwith -> applyModel
        ])
        # Add the attribute equations: each entry is
        # ((node_index, attribute), expression), where expression is either
        # ('constant', value), a (node_index, attribute) reference, or a
        # ('concat', (lhs, rhs)) combination of the two.
        self["equations"] = [((3,'isComposite'),('constant','true')), ((7,'__ApplyAttribute'),('constant','localdefcompstate')), ((8,'name'),('constant','C')), ((9,'literal'),('constant','exit')), ((10,'literal'),('constant','exack')), ((11,'literal'),('constant','enp')), ((12,'literal'),('constant','sh')), ((14,'name'),('concat',(('constant','S'),(6,'name')))), ((15,'literal'),('constant','exit_in')), ((16,'literal'),('constant','exack_in')), ((17,'literal'),('concat',(('constant','A'),('concat',((5,'name'),('constant','A')))))), ((18,'literal'),('constant','sh_in')), ]
| {
"content_hash": "d19e653706a8a2579f8e7433d21cf74d",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 577,
"avg_line_length": 42.29752066115702,
"alnum_prop": 0.49833919499804613,
"repo_name": "levilucio/SyVOLT",
"id": "fccdfb4fa0326a82f5ea1acc3231277299600edc",
"size": "10236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UMLRT2Kiltera_MM/transformation/no_contains/HState2CProcDef.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class ComposeConfig(AppConfig):
    """Django application configuration for the compose app."""

    name = 'pdc.apps.compose'

    def ready(self):
        """Wire up shared pre-save signal handlers once the app registry is loaded."""
        # Imported lazily: utils may touch models, which are only safe to
        # import after Django has finished populating the app registry.
        from pdc.apps.utils.utils import connect_app_models_pre_save_signal
        connect_app_models_pre_save_signal(self)
| {
"content_hash": "c250abbf18490dbedd75a42173faf2c0",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 75,
"avg_line_length": 27.22222222222222,
"alnum_prop": 0.7142857142857143,
"repo_name": "release-engineering/product-definition-center",
"id": "3b983bcb4b1b11fac1bf4ca8d106abfb44ee628b",
"size": "354",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pdc/apps/compose/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2128"
},
{
"name": "HTML",
"bytes": "49722"
},
{
"name": "JavaScript",
"bytes": "6629"
},
{
"name": "Makefile",
"bytes": "2828"
},
{
"name": "Python",
"bytes": "1237442"
},
{
"name": "Shell",
"bytes": "94"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: creates the RegraLeadTamanho model
    # ("lead rule per size"), keyed by the 3-char size code, backing table
    # fo2_lot_regra_lead_tamanho. Do not edit field definitions by hand —
    # they must stay consistent with the recorded migration history.

    dependencies = [
        ('lotes', '0038_leadcolecao_lm_cor'),
    ]

    operations = [
        migrations.CreateModel(
            name='RegraLeadTamanho',
            fields=[
                ('tamanho', models.CharField(max_length=3, primary_key=True, serialize=False, verbose_name='Tamanho')),
                ('min_para_lm', models.IntegerField(blank=True, default=0, null=True, verbose_name='% mínimo para aplicação do lote mínimo por tamanho')),
                ('lm_cor_sozinha', models.CharField(default='s', max_length=1, verbose_name='Aplica lote mínimo por cor quando único tamanho')),
            ],
            options={
                'verbose_name': 'Regra de Lead por tamanho',
                'db_table': 'fo2_lot_regra_lead_tamanho',
            },
        ),
    ]
| {
"content_hash": "f9405b0a7905ee42e8c940a26844393a",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 154,
"avg_line_length": 36.6,
"alnum_prop": 0.5879781420765028,
"repo_name": "anselmobd/fo2",
"id": "9f01a3a1f0f4e6960988fd156d3eacffd0e648b8",
"size": "995",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/lotes/migrations/0039_regraleadtamanho.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "160899"
},
{
"name": "HTML",
"bytes": "855985"
},
{
"name": "JavaScript",
"bytes": "203109"
},
{
"name": "PLSQL",
"bytes": "2762"
},
{
"name": "Python",
"bytes": "3228268"
},
{
"name": "Shell",
"bytes": "2161"
}
],
"symlink_target": ""
} |
import json
from telegram import ParseMode
class TestParseMode:
    """Checks that Telegram strips markup and delivers plain rendered text."""

    markdown_text = "*bold* _italic_ [link](http://google.com)."
    html_text = '<b>bold</b> <i>italic</i> <a href="http://google.com">link</a>.'
    # Plain text both markup flavours should render to.
    formatted_text_formatted = u'bold italic link.'

    def test_send_message_with_parse_mode_markdown(self, bot, chat_id):
        sent = bot.sendMessage(chat_id=chat_id, text=self.markdown_text,
                               parse_mode=ParseMode.MARKDOWN)
        # Round-trip through JSON to make sure the message serializes cleanly.
        json.loads(sent.to_json())
        assert sent.text == self.formatted_text_formatted

    def test_send_message_with_parse_mode_html(self, bot, chat_id):
        sent = bot.sendMessage(chat_id=chat_id, text=self.html_text,
                               parse_mode=ParseMode.HTML)
        # Round-trip through JSON to make sure the message serializes cleanly.
        json.loads(sent.to_json())
        assert sent.text == self.formatted_text_formatted
| {
"content_hash": "93571c578e3dbf5596b1efefd77e20da",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 81,
"avg_line_length": 39.34782608695652,
"alnum_prop": 0.6143646408839779,
"repo_name": "rogerscristo/BotFWD",
"id": "d64b66433887f5dc61038c718b8b0c3acc73dc67",
"size": "1735",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "env/lib/python3.6/site-packages/pytests/test_parsemode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13999"
}
],
"symlink_target": ""
} |
"""Base class for optimizers."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import slot_creator
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
def _get_variable_for(v):
  """Returns the ResourceVariable responsible for v, or v if not necessary."""
  # Eager tensors and anything that is not a resource handle map to itself.
  if context.in_eager_mode():
    return v
  if v.op.type != "VarHandleOp":
    return v
  # v is a resource handle: find the trainable ResourceVariable that owns it.
  for candidate in variables.trainable_variables():
    if (isinstance(candidate, resource_variable_ops.ResourceVariable)
        and candidate.handle.op is v.op):
      return candidate
  raise ValueError("Got %s but could not locate source variable." % (str(v)))
def _deduplicate_indexed_slices(values, indices):
  """Sums `values` associated with any non-unique `indices`.

  Args:
    values: A `Tensor` with rank >= 1.
    indices: A one-dimensional integer `Tensor`, indexing into the first
      dimension of `values` (as in an IndexedSlices object).

  Returns:
    A tuple of (`summed_values`, `unique_indices`) where `unique_indices` is a
    de-duplicated version of `indices` and `summed_values` contains the sum of
    `values` slices associated with each unique index.
  """
  uniq, positions = array_ops.unique(indices)
  num_uniq = array_ops.shape(uniq)[0]
  # Segment-sum collapses every slice that shared an index onto one row.
  summed = math_ops.unsorted_segment_sum(values, positions, num_uniq)
  return (summed, uniq)
def _var_key(var):
  """Returns a hashable key identifying `var` across optimizer state dicts."""
  if not context.in_eager_mode():
    # Graph mode: the (graph, op name) pair is unique per variable.
    return (var.op.graph, var.op.name)
  return var._shared_name  # pylint: disable=protected-access
class _OptimizableVariable(object):
  """Interface for abstracting over variables in the optimizers."""
  # NOTE(review): the class inherits from `object`, not `abc.ABC` (nor does it
  # set ABCMeta), so @abc.abstractmethod is not enforced at instantiation;
  # the NotImplementedError raises are the actual runtime guard.

  @abc.abstractmethod
  def target(self):
    """Returns the optimization target for this variable."""
    raise NotImplementedError("Calling an abstract method.")

  @abc.abstractmethod
  def update_op(self, optimizer, g):
    """Returns the update ops for updating the variable."""
    raise NotImplementedError("Calling an abstract method.")
class _RefVariableProcessor(_OptimizableVariable):
  """Processor for Variable."""

  def __init__(self, v):
    self._v = v

  def target(self):
    return self._v._ref()  # pylint: disable=protected-access

  def update_op(self, optimizer, g):
    # Sparse path: anything that is not a dense Tensor must be IndexedSlices.
    if not isinstance(g, ops.Tensor):
      assert isinstance(g, ops.IndexedSlices), ("Gradient ", g, " is neither a "
                                                "tensor nor IndexedSlices.")
      if self._v.constraint is not None:
        raise RuntimeError(
            "Cannot use a constraint function on a sparse variable.")
      # pylint: disable=protected-access
      return optimizer._apply_sparse_duplicate_indices(g, self._v)
    # Dense path; re-apply the constraint projection after the update.
    update_op = optimizer._apply_dense(g, self._v)  # pylint: disable=protected-access
    if self._v.constraint is None:
      return update_op
    with ops.control_dependencies([update_op]):
      return self._v.assign(self._v.constraint(self._v))
class _DenseReadResourceVariableProcessor(_OptimizableVariable):
  """Processor for dense ResourceVariables."""

  def __init__(self, v):
    self._v = v

  def target(self):
    return self._v

  def update_op(self, optimizer, g):
    # pylint: disable=protected-access
    # Apply against the read op's input (the underlying resource handle).
    update_op = optimizer._resource_apply_dense(g, self._v.op.inputs[0])
    if self._v.constraint is None:
      return update_op
    # Re-project onto the constraint set after the update completes.
    with ops.control_dependencies([update_op]):
      return self._v.assign(self._v.constraint(self._v))
class _DenseResourceVariableProcessor(_OptimizableVariable):
  """Processor for dense ResourceVariables."""

  def __init__(self, v):
    self._v = v

  def target(self):
    return self._v

  def update_op(self, optimizer, g):
    # pylint: disable=protected-access
    # Sparse gradients take the IndexedSlices route; constraints unsupported.
    if isinstance(g, ops.IndexedSlices):
      if self._v.constraint is not None:
        raise RuntimeError(
            "Cannot use a constraint function on a sparse variable.")
      return optimizer._resource_apply_sparse_duplicate_indices(
          g.values, self._v, g.indices)
    update_op = optimizer._resource_apply_dense(g, self._v)
    if self._v.constraint is None:
      return update_op
    # Re-project onto the constraint set after the update completes.
    with ops.control_dependencies([update_op]):
      return self._v.assign(self._v.constraint(self._v))
class _StreamingModelPortProcessor(_OptimizableVariable):
  """Processor for streaming ModelPorts."""

  def __init__(self, v):
    self._v = v

  def target(self):
    return self._v

  def update_op(self, optimizer, g):
    # Model ports are not updated by the optimizer; the "gradient" is passed
    # through unchanged rather than applied.
    return g
def _get_processor(v):
  """The processor of v.

  Args:
    v: A variable-like object (Variable, resource variable handle, or
      streaming model port).

  Returns:
    The `_OptimizableVariable` wrapper appropriate for `v`.

  Raises:
    NotImplementedError: If no processor exists for `v`'s type.
  """
  if context.in_eager_mode():
    return _DenseResourceVariableProcessor(v)
  if v.op.type == "VarHandleOp":
    return _DenseResourceVariableProcessor(v)
  if isinstance(v, variables.Variable):
    return _RefVariableProcessor(v)
  if v.op.type == "SubmodelPort":
    return _StreamingModelPortProcessor(v)
  # Fix: the original passed two args to NotImplementedError, so str(exc)
  # rendered as a tuple; format the value into a single message instead.
  raise NotImplementedError("Trying to optimize unsupported type %s" % v)
@tf_export("train.Optimizer")
class Optimizer(object):
"""Base class for optimizers.
This class defines the API to add Ops to train a model. You never use this
class directly, but instead instantiate one of its subclasses such as
`GradientDescentOptimizer`, `AdagradOptimizer`, or `MomentumOptimizer`.
### Usage
```python
# Create an optimizer with the desired parameters.
opt = GradientDescentOptimizer(learning_rate=0.1)
# Add Ops to the graph to minimize a cost by updating a list of variables.
# "cost" is a Tensor, and the list of variables contains tf.Variable
# objects.
opt_op = opt.minimize(cost, var_list=<list of variables>)
```
In the training program you will just have to run the returned Op.
```python
# Execute opt_op to do one step of training:
opt_op.run()
```
### Processing gradients before applying them.
Calling `minimize()` takes care of both computing the gradients and
applying them to the variables. If you want to process the gradients
before applying them you can instead use the optimizer in three steps:
1. Compute the gradients with `compute_gradients()`.
2. Process the gradients as you wish.
3. Apply the processed gradients with `apply_gradients()`.
Example:
```python
# Create an optimizer.
opt = GradientDescentOptimizer(learning_rate=0.1)
# Compute the gradients for a list of variables.
grads_and_vars = opt.compute_gradients(loss, <list of variables>)
# grads_and_vars is a list of tuples (gradient, variable). Do whatever you
# need to the 'gradient' part, for example cap them, etc.
capped_grads_and_vars = [(MyCapper(gv[0]), gv[1]) for gv in grads_and_vars]
# Ask the optimizer to apply the capped gradients.
opt.apply_gradients(capped_grads_and_vars)
```
### Gating Gradients
Both `minimize()` and `compute_gradients()` accept a `gate_gradients`
argument that controls the degree of parallelism during the application of
the gradients.
The possible values are: `GATE_NONE`, `GATE_OP`, and `GATE_GRAPH`.
<b>`GATE_NONE`</b>: Compute and apply gradients in parallel. This provides
the maximum parallelism in execution, at the cost of some non-reproducibility
in the results. For example the two gradients of `matmul` depend on the input
values: With `GATE_NONE` one of the gradients could be applied to one of the
inputs _before_ the other gradient is computed resulting in non-reproducible
results.
<b>`GATE_OP`</b>: For each Op, make sure all gradients are computed before
they are used. This prevents race conditions for Ops that generate gradients
for multiple inputs where the gradients depend on the inputs.
<b>`GATE_GRAPH`</b>: Make sure all gradients for all variables are computed
before any one of them is used. This provides the least parallelism but can
be useful if you want to process all gradients before applying any of them.
### Slots
Some optimizer subclasses, such as `MomentumOptimizer` and `AdagradOptimizer`
allocate and manage additional variables associated with the variables to
train. These are called <i>Slots</i>. Slots have names and you can ask the
optimizer for the names of the slots that it uses. Once you have a slot name
you can ask the optimizer for the variable it created to hold the slot value.
This can be useful if you want to log debug a training algorithm, report stats
about the slots, etc.
"""
# Values for gate_gradients.
GATE_NONE = 0
GATE_OP = 1
GATE_GRAPH = 2
def __init__(self, use_locking, name):
"""Create a new Optimizer.
This must be called by the constructors of subclasses.
Args:
use_locking: Bool. If True apply use locks to prevent concurrent updates
to variables.
name: A non-empty string. The name to use for accumulators created
for the optimizer.
Raises:
ValueError: If name is malformed.
"""
if not name:
raise ValueError("Must specify the optimizer name")
self._use_locking = use_locking
self._name = name
# Dictionary of slots.
# {slot_name : { variable_to_train: slot_for_the_variable, ...}, ... }
self._slots = {}
self._non_slot_dict = {}
def get_name(self):
return self._name
def minimize(self, loss, global_step=None, var_list=None,
gate_gradients=GATE_OP, aggregation_method=None,
colocate_gradients_with_ops=False, name=None,
grad_loss=None):
"""Add operations to minimize `loss` by updating `var_list`.
This method simply combines calls `compute_gradients()` and
`apply_gradients()`. If you want to process the gradient before applying
them call `compute_gradients()` and `apply_gradients()` explicitly instead
of using this function.
Args:
loss: A `Tensor` containing the value to minimize.
global_step: Optional `Variable` to increment by one after the
variables have been updated.
var_list: Optional list or tuple of `Variable` objects to update to
minimize `loss`. Defaults to the list of variables collected in
the graph under the key `GraphKeys.TRAINABLE_VARIABLES`.
gate_gradients: How to gate the computation of gradients. Can be
`GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
aggregation_method: Specifies the method used to combine gradient terms.
Valid values are defined in the class `AggregationMethod`.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
name: Optional name for the returned operation.
grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
Returns:
An Operation that updates the variables in `var_list`. If `global_step`
was not `None`, that operation also increments `global_step`.
Raises:
ValueError: If some of the variables are not `Variable` objects.
@compatibility(eager)
When eager execution is enabled, `loss` should be a Python function that
takes elements of `var_list` as arguments and computes the value to be
minimized. If `var_list` is None, `loss` should take no arguments.
Minimization (and gradient computation) is done with respect to the
elements of `var_list` if not None, else with respect to any trainable
variables created during the execution of the `loss` function.
`gate_gradients`, `aggregation_method`, `colocate_gradients_with_ops` and
`grad_loss` are ignored when eager execution is enabled.
@end_compatibility
"""
grads_and_vars = self.compute_gradients(
loss, var_list=var_list, gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
grad_loss=grad_loss)
vars_with_grad = [v for g, v in grads_and_vars if g is not None]
if not vars_with_grad:
raise ValueError(
"No gradients provided for any variable, check your graph for ops"
" that do not support gradients, between variables %s and loss %s." %
([str(v) for _, v in grads_and_vars], loss))
return self.apply_gradients(grads_and_vars, global_step=global_step,
name=name)
def compute_gradients(self, loss, var_list=None,
gate_gradients=GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
grad_loss=None):
"""Compute gradients of `loss` for the variables in `var_list`.
This is the first part of `minimize()`. It returns a list
of (gradient, variable) pairs where "gradient" is the gradient
for "variable". Note that "gradient" can be a `Tensor`, an
`IndexedSlices`, or `None` if there is no gradient for the
given variable.
Args:
loss: A Tensor containing the value to minimize.
var_list: Optional list or tuple of `tf.Variable` to update to minimize
`loss`. Defaults to the list of variables collected in the graph
under the key `GraphKeys.TRAINABLE_VARIABLES`.
gate_gradients: How to gate the computation of gradients. Can be
`GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
aggregation_method: Specifies the method used to combine gradient terms.
Valid values are defined in the class `AggregationMethod`.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
Returns:
A list of (gradient, variable) pairs. Variable is always present, but
gradient can be `None`.
Raises:
TypeError: If `var_list` contains anything else than `Variable` objects.
ValueError: If some arguments are invalid.
RuntimeError: If called with eager execution enabled and if `grad_loss`
is not `None` or `loss` is not callable.
@compatibility(eager)
When eager execution is enabled, `loss` should be a Python function that
takes elements of `var_list` as arguments and computes the value to be
minimized. If `var_list` is None, `loss` should take no arguments.
Gradient computation is done with respect to the elements of `var_list` if
not None, else with respect to any trainable variables created during the
execution of the `loss` function.
`gate_gradients`, `aggregation_method`, `colocate_gradients_with_ops` and
`grad_loss` are ignored when eager execution is enabled.
@end_compatibility
"""
if context.in_eager_mode():
if grad_loss is not None:
raise RuntimeError(
"`grad_loss` argument to Optimizer.compute_gradients "
"not supported when eager execution is enabled.")
if not callable(loss):
raise RuntimeError(
"`loss` passed to Optimizer.compute_gradients should "
"be a function when eager execution is enabled.")
# TODO(agarwal): consider passing parameters to the `loss` function.
if var_list is None:
return backprop.implicit_grad(loss)()
else:
var_list = nest.flatten(var_list)
grads = backprop.gradients_function(loss)(*var_list)
grads_and_vars = list(zip(grads, var_list))
return grads_and_vars
if gate_gradients not in [Optimizer.GATE_NONE, Optimizer.GATE_OP,
Optimizer.GATE_GRAPH]:
raise ValueError("gate_gradients must be one of: Optimizer.GATE_NONE, "
"Optimizer.GATE_OP, Optimizer.GATE_GRAPH. Not %s" %
gate_gradients)
self._assert_valid_dtypes([loss])
if grad_loss is not None:
self._assert_valid_dtypes([grad_loss])
if var_list is None:
var_list = (
variables.trainable_variables() +
ops.get_collection(ops.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
else:
var_list = nest.flatten(var_list)
# pylint: disable=protected-access
var_list += ops.get_collection(ops.GraphKeys._STREAMING_MODEL_PORTS)
# pylint: enable=protected-access
processors = [_get_processor(v) for v in var_list]
if not var_list:
raise ValueError("No variables to optimize.")
var_refs = [p.target() for p in processors]
grads = gradients.gradients(
loss, var_refs, grad_ys=grad_loss,
gate_gradients=(gate_gradients == Optimizer.GATE_OP),
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops)
if gate_gradients == Optimizer.GATE_GRAPH:
grads = control_flow_ops.tuple(grads)
grads_and_vars = list(zip(grads, var_list))
self._assert_valid_dtypes(
[v for g, v in grads_and_vars
if g is not None and v.dtype != dtypes.resource])
return grads_and_vars
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
  """Apply gradients to variables.

  This is the second part of `minimize()`. It returns an `Operation` that
  applies gradients.

  Args:
    grads_and_vars: List of (gradient, variable) pairs as returned by
      `compute_gradients()`.
    global_step: Optional `Variable` to increment by one after the
      variables have been updated.
    name: Optional name for the returned operation.  Default to the
      name passed to the `Optimizer` constructor.

  Returns:
    An `Operation` that applies the specified gradients. If `global_step`
    was not None, that operation also increments `global_step`.

  Raises:
    TypeError: If `grads_and_vars` is malformed.
    ValueError: If none of the variables have gradients.
  """
  # This is a default implementation of apply_gradients() that can be shared
  # by most optimizers.  It relies on the subclass implementing the following
  # methods: _create_slots(), _prepare(), _apply_dense(), and _apply_sparse().
  grads_and_vars = tuple(grads_and_vars)  # Make sure repeat iteration works.
  if not grads_and_vars:
    raise ValueError("No variables provided.")
  converted_grads_and_vars = []
  for g, v in grads_and_vars:
    if g is not None:
      try:
        # Convert the grad to Tensor or IndexedSlices if necessary.
        g = ops.convert_to_tensor_or_indexed_slices(g)
      except TypeError:
        raise TypeError(
            "Gradient must be convertible to a Tensor"
            " or IndexedSlices, or None: %s" % g)
      if not isinstance(g, (ops.Tensor, ops.IndexedSlices)):
        raise TypeError(
            "Gradient must be a Tensor, IndexedSlices, or None: %s" % g)
    p = _get_processor(v)
    converted_grads_and_vars.append((g, v, p))
  converted_grads_and_vars = tuple(converted_grads_and_vars)
  var_list = [v for g, v, _ in converted_grads_and_vars if g is not None]
  if not var_list:
    # BUG FIX: the triples are (gradient, variable, processor); the original
    # unpacking `for _, _, v in ...` printed the processors rather than the
    # variables in the error message.
    raise ValueError("No gradients provided for any variable: %s." %
                     ([str(v) for _, v, _ in converted_grads_and_vars],))
  with ops.init_scope():
    # Let the subclass create its slot variables (e.g. momentum accumulators)
    # outside of any control-flow / function-capturing context.
    self._create_slots([_get_variable_for(v) for v in var_list])
  update_ops = []
  with ops.name_scope(name, self._name) as name:
    self._prepare()
    for grad, var, processor in converted_grads_and_vars:
      if grad is None:
        continue
      # We colocate all ops created in _apply_dense or _apply_sparse
      # on the same device as the variable.
      # TODO(apassos): figure out how to get the variable name here.
      scope_name = var.op.name if context.in_graph_mode() else ""
      with ops.name_scope("update_" + scope_name), ops.colocate_with(var):
        update_ops.append(processor.update_op(self, grad))
    if global_step is None:
      apply_updates = self._finish(update_ops, name)
    else:
      # Increment global_step only after all variable updates have run.
      with ops.control_dependencies([self._finish(update_ops, "update")]):
        with ops.colocate_with(global_step):
          if isinstance(global_step, resource_variable_ops.ResourceVariable):
            # TODO(apassos): the implicit read in assign_add is slow; consider
            # making it less so.
            apply_updates = resource_variable_ops.assign_add_variable_op(
                global_step.handle,
                ops.convert_to_tensor(1, dtype=global_step.dtype),
                name=name)
          else:
            apply_updates = state_ops.assign_add(global_step, 1, name=name)
    if context.in_graph_mode():
      if isinstance(apply_updates, ops.Tensor):
        apply_updates = apply_updates.op
      # Register the op in the TRAIN_OP collection exactly once.
      train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
      if apply_updates not in train_op:
        train_op.append(apply_updates)
    return apply_updates
def get_slot(self, var, name):
  """Return a slot named `name` created for `var` by the Optimizer.

  Some `Optimizer` subclasses use additional variables.  For example
  `Momentum` and `Adagrad` use variables to accumulate updates.  This method
  gives access to these `Variable` objects if for some reason you need them.

  Use `get_slot_names()` to get the list of slot names created by the
  `Optimizer`.

  Args:
    var: A variable passed to `minimize()` or `apply_gradients()`.
    name: A string.

  Returns:
    The `Variable` for the slot if it was created, `None` otherwise.
  """
  slots_for_name = self._slots.get(name)
  if slots_for_name:
    return slots_for_name.get(_var_key(var))
  return None
def get_slot_names(self):
  """Return a sorted list of slot names created by the `Optimizer`.

  See `get_slot()`.

  Returns:
    A list of strings.
  """
  slot_names = list(self._slots)
  slot_names.sort()
  return slot_names
def variables(self):
  """A list of variables which encode the current state of `Optimizer`.

  Includes slot variables and additional global variables created by the
  optimizer in the current default graph.

  Returns:
    A list of variables.
  """
  is_eager = context.in_eager_mode()
  graph_now = ops.get_default_graph()

  def _belongs_to_current_graph(variable):
    if is_eager:
      # No variable.op in eager mode.  We don't expect lots of eager
      # graphs, but behavior should be consistent with graph mode.
      return variable._graph_key == graph_now._graph_key  # pylint: disable=protected-access
    return variable.op.graph is graph_now

  collected = [v for v in self._non_slot_variables()
               if _belongs_to_current_graph(v)]
  for variable_dict in self._slots.values():
    for slot_variable in variable_dict.values():
      if _belongs_to_current_graph(slot_variable):
        collected.append(slot_variable)
  # Sort variables by name so that the return is deterministic.
  return sorted(collected, key=lambda v: v.name)
def _create_non_slot_variable(self, initial_value, name, colocate_with):
  """Add an extra variable, not associated with a slot."""
  # In graph mode the cache is keyed per-graph; eager uses a single key.
  graph = colocate_with.graph if context.in_graph_mode() else None
  key = (name, graph)
  existing = self._non_slot_dict.get(key)
  if existing is not None:
    return existing
  with ops.colocate_with(colocate_with):
    created = variable_scope.variable(initial_value, name=name,
                                      trainable=False)
  self._non_slot_dict[key] = created
  return created
def _get_non_slot_variable(self, name, graph=None):
return self._non_slot_dict.get((name, graph), None)
def _non_slot_variables(self):
"""Additional variables created by the `Optimizer`.
Returns:
A list or tuple of variables.
"""
return self._non_slot_dict.values()
def _assert_valid_dtypes(self, tensors):
"""Asserts tensors are all valid types (see `_valid_dtypes`).
Args:
tensors: Tensors to check.
Raises:
ValueError: If any tensor is not a valid type.
"""
valid_dtypes = self._valid_dtypes()
for t in tensors:
dtype = t.dtype.base_dtype
if dtype not in valid_dtypes:
raise ValueError(
"Invalid type %r for %s, expected: %s." % (
dtype, t.name, [v for v in valid_dtypes]))
# --------------
# Methods to be implemented by subclasses if they want to use the
# inherited implementation of apply_gradients() or compute_gradients().
# --------------
def _valid_dtypes(self):
  """Valid types for loss, variables and gradients.

  Subclasses should override to allow other float types.

  Returns:
    Valid types for loss, variables and gradients.
  """
  return {dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64}
def _create_slots(self, var_list):
"""Create all slots needed by the variables.
Args:
var_list: A list of `Variable` objects.
"""
# No slots needed by default
pass
def _prepare(self):
"""Create all needed tensors before applying gradients.
This is called with the name_scope using the "name" that
users have chosen for the application of gradients.
"""
pass
def _apply_dense(self, grad, var):
"""Add ops to apply dense gradients to `var`.
Args:
grad: A `Tensor`.
var: A `Variable` object.
Returns:
An `Operation`.
"""
raise NotImplementedError()
def _resource_apply_dense(self, grad, handle):
"""Add ops to apply dense gradients to the variable `handle`.
Args:
grad: a `Tensor` representing the gradient.
handle: a `Tensor` of dtype `resource` which points to the variable
to be updated.
Returns:
An `Operation` which updates the value of the variable.
"""
raise NotImplementedError()
def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices):
  """Add ops to apply sparse gradients to `handle`, with repeated indices.

  Optimizers which override this method must deal with repeated indices.  See
  the docstring of `_apply_sparse_duplicate_indices` for details.  By default
  the correct behavior, to sum non-unique indices and their associated
  gradients, is enforced by first pre-processing `grad` and `indices` and
  passing them on to `_resource_apply_sparse`.  Optimizers which deal
  correctly with duplicate indices may instead override this method to avoid
  the overhead of summing.

  Args:
    grad: a `Tensor` representing the gradient for the affected indices.
    handle: a `Tensor` of dtype `resource` which points to the variable
      to be updated.
    indices: a `Tensor` of integral type representing the indices for
      which the gradient is nonzero.  Indices may be repeated.

  Returns:
    An `Operation` which updates the value of the variable.
  """
  # Collapse duplicate indices by summation, then defer to the
  # unique-index implementation.
  deduped_grad, deduped_indices = _deduplicate_indexed_slices(
      values=grad, indices=indices)
  return self._resource_apply_sparse(deduped_grad, handle, deduped_indices)
def _resource_apply_sparse(self, grad, handle, indices):
"""Add ops to apply sparse gradients to the variable `handle`.
Similar to `_apply_sparse`, the `indices` argument to this method has been
de-duplicated. Optimizers which deal correctly with non-unique indices may
instead override `_resource_apply_sparse_duplicate_indices` to avoid this
overhead.
Args:
grad: a `Tensor` representing the gradient for the affected indices.
handle: a `Tensor` of dtype `resource` which points to the variable
to be updated.
indices: a `Tensor` of integral type representing the indices for
which the gradient is nonzero. Indices are unique.
Returns:
An `Operation` which updates the value of the variable.
"""
raise NotImplementedError()
def _apply_sparse_duplicate_indices(self, grad, var):
  """Add ops to apply sparse gradients to `var`, with repeated sparse indices.

  Optimizers which override this method must deal with IndexedSlices objects
  such as the following:

    IndexedSlicesValue(values=[1, 1], indices=[0, 0], dense_shape=[1])

  The correct interpretation is:

    IndexedSlicesValue(values=[2], indices=[0], dense_shape=[1])

  Many optimizers deal incorrectly with repeated indices when updating based
  on sparse gradients (e.g. summing squares rather than squaring the sum, or
  applying momentum terms multiple times).  Adding first is always the
  correct behavior, so this is enforced here by reconstructing the
  IndexedSlices to have only unique indices, then calling _apply_sparse.

  Optimizers which deal correctly with repeated indices may instead override
  this method to avoid the overhead of summing indices.

  Args:
    grad: `IndexedSlices`.
    var: A `Variable` object.

  Returns:
    An `Operation`.
  """
  deduped_values, deduped_indices = _deduplicate_indexed_slices(
      values=grad.values, indices=grad.indices)
  unique_grad = ops.IndexedSlices(
      indices=deduped_indices,
      values=deduped_values,
      dense_shape=grad.dense_shape)
  return self._apply_sparse(unique_grad, var)
def _apply_sparse(self, grad, var):
"""Add ops to apply sparse gradients to `var`.
The IndexedSlices object passed to `grad` in this function is by default
pre-processed in `_apply_sparse_duplicate_indices` to remove duplicate
indices (see its docstring for details). Optimizers which can tolerate or
have correct special cases for duplicate sparse indices may override
`_apply_sparse_duplicate_indices` instead of this function, avoiding that
overhead.
Args:
grad: `IndexedSlices`, with no repeated indices.
var: A `Variable` object.
Returns:
An `Operation`.
"""
raise NotImplementedError()
def _finish(self, update_ops, name_scope):
  """Do what is needed to finish the update.

  This is called with the `name_scope` using the "name" that
  users have chosen for the application of gradients.

  Args:
    update_ops: List of `Operation` objects to update variables.  This list
      contains the values returned by the `_apply_dense()` and
      `_apply_sparse()` calls.
    name_scope: String.  Name to use for the returned operation.

  Returns:
    The operation to apply updates.
  """
  grouped_updates = control_flow_ops.group(*update_ops, name=name_scope)
  return grouped_updates
# --------------
# Utility methods for subclasses.
# --------------
def _slot_dict(self, slot_name):
"""Returns a dict for caching slots created under the given name.
Args:
slot_name: Name for the slot.
Returns:
A dict that maps primary `Variable` objects to the slot created
for that variable, under the given slot name.
"""
named_slots = self._slots.get(slot_name, None)
if named_slots is None:
named_slots = {}
self._slots[slot_name] = named_slots
return named_slots
def _get_or_make_slot(self, var, val, slot_name, op_name):
  """Find or create a slot for a variable.

  Args:
    var: A `Variable` object.
    val: A `Tensor`.  The initial value of the slot.
    slot_name: Name for the slot.
    op_name: Name to use when scoping the Variable that
      needs to be created for the slot.

  Returns:
    A `Variable` object.
  """
  slots = self._slot_dict(slot_name)
  key = _var_key(var)
  if key not in slots:
    slots[key] = slot_creator.create_slot(var, val, op_name)
  return slots[key]
def _get_or_make_slot_with_initializer(self, var, initializer, shape, dtype,
                                       slot_name, op_name):
  """Find or create a slot for a variable, using an Initializer.

  Args:
    var: A `Variable` object.
    initializer: An `Initializer`.  The initial value of the slot.
    shape: Shape of the initial value of the slot.
    dtype: Type of the value of the slot.
    slot_name: Name for the slot.
    op_name: Name to use when scoping the Variable that
      needs to be created for the slot.

  Returns:
    A `Variable` object.
  """
  slots = self._slot_dict(slot_name)
  key = _var_key(var)
  if key not in slots:
    slots[key] = slot_creator.create_slot_with_initializer(
        var, initializer, shape, dtype, op_name)
  return slots[key]
def _zeros_slot(self, var, slot_name, op_name):
  """Find or create a slot initialized with 0.0.

  Args:
    var: A `Variable` object.
    slot_name: Name for the slot.
    op_name: Name to use when scoping the Variable that
      needs to be created for the slot.

  Returns:
    A `Variable` object.
  """
  slots = self._slot_dict(slot_name)
  key = _var_key(var)
  if key not in slots:
    slots[key] = slot_creator.create_zeros_slot(var, op_name)
  return slots[key]
| {
"content_hash": "c96ee0a0383ce8e6b0013ed04f67f61a",
"timestamp": "",
"source": "github",
"line_count": 896,
"max_line_length": 98,
"avg_line_length": 37.4453125,
"alnum_prop": 0.6720813090518911,
"repo_name": "nolanliou/tensorflow",
"id": "9ec588bac96d8c8404dee994bc5991f897abbf77",
"size": "34241",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/python/training/optimizer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8535"
},
{
"name": "C",
"bytes": "315308"
},
{
"name": "C++",
"bytes": "35015139"
},
{
"name": "CMake",
"bytes": "187223"
},
{
"name": "Go",
"bytes": "1016631"
},
{
"name": "Java",
"bytes": "541479"
},
{
"name": "Jupyter Notebook",
"bytes": "1940884"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "44805"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94716"
},
{
"name": "PHP",
"bytes": "1429"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "30431649"
},
{
"name": "Ruby",
"bytes": "547"
},
{
"name": "Shell",
"bytes": "402294"
}
],
"symlink_target": ""
} |
from lettuce import world, after, before
from tools import terrain_steps
from tools import environment_request
from tools.environment_request import EnvironmentRequest
from tools.constants import PAAS, KEYSTONE_URL, PAASMANAGER_URL, TENANT, USER,\
PASSWORD, VDC, SDC_URL
@before.each_feature
def before_each_scenario(feature):
    # NOTE(review): registered with ``before.each_feature`` although the name
    # says "scenario" -- this hook runs once per feature, not per scenario;
    # confirm the name is intentional before relying on it.
    # Build the PaaS Manager client shared (via lettuce's ``world``) by all
    # steps of this feature, from the [PAAS] section of the test config.
    world.env_requests = EnvironmentRequest(world.config[PAAS][KEYSTONE_URL],
    world.config[PAAS][PAASMANAGER_URL],
    world.config[PAAS][TENANT],
    world.config[PAAS][USER],
    world.config[PAAS][PASSWORD],
    world.config[PAAS][VDC],
    world.config[PAAS][SDC_URL])
    # Create product in SDC to be used by this feature
    terrain_steps.init_products_in_sdc()
@after.each_scenario
def after_each_scenario(scenario):
    """Clean up after every scenario."""
    # Delete the environments created in the scenario.
    environment_request.delete_created_abstract_environments()
@after.each_feature
def after_each_feature(feature):
    """ Remove testing products in SDC """
    # Undo the SDC product registration done in the before-feature hook.
    terrain_steps.remove_testing_products_in_sdc()
| {
"content_hash": "48a35ee8bac456b859af10478c122d86",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 79,
"avg_line_length": 33,
"alnum_prop": 0.7301136363636364,
"repo_name": "telefonicaid/fiware-paas",
"id": "453105e25915ab4ffd1c1af8472ec78d83dffdb3",
"size": "1847",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "test/acceptance/integration/catalog/update_abstract_environment/terrain.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AMPL",
"bytes": "9635"
},
{
"name": "API Blueprint",
"bytes": "43554"
},
{
"name": "CSS",
"bytes": "64"
},
{
"name": "Cucumber",
"bytes": "243175"
},
{
"name": "Java",
"bytes": "2164026"
},
{
"name": "Python",
"bytes": "582746"
},
{
"name": "Shell",
"bytes": "48419"
}
],
"symlink_target": ""
} |
"""AsyncIO TCP Server for Kytos."""
import asyncio
import logging
from kytos.core.connection import Connection
from kytos.core.events import KytosEvent
LOG = logging.getLogger("atcp_server")
def exception_handler(loop, context):
    """Exception handler to avoid tracebacks because of network timeouts."""
    exc = context.get('exception')
    if isinstance(exc, TimeoutError):
        LOG.info('Lost connection on socket %r', context['transport'])
    else:
        # Anything other than a timeout keeps the default asyncio handling.
        loop.default_exception_handler(context)
class KytosServer:
    """Abstraction of a TCP Server to listen to packages from the network.

    The KytosServer will listen on the specified port
    for any new TCP request from the network and then instantiate the
    specified RequestHandler to handle the new request.
    It creates a new thread for each Handler.
    """

    def __init__(self, server_address, server_protocol, controller,
                 protocol_name):
        """Create the object without starting the server.

        Args:
            server_address (tuple): Address where the server is listening.
                example: ('127.0.0.1', 80)
            server_protocol(asyncio.Protocol):
                Class that will be instantiated to handle each request.
            controller (:class:`~kytos.core.controller.Controller`):
                An instance of Kytos Controller class.
            protocol_name (str): Southbound protocol name that will be used
        """
        self.server_address = server_address
        self.server_protocol = server_protocol
        self.controller = controller
        self.protocol_name = protocol_name
        # This will be an `asyncio.Server` instance after `serve_forever` is
        # called
        self._server = None
        # Here we compose the received `server_protocol` class with a `server`
        # object pointing to this instance
        self.server_protocol.server = self
        self.loop = asyncio.get_event_loop()
        self.loop.set_exception_handler(exception_handler)

    def serve_forever(self):
        """Handle requests until an explicit shutdown() is called."""
        addr, port = self.server_address[0], self.server_address[1]
        self._server = self.loop.create_server(self.server_protocol,
                                               addr, port)
        # BUG FIX: `task` must be pre-bound; if create_task() itself raised,
        # the except block previously hit a NameError on `task`.
        task = None
        try:
            task = self.loop.create_task(self._server)
            LOG.info("Kytos listening at %s:%s", addr, port)
        except Exception:
            LOG.error('Failed to start Kytos TCP Server at %s:%s', addr, port)
            if task is not None:
                # BUG FIX: asyncio.Task has no close(); cancel() is the
                # supported way to abort a scheduled task.
                task.cancel()
            raise

    def shutdown(self):
        """Call .close() on underlying TCP server, closing client sockets."""
        self._server.close()
        # self.loop.run_until_complete(self._server.wait_closed())
class KytosServerProtocol(asyncio.Protocol):
    """Kytos' main request handler.

    It is instantiated once per connection between each switch and the
    controller.
    The setup method will dispatch a KytosEvent (``kytos/core.connection.new``)
    on the controller, that will be processed by a Core App.
    The finish method will close the connection and dispatch a KytosEvent
    (``kytos/core.connection.closed``) on the controller.
    """

    # Well-known southbound ports and the protocol spoken on them.
    known_ports = {
        6633: 'openflow',
        6653: 'openflow'
    }

    def __init__(self):
        """Initialize protocol and check if server attribute was set."""
        self._loop = asyncio.get_event_loop()
        self.connection = None
        self.transport = None
        self._rest = b''
        # server attribute is set outside this class, in KytosServer.init()
        # Here we initialize it to None to avoid pylint warnings.
        # BUG FIX: getattr() without a default raises AttributeError when the
        # attribute is missing, which skipped the intended ValueError below.
        if not getattr(self, 'server', None):
            self.server = None
        # Then we check if it was really set
        if not self.server:
            raise ValueError("server instance must be assigned before init")

    def connection_made(self, transport):
        """Handle new client connection, passing it to the controller.

        Build a new Kytos `Connection` and send a ``kytos/core.connection.new``
        KytosEvent through the app buffer.
        """
        self.transport = transport
        addr, port = transport.get_extra_info('peername')
        _, server_port = transport.get_extra_info('sockname')
        socket = transport.get_extra_info('socket')
        LOG.info("New connection from %s:%s", addr, port)
        self.connection = Connection(addr, port, socket)
        # ASYNC TODO:
        # if self.server.protocol_name:
        #     self.known_ports[server_port] = self.server.protocol_name
        # Name the protocol after the listening port when it is not known.
        if server_port in self.known_ports:
            protocol_name = self.known_ports[server_port]
        else:
            protocol_name = f'{server_port:04d}'
        self.connection.protocol.name = protocol_name
        # ASYNC TODO:
        # self.request.settimeout(70)
        event_name = 'kytos/core.connection.new'
        # f'kytos/core.{self.connection.protocol.name}.connection.new'
        event = KytosEvent(name=event_name,
                           content={'source': self.connection})
        self._loop.create_task(self.server.controller.buffers.raw.aput(event))

    def data_received(self, data):
        """Handle each request and place its data in the raw event buffer.

        Sends the received binary data in a ``kytos/core.{protocol}.raw.in``
        event on the raw buffer.
        """
        # Prepend any leftover bytes from a previous partial message.
        data = self._rest + data
        LOG.debug("New data from %s:%s (%s bytes)",
                  self.connection.address, self.connection.port, len(data))
        # LOG.debug("New data from %s:%s (%s bytes): %s", self.addr, self.port,
        #           len(data), binascii.hexlify(data))
        content = {'source': self.connection, 'new_data': data}
        event_name = f'kytos/core.{self.connection.protocol.name}.raw.in'
        event = KytosEvent(name=event_name, content=content)
        self._loop.create_task(self.server.controller.buffers.raw.aput(event))

    def connection_lost(self, exc):
        """Close the connection socket and generate connection lost event.

        Emits a ``kytos/core.connection.lost`` event through the App buffer.
        """
        LOG.info("Connection lost with client %s:%s. Reason: %s",
                 self.connection.address, self.connection.port, exc)
        self.connection.close()
        content = {'source': self.connection}
        if exc:
            content['exception'] = exc
        event_name = \
            f'kytos/core.{self.connection.protocol.name}.connection.lost'
        event = KytosEvent(name=event_name, content=content)
        self._loop.create_task(self.server.controller.buffers.app.aput(event))
| {
"content_hash": "9e90f03f1241b28970a148f1a29cb5b8",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 79,
"avg_line_length": 36.43783783783784,
"alnum_prop": 0.6280967215546654,
"repo_name": "macartur/kytos",
"id": "58f6c544652ab00711f171150121a5ff5f945764",
"size": "6741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kytos/core/atcp_server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "187067"
}
],
"symlink_target": ""
} |
import sys; import os
sys.path.insert(0, os.path.abspath('..'))
from testing import detector_lof2d_testing, netjinn_ip_testing, netjinn_tcp_testing, tshark_testing
from unittest import TestLoader, TextTestRunner, TestSuite
if __name__ == "__main__":
    # FIX for the original "WHY THIS DOES NOT WORK?" note:
    # loadTestsFromTestCase() must receive the TestCase subclass itself.
    # Passing ``SomeTestCase.__class__`` hands it the metaclass (``type``),
    # so no tests were ever collected.
    loader = TestLoader()
    suite = TestSuite((
        loader.loadTestsFromTestCase(netjinn_ip_testing.IPJinnTestCase),
        loader.loadTestsFromTestCase(netjinn_tcp_testing.TcpJinnTestCase),
        loader.loadTestsFromTestCase(detector_lof2d_testing.LOF2DTestCase),
        loader.loadTestsFromTestCase(tshark_testing.TsharkTestCase),
    ))
    runner = TextTestRunner(verbosity=2)
runner.run(suite) | {
"content_hash": "2e4e1ecc84c6e1f37f6997932bb7b849",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 99,
"avg_line_length": 36.94736842105263,
"alnum_prop": 0.7606837606837606,
"repo_name": "mnmnc/campephilus",
"id": "8e3a044522674decec18f2c95c4e1465a326ae69",
"size": "702",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testing/All_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "32648"
}
],
"symlink_target": ""
} |
import unittest
from wethepeople import Url
class url_test(unittest.TestCase):
    """Tests for the ``Url`` wrapper type."""

    def test_url_is_string(self):
        """str() of a Url must yield a plain ``str``."""
        sample = Url("http://myurl.com")
        rendered = str(sample)
        self.assertIsInstance(rendered, str)
"content_hash": "c3dc137c2d781b55e91d238441e731ec",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 49,
"avg_line_length": 15.285714285714286,
"alnum_prop": 0.6682242990654206,
"repo_name": "Dolphman/WeThePeople",
"id": "dc156e076ba2882d63e54098503204841b6ecc88",
"size": "214",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_url.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10751"
}
],
"symlink_target": ""
} |
"""Google Cloud Game Servers sample for getting a game server cluster.
Example usage:
python get_cluster.py --project-id <project-id> --location <location> --realm-id <realm-id> --cluster-id <cluster-id>
"""
import argparse
from google.cloud import gaming
from google.cloud.gaming_v1.types import game_server_clusters
# [START cloud_game_servers_cluster_get]
def get_cluster(project_id, location, realm_id, cluster_id):
    """Gets a game server cluster."""
    client = gaming.GameServerClustersServiceClient()

    # Fully-qualified resource name of the target cluster.
    cluster_name = (
        f"projects/{project_id}/locations/{location}"
        f"/realms/{realm_id}/gameServerClusters/{cluster_id}"
    )
    request = game_server_clusters.GetGameServerClusterRequest(
        name=cluster_name,
        view=game_server_clusters.GameServerClusterView.FULL,
    )

    response = client.get_game_server_cluster(request)
    print(f"Get cluster response:\n{response}")
    return response
# [END cloud_game_servers_cluster_get]
if __name__ == "__main__":
    # CLI entry point: collect the four required identifiers, then fetch.
    parser = argparse.ArgumentParser()
    for flag, description in (
        ("--project-id", "Your cloud project ID."),
        ("--location", "Your realm location."),
        ("--realm-id", "Your realm ID."),
        ("--cluster-id", "Your game server cluster ID."),
    ):
        parser.add_argument(flag, help=description, required=True)
    args = parser.parse_args()

    get_cluster(args.project_id, args.location, args.realm_id, args.cluster_id)
| {
"content_hash": "3c5e91f8758705f4b07b3bde564e0fa5",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 121,
"avg_line_length": 33.97674418604651,
"alnum_prop": 0.7063655030800822,
"repo_name": "googleapis/python-game-servers",
"id": "6a3cbba46f60e46213911e38e1c9d6beeafa3bec",
"size": "2082",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/snippets/get_cluster.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "2394740"
},
{
"name": "Shell",
"bytes": "30678"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
from django.contrib.auth.models import User
from django.contrib.auth import logout
from django.http import HttpResponse, HttpResponseRedirect, \
HttpResponseNotFound
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
from .forms import DocumentForm
from .models import Document
from . import queries
import pprint
import datetime
import json
import random
pp = pprint.PrettyPrinter(indent=4)
def make_data(request, username):
    """Build the base template context for the signed-in user.

    ``homepage`` is True when the viewed page belongs to the signed-in user.
    NOTE(review): request.user.first_name appears to store the app-level
    customer id -- confirm against the auth setup.
    """
    is_own_page = (username == request.user.username)
    cust_id = request.user.first_name
    user_info = queries.get_user_info_by_id(cust_id)
    return {
        "username": request.user.username,
        "user_id": request.user.first_name,
        "first_name": user_info[1],
        "last_name": user_info[2],
        "homepage": is_own_page,
    }
def sort_posts(posts):
    """Return the posts ordered newest-first by the element at index 2."""
    ordered = sorted(posts, key=lambda entry: entry[2], reverse=True)
    return ordered
def build_page(username, user_info, user_id, circles, circle_name, circle_id):
    """Assemble the template context for one circle page of a profile.

    Args:
        username: page owner's username (display only).
        user_info: row from queries.get_user_info_by_id for the owner.
        user_id: page owner's id, used for page/post/like lookups.
        circles: rows describing the owner's circles.
        circle_name: name of the circle being viewed.
        circle_id: id of the circle being viewed.

    Returns:
        dict with the owner's identity, circles, current circle and the
        posts (each post augmented with author info, sorted comments and
        like counts).
    """
    page_info = queries.get_page(user_id, circle_name)
    # BUG FIX: removed leftover debug output (print("IM HERE") /
    # pp.pprint(page_info)) that polluted stdout on every page load.
    page_id = page_info[4]
    posts_info = queries.get_posts(page_id)
    posts = []
    for post in posts_info:
        author_info = queries.get_username_and_name_by_id(post[5])
        comment_info = queries.get_comments(post[0])
        comments = []
        for comment in comment_info:
            comment_author_info = queries.get_username_and_name_by_id(
                comment[4])
            num_likes, is_liked = queries.get_likes_by_comment((comment[5],),
                                                               user_id)
            comments.append(comment + comment_author_info
                            + (num_likes,) + (is_liked,))
        # Newest first; index 2 is presumably the comment timestamp.
        comments = sorted(comments, key=lambda x: x[2], reverse=True)
        num_likes, is_liked = queries.get_likes_by_post((post[0],), user_id)
        post = post + author_info + (comments,) + (num_likes,) + (is_liked,)
        posts.append(post)
    posts = sort_posts(posts)
    page_data = {"username": username,
                 "first_name": user_info[1],
                 "last_name": user_info[2],
                 "circles": circles,
                 "current_circle": {'name': circle_name, 'id': circle_id},
                 "posts": posts}
    return page_data
def upload_image(data, request, username, page_owner):
    """Handle a profile-image upload POST.

    Only the page owner may upload. Returns an HTTP response in every
    branch (the original returned None for non-owners, which crashes a
    Django view).
    """
    if request.user.username == page_owner:
        data['form'] = DocumentForm(request.POST, request.FILES)
        if data['form'].is_valid():
            newdoc = Document(username=page_owner,
                              docfile=request.FILES['docfile'])
            newdoc.save()
            # Redirect to the document list after POST
            return HttpResponseRedirect('/accounts/' + page_owner)
        else:
            # Re-render the profile with form validation errors.
            return render(request, "profile.html", dictionary=data)
    # BUG FIX: non-owners previously fell through and returned None;
    # send them back to the profile page without uploading anything.
    return HttpResponseRedirect('/accounts/' + page_owner)
@login_required
def profile_view(request, page_owner, sub_page=None):
    """ Simple view to test querying the DB """
    # Renders `page_owner`'s profile for the circle selected by `sub_page`
    # (defaulting to 'Friends'), or handles an image-upload POST.
    # Redirect if the signed in user is an employee
    # NOTE(review): user.last_name doubles as a role flag here -- confirm.
    if request.user.last_name == 'employee':
        return HttpResponseRedirect('/employee')
    if request.user.last_name == 'manager':
        return HttpResponseRedirect('/manager')
    # Get the page owner's user object
    user = User.objects.filter(username=page_owner)
    if user.count() == 0:
        return HttpResponseRedirect('../../')
    # Get the page owner's information and circles
    # NOTE(review): first_name appears to hold the app-level user id.
    user = user[0]
    user_id = user.first_name
    user_info = queries.get_user_info_by_id(user_id)
    # Get the page owner's circles
    circles = queries.get_user_circles_info(user_id)
    circle_name, circle_id = get_current_circle(circles, sub_page)
    # Redirect if the circle for this page does not exist
    if not circle_id:
        return HttpResponseNotFound('<h1>Page not found</h1>')
    # Get the page's posts, comments and all related data
    page_data = build_page(page_owner, user_info, user_id,
    circles, circle_name, circle_id)
    # Add the page data to our data object
    data = make_data(request, page_owner)
    data['page_data'] = page_data
    # Add the current navigation item indicator
    if page_owner == request.user.username:
        data['nbar'] = "nav_home"
    if request.method == 'POST':
        # Handle file upload
        return upload_image(data, request, request.user.username, page_owner)
    data['form'] = DocumentForm()  # An empty, unbound form
    # Pick a random advertisement to display alongside the profile.
    val = queries.adv_list()
    data['ad_id'] = random.choice(val)
    return render(request, "profile.html", dictionary=data)
def get_current_circle(circles, sub_page):
    """Resolve the circle selected by the URL ``sub_page`` fragment.

    Args:
        circles: iterable of circle rows; index 0 is the circle id and
            index 2 the circle name.
        sub_page: optional URL fragment, underscores standing in for
            spaces; None selects the default 'Friends' circle.

    Returns:
        (circle_name, circle_id) -- circle_id is None when no circle
        matches, which callers treat as "page not found".
    """
    # BUG FIX: removed leftover debug print(circle) from the lookup loop.
    circle_name = sub_page.replace('_', ' ') if sub_page else 'Friends'
    circle_id = None
    for circle in circles:
        if circle[2] == circle_name:
            circle_id = circle[0]
    return circle_name, circle_id
@login_required
def messages_view(request):
    """Render the signed-in user's messages grouped into conversations.

    Conversations are keyed by the *other* participant's Django ``User``
    object; each value is the list of message dicts exchanged with them.
    """
    messages = queries.get_user_messages(request.user.first_name)
    conversations = {}

    def build_conversations(message):
        """Add one message row to the conversations mapping."""
        # Extract data from the message row.
        message_id, content, sender_id = message[0], message[1], message[2]
        reciever_id, subject, date = message[3], message[4], message[5]
        # Get sender and reciever user info.
        sender_info = queries.get_user_info_by_id(str(sender_id))
        sender_name = sender_info[1] + " " + sender_info[2]
        reciever_info = queries.get_user_info_by_id(str(reciever_id))
        reciever_name = reciever_info[1] + " " + reciever_info[2]
        # The conversation partner is whichever side is not the signed-in
        # user.
        if str(sender_id) != str(request.user.first_name):
            convo_name = sender_name
            convo_username = User.objects.filter(first_name=sender_id)[0]
            convo_id = sender_id
        else:
            convo_name = reciever_name
            convo_username = User.objects.filter(first_name=reciever_id)[0]
            convo_id = reciever_id
        # BUG FIX: the membership test previously checked `convo_name` (a
        # string) against keys that are `convo_username` (User objects), so
        # the list was recreated for every message and only the last message
        # of each conversation survived.
        if convo_username not in conversations:
            conversations[convo_username] = []
        conversations[convo_username].append({'message_id': message_id,
                                              'subject': subject,
                                              'date': date,
                                              'content': content,
                                              'sender': sender_id,
                                              'reciever': reciever_id,
                                              'sender_name': sender_name,
                                              'reciever_name': reciever_name,
                                              'convo_name': convo_name,
                                              'convo_id': convo_id})

    # Loop over the messages and build the conversations.
    for message in messages:
        build_conversations(message)
    data = make_data(request, request.user.first_name)
    data['nbar'] = 'nav_messages'
    data['conversations'] = conversations
    return render(request, "messages.html", dictionary=data)
@login_required
def employee_view(request):
    """Render the employee dashboard page."""
    return render(request, "employee.html")
@login_required
def manager_view(request):
    """Render the manager dashboard with its revenue report."""
    context = {'cust_rep_high_rev': queries.customer_rep_highest_revenue()}
    return render(request, "manager.html", dictionary=context)
@login_required
def get_messages_ajax(request):
    """Return, as JSON, every message between the signed-in user and the
    POSTed ``convo_user``."""
    partner = User.objects.filter(username=request.POST["convo_user"])[0]
    participant_ids = [request.user.first_name, partner.first_name]
    payload = {'messages': queries.get_conversation_messages(participant_ids)}
    return HttpResponse(json.dumps(payload), content_type="application/json")
@login_required
def get_friends_ajax(request):
    """Search users by first name; return each match extended with its
    Django username, as JSON."""
    matches = queries.get_users_by_firstname(request.POST["name"])
    with_usernames = []
    for match in matches:
        account = User.objects.filter(first_name=match[0])[0]
        with_usernames.append(match + (account.username,))
    return HttpResponse(json.dumps({'friends': with_usernames}),
                        content_type="application/json")
@login_required
def update_customer_ajax(request):
    """Update a customer record from the POSTed 'ar[]' array.

    Expected field order (from the client-side table row):
    id, first, last, gender, address, state, city, zip, phone, email,
    rating, date_of_birth.

    Fixes: renamed locals that shadowed the builtins ``id`` and ``zip``;
    removed dead commented-out code.
    """
    val = request.POST.getlist('ar[]')
    cust_id = val[0]
    first = val[1]
    last = val[2]
    gender = val[3]
    address = val[4]
    state = val[5]
    city = val[6]
    zipcode = val[7]
    phone = val[8]
    email = val[9]
    rating = val[10]
    # val[11] (date of birth) is received but not forwarded to the query,
    # matching the original behavior.
    queries.update_customer(cust_id, rating, first, last, gender, address,
                            city, state, zipcode, phone, email)
    return HttpResponse(json.dumps({}), content_type="application/json")
@login_required
def update_employee_ajax(request):
    """Update an employee record from the POSTed 'ar[]' array.

    Expected field order: first, last, gender, address, city, state, zip,
    phone, <skipped>, hourly_rate, role, employee_id.

    Fixes: removed leftover debug ``print`` statements and renamed the
    local that shadowed the builtin ``zip``.
    """
    val = request.POST.getlist('ar[]')
    first = val[0]
    last = val[1]
    gender = val[2]
    address = val[3]
    city = val[4]
    state = val[5]
    zipcode = val[6]
    phone = val[7]
    # NOTE(review): val[8] is deliberately skipped here (presumably the
    # email column, which this endpoint does not update) -- confirm
    # against the client-side array layout.
    hourly = val[9]
    role = val[10]
    emp_id = val[11]
    queries.update_employee(first, last, '', gender, address, city, state,
                            zipcode, phone, hourly, role, emp_id)
    return HttpResponse(json.dumps({}), content_type="application/json")
@login_required
def submit_like_ajax(request):
    """Like or unlike a post/comment for the signed-in user.

    POST params: ``post_id``, ``text_type`` ('post'|'comment'),
    ``like_type`` ('like'|'unlike').  Responds with ``{'success': bool}``;
    success is False when the action is a no-op (already liked / not
    liked) or the parameters are unrecognized.

    Fixes: the two branches were near-duplicates and have been merged;
    an unrecognized ``text_type`` used to raise ``NameError`` on the
    unbound ``is_liked``, and an unrecognized ``like_type`` fell off the
    end of the function returning ``None`` (a server error) -- both now
    return the failure JSON instead.
    """
    post_id = request.POST.get("post_id")
    post_type = request.POST.get("text_type")
    like_type = request.POST.get("like_type")
    data = {'success': False}
    if like_type not in ('like', 'unlike') or post_type not in ('post', 'comment'):
        return HttpResponse(json.dumps(data), content_type="application/json")
    # Look up the current like state for this user.
    if post_type == 'post':
        _num_likes, is_liked = queries.get_likes_by_post(
            (post_id,), request.user.first_name)
    else:
        _num_likes, is_liked = queries.get_likes_by_comment(
            (post_id,), request.user.first_name)
    # No-op requests: 'like' when already liked, 'unlike' when not liked.
    if (like_type == 'like') == bool(is_liked):
        return HttpResponse(json.dumps(data), content_type="application/json")
    data['success'] = True
    if like_type == 'like':
        if post_type == 'post':
            queries.like_post(post_id, request.user.first_name)
        else:
            queries.like_comment(post_id, request.user.first_name)
    else:
        if post_type == 'post':
            queries.unlike_a_post(post_id, request.user.first_name)
        else:
            queries.unlike_a_comment(post_id, request.user.first_name)
    return HttpResponse(json.dumps(data), content_type="application/json")
@login_required
def delete_advertisement_ajax(request):
    """Delete the advertisement identified by the POSTed ``id``."""
    queries.delete_advertisement(request.POST.get("id"))
    return HttpResponse(json.dumps({}), content_type="application/json")
@login_required
def list_all_customers_ajax(request):
    """Return every customer record as JSON under the 'items' key."""
    customers = queries.customer_list()
    return HttpResponse(json.dumps({'items': customers}),
                        content_type="application/json")
@login_required
def list_all_employees_ajax(request):
    """Return every employee record as JSON under the 'items' key."""
    employees = queries.employee_list()
    return HttpResponse(json.dumps({'items': employees}),
                        content_type="application/json")
@login_required
def del_customer_ajax(request):
    """Remove the customer identified by the POSTed ``id``."""
    queries.remove_customer(request.POST.get("id"))
    return HttpResponse(json.dumps({}), content_type="application/json")
@login_required
def del_employee_ajax(request):
    """Remove the employee identified by the POSTed ``id``."""
    queries.remove_employee((request.POST.get("id"),))
    return HttpResponse(json.dumps({}), content_type="application/json")
@login_required
def generate_mailing_list_ajax(request):
    """Return the customer mailing list as JSON under the 'items' key.

    Fixes: this was the only AJAX endpoint in the module exposed without
    ``@login_required``; the decorator is added for consistency with its
    siblings.  Also removed a leftover debug ``print``.
    """
    val = queries.customer_mailing_list()
    return HttpResponse(json.dumps({'items': val}),
                        content_type="application/json")
@login_required
def produce_list_of_all_items_advertised_ajax(request):
    """Return all advertised items as JSON under the 'items' key.

    Fix: removed leftover debug ``print`` statements.
    """
    items = queries.produce_list_of_all_items_advertised()
    return HttpResponse(json.dumps({'items': items}),
                        content_type="application/json")
@login_required
def list_item_suggestions_ajax(request):
    """Return item suggestions for the POSTed ``cust_id`` as JSON.

    Fix: removed a leftover debug ``print``.
    """
    cust_id = request.POST.get("cust_id")
    items = queries.item_suggestions(request.user.first_name, cust_id)
    return HttpResponse(json.dumps({'items': items}),
                        content_type="application/json")
@login_required
def create_advertisement_ajax(request):
    """Create an advertisement from the POSTed form fields.

    The signed-in employee (internal id kept in ``request.user.first_name``)
    is recorded as the advertisement's creator.

    Fix: removed the large blocks of dead, commented-out debug code.
    """
    item_name = request.POST.get("item_name")
    num_units = request.POST.get("num_aval_units")
    unit_price = request.POST.get("unit_price")
    content = request.POST.get("content")
    type_ad = request.POST.get("type")
    company = request.POST.get("company")
    employee_id = request.user.first_name
    queries.create_advertisement(item_name, num_units, unit_price, content,
                                 employee_id, type_ad, company)
    return HttpResponse(json.dumps({}), content_type="application/json")
@login_required
def submit_post_ajax(request):
    """Create a post on the page backing the POSTed ``circle_id``.

    Fix: added ``@login_required`` for consistency with the other AJAX
    endpoints -- the handler already dereferences ``request.user``, which
    would fail for an anonymous session anyway.
    """
    page_id = queries.get_page_by_circle_id(request.POST.get('circle_id'))[0]
    data = {'post_text': request.POST.get('post_text'),
            'page_name': request.POST.get('page_name'),
            'page_id': page_id}
    queries.make_a_post(data['post_text'], request.user.first_name,
                        data['page_id'])
    return HttpResponse(json.dumps(data), content_type="application/json")
@login_required
def submit_comment_ajax(request):
    """Add a comment to the POSTed ``post_id``.

    Fix: added ``@login_required`` for consistency with the other AJAX
    endpoints -- the handler already dereferences ``request.user``.
    """
    data = {'comment_text': request.POST.get('comment_text'),
            'post_id': request.POST.get('post_id')}
    queries.make_a_comment(data['comment_text'], data['post_id'],
                           request.user.first_name)
    return HttpResponse(json.dumps(data), content_type="application/json")
@login_required
def purchase_item_ajax(request):
    """Buy an item for the signed-in user.

    Responds with an empty JSON body when the requested quantity is not
    available; otherwise records the purchase and redirects home.
    """
    item_id = request.POST.get("item_id_input")
    quantity = request.POST.get("quantity_input")
    buyer_id = request.user.first_name
    if not queries.validate_purchase_quantity((item_id,), (quantity,)):
        return HttpResponse(json.dumps({}), content_type="application/json")
    queries.buy_item((item_id,), (quantity,), (buyer_id,))
    return HttpResponseRedirect("/")
@login_required
def redirect_user(request):
    """Send the signed-in user to their own profile page."""
    target = "../" + str(request.user.username)
    return HttpResponseRedirect(target)
@login_required
def redirect_to_home(request):
    """Redirect to the profile landing page."""
    return HttpResponseRedirect("accounts/profile/")
def list_view(request):
    """Handle a document upload (POST) and list all stored documents."""
    # Handle file upload
    if request.method == 'POST':
        form = DocumentForm(request.POST, request.FILES)
        if form.is_valid():
            username = request.user.username
            newdoc = Document(username=username,
                              docfile=request.FILES['docfile'])
            newdoc.save()
            # Redirect to the document list after POST
            return HttpResponseRedirect('/accounts/' + username)
        # NOTE(review): an invalid POST falls through and re-renders the
        # list with the bound (error-carrying) form.
    else:
        form = DocumentForm()  # An empty, unbound form
    # Load documents for the list page
    documents = Document.objects.all()
    # Render list page with the documents and the form.
    # NOTE(review): render_to_response/RequestContext is the legacy API;
    # the rest of this module uses render().
    return render_to_response(
        'list.html',
        {'documents': documents, 'form': form},
        context_instance=RequestContext(request)
    )
def create_employee_account_view(request):
    """Render the employee registration form (GET) or create the account
    (POST).

    On a valid POST the employee row is stored first; the id it yields is
    carried in the Django auth user's ``first_name`` field, and the role
    marker 'employee' in ``last_name``.  On invalid input the form is
    re-rendered with the per-field ``err_*`` messages.

    Fix: removed leftover debug ``print`` statements.
    """
    if request.method == 'GET':
        return render(request, "employeeregistration.html")
    elif request.method == 'POST':
        is_valid, data = validate_new_employee(request)
        if is_valid:
            # Start date is stored as "M-D-Y" to match the DB convention.
            date = data['smonth'] + "-" + data['sday'] + "-" + data['syear']
            cust_id = queries.add_employee(
                firstname=data['first_name'], lastname=data['last_name'],
                password=data['pw'], gender=data['gender'],
                address=data['address'], city=data['city'],
                state=data['state'], zipcode=data['zipcode'],
                telephone=data['telephone'], ssn=data['ssn'],
                start_date=date, hourly_rate=data['rate'], role=data['role'])
            user = User.objects.create_user(username=data['username'],
                                            password=data['pw'],
                                            first_name=cust_id,
                                            last_name="employee")
            user.first_name = cust_id
            user.is_active = True
            user.save()
            return HttpResponseRedirect("/login")
        else:
            # Re-render the form with the validation errors in ``data``.
            return render(request, 'employeeregistration.html', dictionary=data)
def create_account_view(request):
    """Render the registration form (GET) or create a customer account
    (POST).

    The customer id returned by the DB is carried in the auth user's
    ``first_name`` field and the role marker 'customer' in ``last_name``.

    Fixes: removed leftover debug prints and the unreachable
    ``print("invalid")`` that sat after both returns; merged the valid and
    invalid branches' identical redirect logic.
    """
    if request.method == 'GET':
        return render(request, "registration.html")
    elif request.method == 'POST':
        is_valid, data = validate_new_user(request)
        if is_valid:
            # Date of birth is stored as "M-D-Y" to match the DB convention.
            dob = data['month'] + "-" + data['day'] + "-" + data['year']
            cust_id = queries.add_customer(
                firstname_=data['first_name'], lastname_=data['last_name'],
                password_=data['pw'], gender_=data['gender'],
                address_=data['address'], city_=data['city'],
                state_=data['state'], zipcode_=data['zipcode'],
                telephone_=data['telephone'], email_=data['email'],
                dob_=dob, credit_card_num=data['credit'])
            user = User.objects.create_user(username=data['username'],
                                            password=data['pw'],
                                            first_name=cust_id,
                                            last_name="customer")
            user.first_name = cust_id
            user.is_active = True
            user.save()
        # NOTE(review): unlike the employee flow, validation errors are not
        # surfaced -- both outcomes redirect identically.  Behavior kept.
        if not request.POST.get("employee_create", False):
            return HttpResponseRedirect("/login")
        return HttpResponseRedirect("/employee")
def validate_new_user(request):
    """Validate customer-registration input.

    Returns:
        ``(valid, data)`` -- ``valid`` is True when every field passed;
        ``data`` holds the (partially parsed) input plus an ``err_*``
        message for each failed field, ready to feed back into the
        template.

    Fixes: ``len(x) < 2 or len(x) > 2`` simplified to ``!= 2``,
    ``== False`` comparisons replaced with ``not``, redundant ``str()``
    wrapper dropped, and a leftover debug ``print`` removed.
    """
    data = {}
    data['name'] = request.POST.get('name', False).strip().split()
    data['email'] = request.POST.get('email', False)
    data['pw'] = request.POST.get('password', False)
    data['month'] = request.POST.get('month', False)
    data['day'] = request.POST.get('day', False)
    data['year'] = request.POST.get('year', False)
    data['gender'] = request.POST.get('gender', False)
    data['username'] = request.POST.get('username', False)
    data['password'] = request.POST.get('password', False)
    data['address'] = request.POST.get('address', False)
    data['city'] = request.POST.get('city', False)
    data['state'] = request.POST.get('state', False)
    data['zipcode'] = request.POST.get('zipcode', False)
    data['telephone'] = request.POST.get('telephone', False)
    data['credit'] = request.POST.get('credit', False)
    valid_data = True
    # The name must split into exactly a first and a last name.
    if len(data['name']) != 2:
        valid_data = False
        data['err_studName'] = "Please enter a valid name"
    else:
        data['first_name'] = data['name'][0]
        data['last_name'] = data['name'][1]
    # NOTE: validate_email() returns False for a WELL-FORMED address.
    if validate_email(data['email']):
        valid_data = False
        data['err_email'] = "Invalid email"
    if User.objects.filter(username=data['username']).count():
        valid_data = False
        data['err_email'] = "A user with that email already exists"
    if len(data['address'].strip()) == 0:
        valid_data = False
        data['err_address'] = "Please enter an address"
    if len(data['pw'].strip()) == 0:
        valid_data = False
        data['err_pw'] = "Please enter a password"
    # Birth date: all three parts present, then a real m/d/Y calendar date.
    if len(data['year'].strip()) == 0 or len(data['day'].strip()) == 0 or len(data['month'].strip()) == 0:
        valid_data = False
        data['err_date'] = "Please enter a date"
    elif not validate_date(data['month'] + "/" + data['day'] + "/" + data['year']):
        valid_data = False
        data['err_date'] = "Please enter a date"
    if len(data['city'].strip()) == 0:
        valid_data = False
        data['err_city'] = "Please enter a city"
    if len(data['state'].strip()) == 0:
        valid_data = False
        data['err_state'] = "Please enter a state"
    if len(data['zipcode'].strip()) == 0 or not isInt(data['zipcode']):
        valid_data = False
        data['err_zipcode'] = "Please enter a zip"
    if len(data['telephone'].strip()) == 0 or not isInt(data['telephone']):
        valid_data = False
        data['err_telephone'] = "Please enter a telephone"
    if len(data['credit'].strip()) == 0 or not isInt(data['credit']):
        valid_data = False
        data['err_credit'] = "Please enter a credit"
    return valid_data, data
def isInt(s):
    """Return True if ``s`` can be parsed as a base-10 integer.

    Fix: also catch ``TypeError`` so non-string sentinels (e.g. ``None``)
    report False instead of raising -- callers feed this values from
    ``request.POST.get(..., False)``.
    """
    try:
        int(s)
        return True
    except (ValueError, TypeError):
        return False
def validate_new_employee(request):
    """Validate employee-registration input.

    Returns:
        ``(valid, data)`` -- ``valid`` is True when every field passed;
        ``data`` holds the (partially parsed) input plus an ``err_*``
        message for each failed field, ready to feed back into the
        template.

    Fixes: removed the write-only ``error_location`` debug counter and its
    ``print`` statements; simplified ``len(x) < 2 or len(x) > 2`` to
    ``!= 2`` and ``== False`` to ``not``.
    """
    data = {}
    data['name'] = request.POST.get('name', False).strip().split()
    data['pw'] = request.POST.get('password', False)
    data['gender'] = request.POST.get('gender', False)
    data['username'] = request.POST.get('username', False)
    data['password'] = request.POST.get('password', False)
    data['address'] = request.POST.get('address', False)
    data['city'] = request.POST.get('city', False)
    data['state'] = request.POST.get('state', False)
    data['zipcode'] = request.POST.get('zipcode', False)
    data['telephone'] = request.POST.get('telephone', False)
    data['smonth'] = request.POST.get('smonth', False)
    data['sday'] = request.POST.get('sday', False)
    data['syear'] = request.POST.get('syear', False)
    data['ssn'] = request.POST.get('ssn', False)
    data['rate'] = request.POST.get('rate', False)
    data['role'] = request.POST.get('role', False)
    valid_data = True
    # The name must split into exactly a first and a last name.
    if len(data['name']) != 2:
        valid_data = False
        data['err_studName'] = "Please enter a valid name"
    else:
        data['first_name'] = data['name'][0]
        data['last_name'] = data['name'][1]
    if User.objects.filter(username=data['username']).count():
        valid_data = False
        data['err_email'] = "A user with that email already exists"
    if len(data['address'].strip()) == 0:
        valid_data = False
        data['err_address'] = "Please enter an address"
    if len(data['pw'].strip()) == 0:
        valid_data = False
        data['err_pw'] = "Please enter a password"
    # Start date: all three parts present, then a real m/d/Y calendar date.
    if len(data['syear'].strip()) == 0 or len(data['sday'].strip()) == 0 or len(data['smonth'].strip()) == 0:
        valid_data = False
        data['err_date'] = "Please enter a date"
    elif not validate_date(data['smonth'] + "/" + data['sday'] + "/" + data['syear']):
        valid_data = False
        data['err_date'] = "Please enter a date"
    if len(data['city'].strip()) == 0:
        valid_data = False
        data['err_city'] = "Please enter a city"
    if len(data['state'].strip()) == 0:
        valid_data = False
        data['err_state'] = "Please enter a state"
    if len(data['zipcode'].strip()) == 0:
        valid_data = False
        data['err_zipcode'] = "Please enter a zip"
    if len(data['telephone'].strip()) == 0:
        valid_data = False
        data['err_telephone'] = "Please enter a telephone"
    if len(data['ssn'].strip()) == 0:
        valid_data = False
        data['err_ssn'] = "Please enter a ssn"
    if len(data['rate'].strip()) == 0:
        valid_data = False
        data['err_rate'] = "Please enter a rate"
    return valid_data, data
def validate_email(email):
    """Return False when ``email`` looks well-formed, True when it does not.

    NOTE: the return value is INVERTED relative to the name -- callers
    treat a truthy result as "invalid email" -- and that contract is
    preserved here.

    Fix: the regex is now a raw string; ``"\\."`` in a plain string relies
    on Python passing unknown escapes through and raises a
    DeprecationWarning on modern interpreters.
    """
    import re
    pattern = re.compile(r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$")
    if pattern.match(email):
        return False
    return True
def logout_view(request):
    """Log the current user out via Django auth, then go to the login page."""
    logout(request)
    return HttpResponseRedirect("/login")
def validate_date(d):
    """Return True when ``d`` parses as a real M/D/YYYY calendar date."""
    try:
        datetime.datetime.strptime(d, '%m/%d/%Y')
    except ValueError:
        return False
    return True
| {
"content_hash": "4bafd24ba49225870dbea397510e2635",
"timestamp": "",
"source": "github",
"line_count": 727,
"max_line_length": 205,
"avg_line_length": 38.18707015130674,
"alnum_prop": 0.5913478855990203,
"repo_name": "karlfloersch/friends-r-us",
"id": "cc3253fc3b2aa5aa098efe765eab601612ef36d0",
"size": "27762",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "social_network/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5309"
},
{
"name": "HTML",
"bytes": "23831"
},
{
"name": "JavaScript",
"bytes": "26724"
},
{
"name": "Python",
"bytes": "65730"
}
],
"symlink_target": ""
} |
"""Use Bayesian Inference to trigger a binary sensor."""
from collections import OrderedDict
import logging
import voluptuous as vol
from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorEntity
from homeassistant.const import (
CONF_ABOVE,
CONF_BELOW,
CONF_DEVICE_CLASS,
CONF_ENTITY_ID,
CONF_NAME,
CONF_PLATFORM,
CONF_STATE,
CONF_VALUE_TEMPLATE,
STATE_UNKNOWN,
)
from homeassistant.core import callback
from homeassistant.exceptions import ConditionError, TemplateError
from homeassistant.helpers import condition
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import (
TrackTemplate,
async_track_state_change_event,
async_track_template_result,
)
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.helpers.template import result_as_boolean
from . import DOMAIN, PLATFORMS
# Attribute names exposed via extra_state_attributes.
ATTR_OBSERVATIONS = "observations"
ATTR_OCCURRED_OBSERVATION_ENTITIES = "occurred_observation_entities"
ATTR_PROBABILITY = "probability"
ATTR_PROBABILITY_THRESHOLD = "probability_threshold"

# YAML configuration keys.
CONF_OBSERVATIONS = "observations"
CONF_PRIOR = "prior"
CONF_TEMPLATE = "template"
CONF_PROBABILITY_THRESHOLD = "probability_threshold"
CONF_P_GIVEN_F = "prob_given_false"
CONF_P_GIVEN_T = "prob_given_true"
CONF_TO_STATE = "to_state"

DEFAULT_NAME = "Bayesian Binary Sensor"
DEFAULT_PROBABILITY_THRESHOLD = 0.5

_LOGGER = logging.getLogger(__name__)

# Observation backed by a numeric-state condition on a single entity.
NUMERIC_STATE_SCHEMA = vol.Schema(
    {
        CONF_PLATFORM: "numeric_state",
        vol.Required(CONF_ENTITY_ID): cv.entity_id,
        vol.Optional(CONF_ABOVE): vol.Coerce(float),
        vol.Optional(CONF_BELOW): vol.Coerce(float),
        vol.Required(CONF_P_GIVEN_T): vol.Coerce(float),
        vol.Optional(CONF_P_GIVEN_F): vol.Coerce(float),
    },
    required=True,
)

# Observation that matches a single entity being in an exact state.
STATE_SCHEMA = vol.Schema(
    {
        CONF_PLATFORM: CONF_STATE,
        vol.Required(CONF_ENTITY_ID): cv.entity_id,
        vol.Required(CONF_TO_STATE): cv.string,
        vol.Required(CONF_P_GIVEN_T): vol.Coerce(float),
        vol.Optional(CONF_P_GIVEN_F): vol.Coerce(float),
    },
    required=True,
)

# Observation driven by the boolean result of a template.
TEMPLATE_SCHEMA = vol.Schema(
    {
        CONF_PLATFORM: CONF_TEMPLATE,
        vol.Required(CONF_VALUE_TEMPLATE): cv.template,
        vol.Required(CONF_P_GIVEN_T): vol.Coerce(float),
        vol.Optional(CONF_P_GIVEN_F): vol.Coerce(float),
    },
    required=True,
)

# Platform config: a prior, a threshold and a list of observations of any
# of the three shapes above.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_DEVICE_CLASS): cv.string,
        vol.Required(CONF_OBSERVATIONS): vol.Schema(
            vol.All(
                cv.ensure_list,
                [vol.Any(NUMERIC_STATE_SCHEMA, STATE_SCHEMA, TEMPLATE_SCHEMA)],
            )
        ),
        vol.Required(CONF_PRIOR): vol.Coerce(float),
        vol.Optional(
            CONF_PROBABILITY_THRESHOLD, default=DEFAULT_PROBABILITY_THRESHOLD
        ): vol.Coerce(float),
    }
)
def update_probability(prior, prob_given_true, prob_given_false):
    """Update probability using Bayes' rule.

    Returns P(H|E) given the prior P(H), the likelihood P(E|H) and the
    likelihood under the complement P(E|not H).
    """
    true_term = prob_given_true * prior
    false_term = prob_given_false * (1 - prior)
    return true_term / (true_term + false_term)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the Bayesian Binary sensor."""
    await async_setup_reload_service(hass, DOMAIN, PLATFORMS)

    sensor = BayesianBinarySensor(
        config[CONF_NAME],
        config[CONF_PRIOR],
        config[CONF_OBSERVATIONS],
        config[CONF_PROBABILITY_THRESHOLD],
        config.get(CONF_DEVICE_CLASS),
    )
    async_add_entities([sensor])
class BayesianBinarySensor(BinarySensorEntity):
    """Representation of a Bayesian sensor."""

    def __init__(self, name, prior, observations, probability_threshold, device_class):
        """Initialize the Bayesian sensor."""
        self._name = name
        self._observations = observations
        self._probability_threshold = probability_threshold
        self._device_class = device_class
        # True while the posterior is at or above the threshold (drives is_on).
        self._deviation = False
        # Template-result trackers, kept so async_update() can force refreshes.
        self._callbacks = []
        self.prior = prior
        self.probability = prior
        # Maps observation id -> observation dict while it currently holds,
        # or -> None while it does not.
        self.current_observations = OrderedDict({})
        self.observations_by_entity = self._build_observations_by_entity()
        self.observations_by_template = self._build_observations_by_template()
        # Dispatch table for the entity-driven observation platforms.
        self.observation_handlers = {
            "numeric_state": self._process_numeric_state,
            "state": self._process_state,
        }

    async def async_added_to_hass(self):
        """
        Call when entity about to be added.

        All relevant update logic for instance attributes occurs within this closure.
        Other methods in this class are designed to avoid directly modifying instance
        attributes, by instead focusing on returning relevant data back to this method.

        The goal of this method is to ensure that `self.current_observations` and `self.probability`
        are set on a best-effort basis when this entity is register with hass.

        In addition, this method must register the state listener defined within, which
        will be called any time a relevant entity changes its state.
        """

        @callback
        def async_threshold_sensor_state_listener(event):
            """
            Handle sensor state changes.

            When a state changes, we must update our list of current observations,
            then calculate the new probability.
            """
            new_state = event.data.get("new_state")

            # Unknown states carry no evidence; ignore them.
            if new_state is None or new_state.state == STATE_UNKNOWN:
                return

            entity = event.data.get("entity_id")

            self.current_observations.update(self._record_entity_observations(entity))
            self.async_set_context(event.context)
            self._recalculate_and_write_state()

        self.async_on_remove(
            async_track_state_change_event(
                self.hass,
                list(self.observations_by_entity),
                async_threshold_sensor_state_listener,
            )
        )

        @callback
        def _async_template_result_changed(event, updates):
            track_template_result = updates.pop()
            template = track_template_result.template
            result = track_template_result.result
            # event is None on the initial async_refresh() pass.
            entity = event and event.data.get("entity_id")
            if isinstance(result, TemplateError):
                _LOGGER.error(
                    "TemplateError('%s') "
                    "while processing template '%s' "
                    "in entity '%s'",
                    result,
                    template,
                    self.entity_id,
                )
                should_trigger = False
            else:
                should_trigger = result_as_boolean(result)

            # Mark every observation backed by this template as holding
            # (dict entry) or not holding (None).
            for obs in self.observations_by_template[template]:
                if should_trigger:
                    obs_entry = {"entity_id": entity, **obs}
                else:
                    obs_entry = None
                self.current_observations[obs["id"]] = obs_entry

            if event:
                self.async_set_context(event.context)
            self._recalculate_and_write_state()

        for template in self.observations_by_template:
            info = async_track_template_result(
                self.hass,
                [TrackTemplate(template, None)],
                _async_template_result_changed,
            )

            self._callbacks.append(info)
            self.async_on_remove(info.async_remove)
            info.async_refresh()

        # Best-effort initial evaluation of all entity-based observations.
        self.current_observations.update(self._initialize_current_observations())
        self.probability = self._calculate_new_probability()
        self._deviation = bool(self.probability >= self._probability_threshold)

    @callback
    def _recalculate_and_write_state(self):
        # Recompute the posterior and push the new state to hass.
        self.probability = self._calculate_new_probability()
        self._deviation = bool(self.probability >= self._probability_threshold)
        self.async_write_ha_state()

    def _initialize_current_observations(self):
        # Evaluate every entity-based observation once, at startup.
        local_observations = OrderedDict({})
        for entity in self.observations_by_entity:
            local_observations.update(self._record_entity_observations(entity))
        return local_observations

    def _record_entity_observations(self, entity):
        # Re-evaluate all observations attached to ``entity`` and return
        # their current hold/not-hold entries keyed by observation id.
        local_observations = OrderedDict({})

        for entity_obs in self.observations_by_entity[entity]:
            platform = entity_obs["platform"]

            should_trigger = self.observation_handlers[platform](entity_obs)

            if should_trigger:
                obs_entry = {"entity_id": entity, **entity_obs}
            else:
                obs_entry = None

            local_observations[entity_obs["id"]] = obs_entry

        return local_observations

    def _calculate_new_probability(self):
        # Fold every currently-holding observation into the prior.
        prior = self.prior

        for obs in self.current_observations.values():
            if obs is not None:
                prior = update_probability(
                    prior,
                    obs["prob_given_true"],
                    # Default prob_given_false to the complement.
                    obs.get("prob_given_false", 1 - obs["prob_given_true"]),
                )

        return prior

    def _build_observations_by_entity(self):
        """
        Build and return data structure of the form below.

        {
            "sensor.sensor1": [{"id": 0, ...}, {"id": 1, ...}],
            "sensor.sensor2": [{"id": 2, ...}],
            ...
        }

        Each "observation" must be recognized uniquely, and it should be possible
        for all relevant observations to be looked up via their `entity_id`.
        """
        observations_by_entity = {}
        for ind, obs in enumerate(self._observations):
            # The index doubles as the observation's stable id.
            obs["id"] = ind

            if "entity_id" not in obs:
                continue

            entity_ids = [obs["entity_id"]]

            for e_id in entity_ids:
                observations_by_entity.setdefault(e_id, []).append(obs)

        return observations_by_entity

    def _build_observations_by_template(self):
        """
        Build and return data structure of the form below.

        {
            "template": [{"id": 0, ...}, {"id": 1, ...}],
            "template2": [{"id": 2, ...}],
            ...
        }

        Each "observation" must be recognized uniquely, and it should be possible
        for all relevant observations to be looked up via their `template`.
        """
        observations_by_template = {}
        for ind, obs in enumerate(self._observations):
            obs["id"] = ind

            if "value_template" not in obs:
                continue

            template = obs.get(CONF_VALUE_TEMPLATE)
            observations_by_template.setdefault(template, []).append(obs)

        return observations_by_template

    def _process_numeric_state(self, entity_observation):
        """Return True if numeric condition is met."""
        entity = entity_observation["entity_id"]

        try:
            return condition.async_numeric_state(
                self.hass,
                entity,
                entity_observation.get("below"),
                entity_observation.get("above"),
                None,
                entity_observation,
            )
        except ConditionError:
            # Unavailable/non-numeric state: treat as "not observed".
            return False

    def _process_state(self, entity_observation):
        """Return True if state conditions are met."""
        entity = entity_observation["entity_id"]

        try:
            return condition.state(
                self.hass, entity, entity_observation.get("to_state")
            )
        except ConditionError:
            return False

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def is_on(self):
        """Return true if sensor is on."""
        return self._deviation

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def device_class(self):
        """Return the sensor class of the sensor."""
        return self._device_class

    @property
    def extra_state_attributes(self):
        """Return the state attributes of the sensor."""
        attr_observations_list = [
            obs.copy() for obs in self.current_observations.values() if obs is not None
        ]

        # Templates are not JSON-serializable; strip them from the copies.
        for item in attr_observations_list:
            item.pop("value_template", None)

        return {
            ATTR_OBSERVATIONS: attr_observations_list,
            ATTR_OCCURRED_OBSERVATION_ENTITIES: list(
                {
                    obs.get("entity_id")
                    for obs in self.current_observations.values()
                    if obs is not None and obs.get("entity_id") is not None
                }
            ),
            ATTR_PROBABILITY: round(self.probability, 2),
            ATTR_PROBABILITY_THRESHOLD: self._probability_threshold,
        }

    async def async_update(self):
        """Get the latest data and update the states."""
        if not self._callbacks:
            self._recalculate_and_write_state()
            return
        # Force recalc of the templates. The states will
        # update automatically.
        for call in self._callbacks:
            call.async_refresh()
| {
"content_hash": "ecf2437410f923c2c98493de45691979",
"timestamp": "",
"source": "github",
"line_count": 418,
"max_line_length": 100,
"avg_line_length": 32.45454545454545,
"alnum_prop": 0.5989237800383311,
"repo_name": "adrienbrault/home-assistant",
"id": "6879e278bab03f536ab1e1ddd93223d2c746c967",
"size": "13566",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/bayesian/binary_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "32021043"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
} |
import os
import subprocess
import sys
import phoenix_utils
import atexit
import urlparse
# Handle to the spawned sqlline JVM child process, if any.
# Fix: the original module-level ``global childProc`` statement was a
# no-op (``global`` only has meaning inside a function) and was removed.
childProc = None

def kill_child():
    """atexit hook: make sure the spawned child process dies with us."""
    if childProc is not None:
        childProc.terminate()
        childProc.kill()
        # A killed terminal app can leave the tty in a bad state.
        if os.name != 'nt':
            os.system("reset")
# Reap the child even on normal interpreter exit.
atexit.register(kill_child)
phoenix_utils.setPath()

# Defaults; overridden by the command-line handling below.
url = "localhost:8765"
sqlfile = ""
# hbase-site.xml property that selects the query-server wire format.
serialization_key = 'phoenix.queryserver.serialization'
def usage_and_exit():
    """Print the usage line to stderr and exit with a non-zero status."""
    sys.exit("usage: sqlline-thin.py [host[:port]] [sql_file]")
def cleanup_url(url):
    """Normalize a query-server address to ``http://host:port`` form.

    Prepends ``http://`` when no scheme is present, then appends the
    default port 8765 when the network location carries none.
    """
    if urlparse.urlparse(url).scheme == "":
        url = "http://" + url
    if ":" not in urlparse.urlparse(url).netloc:
        url = url + ":8765"
    return url
def get_serialization():
    """Read phoenix.queryserver.serialization from the HBase config.

    Shells out to HBaseConfTool through the `hbase` launcher; falls back
    to PROTOBUF whenever the tool is missing, fails, or yields no value.
    """
    default_serialization='PROTOBUF'
    env=os.environ.copy()
    hbase_cmd = phoenix_utils.which('hbase')
    if hbase_cmd is None:
        print 'Failed to find hbase executable on PATH, defaulting serialization to %s.' % default_serialization
        return default_serialization
    # Point the launcher at the same config dir phoenix_utils resolved.
    env['HBASE_CONF_DIR'] = phoenix_utils.hbase_conf_dir
    proc = subprocess.Popen([hbase_cmd, 'org.apache.hadoop.hbase.util.HBaseConfTool', serialization_key],
        env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (stdout, stderr) = proc.communicate()
    if proc.returncode != 0:
        print 'Failed to extract serialization from hbase-site.xml, defaulting to %s.' % default_serialization
        return default_serialization
    # Don't expect this to happen, but give a default value just in case
    if stdout is None:
        return default_serialization
    stdout = stdout.strip()
    if stdout == 'null':
        # HBaseConfTool prints the literal string 'null' for unset keys.
        return default_serialization
    return stdout
# Positional args: [host[:port]] [sql_file].  With a single argument,
# an existing file is treated as the SQL script, anything else as the URL.
if len(sys.argv) == 1:
    pass
elif len(sys.argv) == 2:
    if os.path.isfile(sys.argv[1]):
        sqlfile = sys.argv[1]
    else:
        url = sys.argv[1]
elif len(sys.argv) == 3:
    url = sys.argv[1]
    sqlfile = sys.argv[2]
else:
    usage_and_exit()
url = cleanup_url(url)
if sqlfile != "":
    # sqlline consumes the script via its --run flag.
    sqlfile = "--run=" + sqlfile
colorSetting = "true"
# disable color setting for windows OS
if os.name == 'nt':
    colorSetting = "false"
# HBase configuration folder path (where hbase-site.xml reside) for
# HBase/Phoenix client side property override
hbase_config_path = os.getenv('HBASE_CONF_DIR', phoenix_utils.current_dir)
serialization = get_serialization()
java_home = os.getenv('JAVA_HOME')
# load hbase-env.??? to extract JAVA_HOME, HBASE_PID_DIR, HBASE_LOG_DIR
hbase_env_path = None
hbase_env_cmd = None
if os.name == 'posix':
    hbase_env_path = os.path.join(hbase_config_path, 'hbase-env.sh')
    hbase_env_cmd = ['bash', '-c', 'source %s && env' % hbase_env_path]
elif os.name == 'nt':
    hbase_env_path = os.path.join(hbase_config_path, 'hbase-env.cmd')
    hbase_env_cmd = ['cmd.exe', '/c', 'call %s & set' % hbase_env_path]
if not hbase_env_path or not hbase_env_cmd:
    print >> sys.stderr, "hbase-env file unknown on platform %s" % os.name
    sys.exit(-1)
hbase_env = {}
if os.path.isfile(hbase_env_path):
    # Source hbase-env in a subshell and capture the resulting environment
    # as KEY=VALUE lines.
    p = subprocess.Popen(hbase_env_cmd, stdout = subprocess.PIPE)
    for x in p.stdout:
        (k, _, v) = x.partition('=')
        hbase_env[k.strip()] = v.strip()
if hbase_env.has_key('JAVA_HOME'):
    # JAVA_HOME from hbase-env wins over the inherited environment.
    java_home = hbase_env['JAVA_HOME']
if java_home:
    java = os.path.join(java_home, 'bin', 'java')
else:
    java = 'java'
# Assemble the sqlline invocation against the Phoenix Query Server thin
# client driver and hand control to it.
java_cmd = java + ' -cp "' + phoenix_utils.hbase_conf_dir + os.pathsep + phoenix_utils.phoenix_thin_client_jar + \
    os.pathsep + phoenix_utils.hadoop_conf + os.pathsep + phoenix_utils.hadoop_classpath + '" -Dlog4j.configuration=file:' + \
    os.path.join(phoenix_utils.current_dir, "log4j.properties") + \
    " sqlline.SqlLine -d org.apache.phoenix.queryserver.client.Driver " + \
    " -u jdbc:phoenix:thin:url='" + url + ";serialization=" + serialization + "'" + \
    " -n none -p none --color=" + colorSetting + " --fastConnect=false --verbose=true " + \
    " --isolation=TRANSACTION_READ_COMMITTED " + sqlfile
exitcode = subprocess.call(java_cmd, shell=True)
sys.exit(exitcode)
| {
"content_hash": "b47c66a3508e6c5fd17923404a1c0130",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 126,
"avg_line_length": 32.1796875,
"alnum_prop": 0.6550133527555232,
"repo_name": "djh4230/Apache-Phoenix",
"id": "2e237ed044f895a89076a3b314c5b2dcb76a14fd",
"size": "5080",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/sqlline-thin.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "GAP",
"bytes": "44989"
},
{
"name": "HTML",
"bytes": "18969"
},
{
"name": "Java",
"bytes": "11693296"
},
{
"name": "JavaScript",
"bytes": "203766"
},
{
"name": "Protocol Buffer",
"bytes": "13566"
},
{
"name": "Python",
"bytes": "81176"
},
{
"name": "Scala",
"bytes": "46902"
},
{
"name": "Shell",
"bytes": "62142"
}
],
"symlink_target": ""
} |
"""Generate MRMS MESH Contours."""
import argparse
import datetime
import json
import os
import subprocess
import sys
import pathlib
import tempfile
import pygrib
import numpy as np
from pyiem.util import logger
from pyiem.mrms import NORTH, WEST
import rasterio
from rasterio.transform import from_origin
LOG = logger()
# Written into the metadata JSON as "script_version".
VERSION = 1
# strftime pattern for the UTC timestamps embedded in the metadata JSON.
ISO9660 = "%Y-%m-%dT%H:%M:00Z"
def pqinsert(tmpfn, ets, interval):
    """Hand the GeoJSON and its metadata sidecar to pqinsert."""
    # Hourly/daily products generated on the hour also get the "a" route.
    if interval in (60, 1440) and ets.minute == 0:
        routes = "ac"
    else:
        routes = "c"
    ts = ets.strftime('%Y%m%d%H%M')
    name = (
        f"data {routes} {ts} "
        f"gis/shape/4326/us/mrms_mesh_{interval}min.geojson "
        f"GIS/mrms/mesh_{interval}min_{ts}.geojson "
        "bogus"
    )
    LOG.debug(name)
    # Only insert the GeoJSON when gdal_contour produced a non-empty file.
    if pathlib.Path(f"{tmpfn}.geojson").stat().st_size > 0:
        subprocess.call(f"pqinsert -i -p '{name}' {tmpfn}.geojson", shell=True)
    meta = name.replace('.geojson', '_meta.json')
    subprocess.call(f"pqinsert -i -p '{meta}' {tmpfn}_meta.json", shell=True)
def make_metadata(tmpfn, mydict):
    """Write *mydict* as JSON to the sidecar file ``<tmpfn>_meta.json``.

    :param tmpfn: base filename (no suffix) for this run's products
    :param mydict: JSON-serializable metadata mapping
    """
    # Serialize straight into the file and pin the encoding instead of
    # relying on the locale default.
    with open(f"{tmpfn}_meta.json", "w", encoding="utf-8") as fp:
        json.dump(mydict, fp)
def make_contours(tmpfn):
    """Polygonize ``<tmpfn>.tif`` into ``<tmpfn>.geojson`` via gdal_contour."""
    levels = "0 5 10 15 20 25 30 35 40 45 50 55 60 65 70 75 80 85 90 95 100 150 200"
    cmd = (
        f"timeout 120 gdal_contour -fl {levels} "
        "-amin ssize_mm -amax esize_mm "
        f"-snodata -1 -p -q {tmpfn}.tif {tmpfn}.geojson"
    )
    subprocess.call(cmd, shell=True)
def make_raster(vals, tmpfn):
    """Make a TIFF for downstream gdal_contour usage."""
    # Collapse all negative (missing) values onto the -1 nodata marker.
    grid = np.where(vals < 0, -1, vals)
    transform = from_origin(WEST, NORTH, 0.01, 0.01)
    profile = {
        "driver": "GTiff",
        "height": grid.shape[0],
        "width": grid.shape[1],
        "count": 1,
        "dtype": str(grid.dtype),
        "crs": "+proj=longlat +datum=WGS84 +no_defs",
        "transform": transform,
    }
    with rasterio.open(f"{tmpfn}.tif", "w", **profile) as rst:
        rst.write(grid, 1)
def agg(sts, ets):
    """Build the element-wise max of 2-minute MESH grids over (sts, ets]."""
    step = datetime.timedelta(minutes=2)
    # in the rears
    now = sts + step
    maxval = None
    hits = 0
    misses = 0
    while now <= ets:
        fn = now.strftime("/mnt/mrms/MESH/%d%H%M.grib")
        if not os.path.isfile(fn):
            misses += 1
            now += step
            continue
        with pygrib.open(fn) as grb:
            vals = grb[1].values
        maxval = vals if maxval is None else np.maximum(vals, maxval)
        hits += 1
        now += step
    return maxval, hits, misses
def usage():
    """Create the argparse instance.

    :return: configured ``argparse.ArgumentParser``
    """
    # The original passed the title as the first positional argument,
    # which is `prog` (the program name), not `description` — and had a
    # "MRSH" typo for MESH.
    parser = argparse.ArgumentParser(description="MRMS MESH Contours")
    parser.add_argument(
        "-i",
        "--interval",
        required=True,
        type=int,
        help="aggregation interval in minutes",
    )
    parser.add_argument(
        "-t",
        "--datetime",
        required=True,
        type=lambda d: datetime.datetime.strptime(d[:16], "%Y-%m-%dT%H:%M"),
        help="end of the aggregation window (YYYY-mm-ddTHH:MM)",
    )
    return parser
def main(argv):
    """Go Main Go.

    Aggregate MESH grids over the requested interval, contour them, and
    insert the GeoJSON plus metadata sidecar via pqinsert.
    """
    started = datetime.datetime.utcnow()
    ctx = usage().parse_args(argv[1:])
    ets = ctx.datetime
    # Aggregation window: `interval` minutes ending at the given time.
    sts = ets - datetime.timedelta(minutes=ctx.interval)
    with tempfile.NamedTemporaryFile() as tmp:
        # tmp.name only provides a unique path prefix; products are
        # written as <tmp.name>.tif/.geojson/_meta.json.
        maxval, hits, misses = agg(sts, ets)
        if maxval is None:
            LOG.debug("Aborting, no data! %s", ctx)
            return
        make_raster(maxval, tmp.name)
        make_contours(tmp.name)
        # The GeoTIFF is only an intermediate for gdal_contour.
        os.unlink(f"{tmp.name}.tif")
        mydict = {
            "generated_at": datetime.datetime.utcnow().strftime(ISO9660),
            "start_time_utc": sts.strftime(ISO9660),
            "end_time_utc": ets.strftime(ISO9660),
            "2min_files_used": hits,
            "2min_files_missed": misses,
            "script_version": VERSION,
            "script_time_s": (
                datetime.datetime.utcnow() - started
            ).total_seconds(),
        }
        make_metadata(tmp.name, mydict)
        pqinsert(tmp.name, ets, ctx.interval)
        os.unlink(f"{tmp.name}.geojson")
        os.unlink(f"{tmp.name}_meta.json")
if __name__ == "__main__":
    # Script entry point: forward raw argv for argparse to consume.
    main(sys.argv)
| {
"content_hash": "c7e46893e76e0366b7d69bb4abc51a8a",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 76,
"avg_line_length": 28.571428571428573,
"alnum_prop": 0.570952380952381,
"repo_name": "akrherz/iem",
"id": "18b1f65c581b3f4601f1a3e350bc0e76bc1b0bc9",
"size": "4200",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "scripts/mrms/mesh_contours.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16912"
},
{
"name": "HTML",
"bytes": "1092923"
},
{
"name": "Hack",
"bytes": "7078"
},
{
"name": "JavaScript",
"bytes": "244253"
},
{
"name": "PHP",
"bytes": "3492474"
},
{
"name": "Python",
"bytes": "3279270"
},
{
"name": "Rich Text Format",
"bytes": "30075"
},
{
"name": "Shell",
"bytes": "72284"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from django.views.decorators.csrf import csrf_exempt
from sentry.api.base import Endpoint
from sentry.integrations.pipeline import ensure_integration
from .integration import BitbucketIntegrationProvider
class BitbucketInstalledEndpoint(Endpoint):
    """Endpoint receiving Bitbucket's "installed" callback payload."""

    # No authentication or permission checks are applied to this endpoint.
    authentication_classes = ()
    permission_classes = ()

    @csrf_exempt
    def dispatch(self, request, *args, **kwargs):
        return super(BitbucketInstalledEndpoint, self).dispatch(
            request, *args, **kwargs
        )

    def post(self, request, *args, **kwargs):
        payload = request.data
        integration_data = BitbucketIntegrationProvider().build_integration(payload)
        ensure_integration("bitbucket", integration_data)
        return self.respond()
| {
"content_hash": "4380007c30c6b2010ca5af30f0335ed9",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 89,
"avg_line_length": 29.4,
"alnum_prop": 0.7306122448979592,
"repo_name": "beeftornado/sentry",
"id": "85f0b41e29fb234d9bf05836ca0ef609f79f96de",
"size": "735",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/sentry/integrations/bitbucket/installed.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "157195"
},
{
"name": "HTML",
"bytes": "197026"
},
{
"name": "JavaScript",
"bytes": "380379"
},
{
"name": "Makefile",
"bytes": "2832"
},
{
"name": "Python",
"bytes": "6473603"
}
],
"symlink_target": ""
} |
import math
import zope.event
from bika.lims.utils import formatDecimalMark
from Products.Archetypes.event import ObjectInitializedEvent
from Products.CMFCore.WorkflowCore import WorkflowException
from Products.CMFPlone.utils import _createObjectByType
def create_analysis(context, service, keyword, interim_fields):
    """Create, initialize and workflow-transition an Analysis in *context*.

    :param context: container the analysis is created inside; also
        provides ``bika_setup`` and ``portal_workflow``
    :param service: analysis service the new analysis is based on
    :param keyword: id/keyword for the new Analysis object
    :param interim_fields: interim field definitions to store on it
    :return: the newly created analysis object
    """
    # Determine if the sampling workflow is enabled
    workflow_enabled = context.bika_setup.getSamplingWorkflowEnabled()
    # Create the analysis
    analysis = _createObjectByType("Analysis", context, keyword)
    analysis.setService(service)
    analysis.setInterimFields(interim_fields)
    analysis.setMaxTimeAllowed(service.getMaxTimeAllowed())
    analysis.unmarkCreationFlag()
    analysis.reindexObject()
    # Trigger the intitialization event of the new object
    zope.event.notify(ObjectInitializedEvent(analysis))
    # Perform the appropriate workflow action
    try:
        workflow_action = 'sampling_workflow' if workflow_enabled \
            else 'no_sampling_workflow'
        context.portal_workflow.doActionFor(analysis, workflow_action)
    except WorkflowException:
        # The analysis may have been transitioned already!
        # I am leaving this code here though, to prevent regression.
        pass
    # Return the newly created analysis
    return analysis
def get_significant_digits(numeric_value):
    """
    Returns the precision for a given floatable value.
    If value is None or not floatable, returns None.
    Returns 0 for values whose absolute value is >= 1 (and for 0),
    otherwise the number of leading decimal places.
    :param numeric_value: the value to get the precision from
    :return: the numeric_value's precision
    Examples:
        numeric_value     Returns
        0                 0
        0.22              1
        1.34              0
        0.0021            3
        0.013             2
        2                 0
        22                0
    """
    try:
        numeric_value = float(numeric_value)
    except (TypeError, ValueError):
        # float(None) raises TypeError and non-numeric strings raise
        # ValueError; catching only ValueError (as before) crashed on None
        # despite the documented contract.
        return None
    if numeric_value == 0:
        return 0
    significant_digit = int(math.floor(math.log10(abs(numeric_value))))
    return 0 if significant_digit > 0 else abs(significant_digit)
def format_uncertainty(analysis, result, decimalmark='.', sciformat=1):
    """
    Returns the formatted uncertainty according to the analysis, result
    and decimal mark specified.

    The uncertainty is rendered in scientific notation when the number
    of significant digits of the result reaches the analysis service's
    ExponentialFormatPrecision (scaled by the same exponent as the
    result), otherwise in decimal notation rounded to the analysis
    precision.  For the full set of rules see
    https://jira.bikalabs.com/browse/LIMS-1334

    :param analysis: the analysis from which the uncertainty, precision
        and other additional info have to be retrieved
    :param result: result of the analysis. Used to retrieve and/or
        calculate the precision and/or uncertainty
    :param decimalmark: decimal mark to use. By default '.'
    :param sciformat: 1. The sci notation has to be formatted as aE^+b
                      2. The sci notation has to be formatted as ax10^b
                      3. As 2, but with super html entity for exp
                      4. The sci notation has to be formatted as a·10^b
                      5. As 4, but with super html entity for exp
                      By default 1
    :return: the formatted uncertainty, or '' when the result is not
        floatable or the analysis has no (non-zero) uncertainty
    """
    try:
        result = float(result)
    except (TypeError, ValueError):
        # float(None) raises TypeError; previously only ValueError was
        # caught, so a None result crashed instead of returning ''.
        return ""
    objres = None
    try:
        objres = float(analysis.getResult())
    except (TypeError, ValueError):
        pass
    service = analysis.getService()
    uncertainty = None
    if result == objres:
        # To avoid problems with DLs
        uncertainty = analysis.getUncertainty()
    else:
        uncertainty = analysis.getUncertainty(result)
    if uncertainty is None or uncertainty == 0:
        return ""
    # Scientific notation?
    # Get the default precision for scientific notation
    threshold = service.getExponentialFormatPrecision()
    # Current result precision is above the threshold?
    sig_digits = get_significant_digits(result)
    negative = sig_digits < 0
    sign = '-' if negative else ''
    sig_digits = abs(sig_digits)
    sci = sig_digits >= threshold and sig_digits > 0
    formatted = ''
    if sci:
        # Scientific notation: scale the uncertainty by the same
        # exponent that will be used for the result (e.g. 3.2014E+4).
        if negative:
            res = float(uncertainty) * (10 ** sig_digits)
        else:
            res = float(uncertainty) / (10 ** sig_digits)
        # Round to the same precision as the result's mantissa.
        res = float(str("%%.%sf" % (sig_digits - 1)) % res)
        res = int(res) if res.is_integer() else res
        if sciformat in [2, 3, 4, 5]:
            if sciformat == 2:
                # ax10^b or ax10^-b
                formatted = "%s%s%s%s" % (res, "x10^", sign, sig_digits)
            elif sciformat == 3:
                # ax10<super>b</super> or ax10<super>-b</super>
                formatted = "%s%s%s%s%s" % (res, "x10<sup>", sign, sig_digits, "</sup>")
            elif sciformat == 4:
                # ax10^b or ax10^-b
                formatted = "%s%s%s%s" % (res, "·10^", sign, sig_digits)
            elif sciformat == 5:
                # ax10<super>b</super> or ax10<super>-b</super>
                formatted = "%s%s%s%s%s" % (res, "·10<sup>", sign, sig_digits, "</sup>")
        else:
            # Default format: aE^+b
            sig_digits = "%02d" % sig_digits
            formatted = "%s%s%s%s" % (res, "e", sign, sig_digits)
    else:
        # Decimal notation
        prec = analysis.getPrecision(result)
        prec = prec if prec else ''
        formatted = str("%%.%sf" % prec) % uncertainty
    return formatDecimalMark(formatted, decimalmark)
def format_numeric_result(analysis, result, decimalmark='.', sciformat=1):
    """
    Returns the formatted number part of a results value. This is
    responsible for deciding the precision, and notation of numeric
    values in accordance to the uncertainty. If a non-numeric
    result value is given, the value will be returned unchanged.

    The result is rendered in scientific notation when the number of
    significant digits reaches the analysis service's
    ExponentialFormatPrecision, otherwise in decimal notation rounded
    to the analysis precision.  For the full set of rules see
    https://jira.bikalabs.com/browse/LIMS-1334

    :param analysis: the analysis from which the uncertainty, precision
        and other additional info have to be retrieved
    :param result: result to be formatted.
    :param decimalmark: decimal mark to use. By default '.'
    :param sciformat: 1. The sci notation has to be formatted as aE^+b
                      2. The sci notation has to be formatted as ax10^b
                      3. As 2, but with super html entity for exp
                      4. The sci notation has to be formatted as a·10^b
                      5. As 4, but with super html entity for exp
                      By default 1
    :return: the formatted result
    """
    try:
        result = float(result)
    except (TypeError, ValueError):
        # float(None) raises TypeError; catching it keeps the documented
        # "non-numeric values are returned unchanged" contract.
        return result
    # continuing with 'nan' result will cause formatting to fail.
    if math.isnan(result):
        return result
    service = analysis.getService()
    # Scientific notation?
    # Get the default precision for scientific notation
    threshold = service.getExponentialFormatPrecision()
    # Current result precision is above the threshold?
    sig_digits = get_significant_digits(result)
    negative = sig_digits < 0
    sign = '-' if negative else ''
    sig_digits = abs(sig_digits)
    sci = sig_digits >= threshold
    formatted = ''
    if sci:
        # Scientific notation
        if sciformat in [2, 3, 4, 5]:
            if negative:
                res = float(result) * (10 ** sig_digits)
            else:
                res = float(result) / (10 ** sig_digits)
            res = float(str("%%.%sf" % (sig_digits - 1)) % res)
            # `res` is a float here: use is_integer() to drop a trailing
            # ".0" (the previous `'.' not in res` raised TypeError,
            # because `in` does not work on floats).  This also matches
            # format_uncertainty's handling.
            res = int(res) if res.is_integer() else res
            if sciformat == 2:
                # ax10^b or ax10^-b
                formatted = "%s%s%s%s" % (res, "x10^", sign, sig_digits)
            elif sciformat == 3:
                # ax10<super>b</super> or ax10<super>-b</super>
                formatted = "%s%s%s%s%s" % (res, "x10<sup>", sign, sig_digits, "</sup>")
            elif sciformat == 4:
                # ax10^b or ax10^-b
                formatted = "%s%s%s%s" % (res, "·10^", sign, sig_digits)
            elif sciformat == 5:
                # ax10<super>b</super> or ax10<super>-b</super>
                formatted = "%s%s%s%s%s" % (res, "·10<sup>", sign, sig_digits, "</sup>")
        else:
            # Default format: aE^+b
            formatted = str("%%.%se" % sig_digits) % result
    else:
        # Decimal notation
        prec = analysis.getPrecision(result)
        prec = prec if prec else ''
        formatted = str("%%.%sf" % prec) % result
    # `formatted` is a string here, so the '.' membership test is valid:
    # strip a trailing ".0" from whole numbers.
    formatted = str(int(float(formatted))) if '.' not in formatted else formatted
    return formatDecimalMark(formatted, decimalmark)
| {
"content_hash": "c2ece734944f20a12124e9814706d71a",
"timestamp": "",
"source": "github",
"line_count": 302,
"max_line_length": 85,
"avg_line_length": 40.32119205298013,
"alnum_prop": 0.6325039007965837,
"repo_name": "hocinebendou/bika.gsoc",
"id": "b4a4f069ee7bd40a991f90456193f5d897b056da",
"size": "12208",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bika/lims/utils/analysis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "403"
},
{
"name": "COBOL",
"bytes": "5987"
},
{
"name": "CSS",
"bytes": "29758"
},
{
"name": "JavaScript",
"bytes": "411425"
},
{
"name": "Python",
"bytes": "4330980"
},
{
"name": "RobotFramework",
"bytes": "239735"
},
{
"name": "Shell",
"bytes": "11201"
}
],
"symlink_target": ""
} |
"""circuits
Lightweight Event driven Framework
:copyright: CopyRight (C) 2004-2010 by James Mills
:license: MIT (See: LICENSE)
"""
try:
from __version__ import version as __version__
except ImportError:
__version__ = "unknown"
from core import *
| {
"content_hash": "7cb939963820397ed8d1e8b8f49c19e8",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 50,
"avg_line_length": 18.428571428571427,
"alnum_prop": 0.6937984496124031,
"repo_name": "antont/tundra",
"id": "31c03e69643f6de93c255c4c0fd3dc1eb32468d1",
"size": "374",
"binary": false,
"copies": "1",
"ref": "refs/heads/tundra2",
"path": "src/Application/PythonScriptModule/pymodules_old/circuits/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "110345"
},
{
"name": "C#",
"bytes": "76173"
},
{
"name": "C++",
"bytes": "4959154"
},
{
"name": "CoffeeScript",
"bytes": "2229"
},
{
"name": "JavaScript",
"bytes": "316308"
},
{
"name": "Objective-C",
"bytes": "222359"
},
{
"name": "Python",
"bytes": "999850"
},
{
"name": "Shell",
"bytes": "8224"
},
{
"name": "TypeScript",
"bytes": "230019"
}
],
"symlink_target": ""
} |
import logging
import os.path
import shutil
import time
import subprocess
from shell.cmd import exec_cmd, exec_cmd_out
from vcs import OK, NOT_OK
# try to locate the git command line tool, or raise ImportError
GIT = exec_cmd_out("which git").strip()
if GIT == '':
    # Raising ImportError lets callers treat a missing git binary the
    # same way as an unavailable VCS backend module.
    raise ImportError()
class VCSImpl(object):
    """Version-control backend driving the git command line client.

    Mutating operations return an ``(OK|NOT_OK, message)`` tuple built
    from the command's captured output.
    """

    def __init__(self, repo_directory):
        self.repo_directory = repo_directory
        self.ignore_path = os.path.join(repo_directory, '.git')
        logging.info("Using VCS: git")

    def _run(self, cmd):
        # Execute cmd inside the repository, returning (stdout, stderr).
        return exec_cmd(cmd, self.repo_directory)

    def _run_checked(self, cmd):
        # Run cmd and map any stderr output to a NOT_OK result.
        stdout, stderr = self._run(cmd)
        if stderr != '':
            return NOT_OK, stderr
        return OK, stdout

    def status(self):
        stdout, stderr = self._run("%s status --porcelain -uall" % GIT)
        return self._parse_status(stdout, stderr)

    def add(self, filename='.'):
        return self._run_checked("%s add %s" % (GIT, filename))

    def commit(self, message):
        return self._run_checked("%s commit -m \"%s\"" % (GIT, message))

    def commit_all(self, message):
        return self._run_checked("%s commit --all -m \"%s\"" % (GIT, message))

    def fetch(self):
        return self._run_checked("%s fetch" % GIT)

    def merge(self, remote='origin', branch='master'):
        stdout, stderr = self._run("%s merge %s/%s" % (GIT, remote, branch))
        # Merge conflicts are reported on stdout, not stderr.
        if 'CONFLICT' in stdout:
            return NOT_OK, stdout
        return OK, stdout

    def get_unmerged(self):
        stdout, stderr = self._run("%s ls-files -u" % GIT)
        return self._parse_ls_files(stdout, stderr)

    def save_theirs(self, unmerged, id):
        """Check out 'theirs' for a conflicted path and park it under the
        unique name '<id>.<hash>.<path>' inside the repository."""
        path = unmerged[2][3]
        stdout, stderr = self._run("%s checkout --theirs %s" % (GIT, path))
        if stderr != '':
            return NOT_OK, stderr
        src = os.path.join(self.repo_directory, path)
        dst = os.path.join(
            self.repo_directory, "%s.%s.%s" % (id, unmerged[2][1], path))
        logging.debug("move: %s -> %s" % (src, dst))
        shutil.move(src, dst)
        return OK, ""

    def force_ours(self, unmerged):
        # checkout "ours"
        return self._run_checked(
            "%s checkout --ours %s" % (GIT, unmerged[1][3]))

    def pull(self, remote='origin', branch='master'):
        stdout, stderr = self._run(
            "%s pull --ff-only %s %s" % (GIT, remote, branch))
        if 'error' in stdout:
            return NOT_OK, stdout
        return OK, stdout

    def push(self, remote='origin', branch='master'):
        stdout, stderr = self._run(
            "%s push --porcelain %s %s" % (GIT, remote, branch))
        if '[rejected]' in stdout or 'fatal' in stderr:
            return NOT_OK, stdout
        return OK, stdout

    def _parse_status(self, stdout, stderr):
        # One whitespace-split tuple per non-empty porcelain line.
        return [tuple(line.split()) for line in stdout.split("\n")
                if line != '']

    def _parse_ls_files(self, stdout, stderr):
        entries = [tuple(line.split()) for line in stdout.split("\n")
                   if line != '']
        # split into groups of 3 (one per merge stage)
        return [entries[i:i + 3] for i in range(0, len(entries), 3)]

    '''
    def handle_conflict(self):
        print("CONFLICT!!!:")
        # fetch state from remote
        cmd = "%s fetch" % GIT
        stdout,stderr = exec_cmd(cmd, self.repo_directory)
        if (stderr != ''):
            return NOT_OK,stderr
        # check for unmerged files
        cmd = "%s ls-files -u" % GIT
        stdout,stderr = exec_cmd(cmd, self.repo_directory)
        ls_files = self._parse_ls_files(stdout, stderr)
        print(ls_files)
        if len(ls_files) >= 3 and len(ls_files[2]) >= 4:
            # copy 3 ("their's") to a temporary file
            cmd = "%s show %s > %s.%s" % (GIT, ls_files[2][1], ls_files[2][1], ls_files[2][3])
            stdout,stderr = exec_cmd(cmd, self.repo_directory)
            if (stderr != ''):
                return NOT_OK,stderr
            # checkout "our's"
            cmd = "%s checkout --ours %s" % (GIT, ls_files[2][3])
            stdout,stderr = exec_cmd(cmd, self.repo_directory)
            if (stderr != ''):
                return NOT_OK,stderr
            # add and commit "our's"
            code,message = self.add(ls_files[2][3])
            if (code != OK):
                return code,message
            # push
            code,message = self.push()
            if (code != OK):
                return code,message
            return OK,"Conflict found. File renamed to: %s.%s" % (ls_files[2][1], ls_files[2][3])
        return OK,"No conflict detected"
    '''
| {
"content_hash": "2ea41f4f86293416fd0d6f36ee3e6ab4",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 98,
"avg_line_length": 31.976047904191617,
"alnum_prop": 0.5383895131086143,
"repo_name": "konker/syncilainen",
"id": "cd33542d89bc12216e06d68c3265c746ed494371",
"size": "5465",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vcs/impl/git.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "66223"
}
],
"symlink_target": ""
} |
"""Nova base exception handling.
Includes decorator for re-raising Nova-type exceptions.
SHOULD include dedicated exception logging.
"""
import functools
import itertools
import webob.exc
from nova import log as logging
from nova.openstack.common import excutils
LOG = logging.getLogger(__name__)
class ConvertedException(webob.exc.WSGIHTTPException):
    # Carries an arbitrary (code, title, explanation) triple into a WSGI
    # HTTP exception so other exception types can be rendered as HTTP
    # responses.
    def __init__(self, code=0, title="", explanation=""):
        self.code = code
        self.title = title
        self.explanation = explanation
        # NOTE(review): attributes are assigned before the webob
        # constructor runs — it presumably reads them when building the
        # response; confirm before reordering.
        super(ConvertedException, self).__init__()
class ProcessExecutionError(IOError):
    """Raised when an executed command fails.

    Stores the command, exit code and captured stdout/stderr exactly as
    supplied by the caller, and renders a human-readable message (with
    defaults filled in) as the IOError payload.
    """

    def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
                 description=None):
        self.exit_code = exit_code
        self.stderr = stderr
        self.stdout = stdout
        self.cmd = cmd
        self.description = description
        # Defaults are only applied to the rendered message; the
        # attributes above keep the raw caller-supplied values.
        if description is None:
            description = _('Unexpected error while running command.')
        if exit_code is None:
            exit_code = '-'
        # Format from an explicit mapping instead of `% locals()`, which
        # silently depends on every local name in scope and breaks if the
        # method body is refactored.
        message = _('%(description)s\nCommand: %(cmd)s\n'
                    'Exit code: %(exit_code)s\nStdout: %(stdout)r\n'
                    'Stderr: %(stderr)r') % {'description': description,
                                             'cmd': cmd,
                                             'exit_code': exit_code,
                                             'stdout': stdout,
                                             'stderr': stderr}
        IOError.__init__(self, message)
def wrap_db_error(f):
def _wrap(*args, **kwargs):
try:
return f(*args, **kwargs)
except UnicodeEncodeError:
raise InvalidUnicodeParameter()
except Exception, e:
LOG.exception(_('DB exception wrapped.'))
raise DBError(e)
_wrap.func_name = f.func_name
return _wrap
def wrap_exception(notifier=None, publisher_id=None, event_type=None,
                   level=None):
    """This decorator wraps a method to catch any exceptions that may
    get thrown. It logs the exception as well as optionally sending
    it to the notification system.

    :param notifier: notification module used to emit the event; when
        None, the exception is simply re-raised
    :param publisher_id: identifier passed through to notifier.notify
    :param event_type: event name; defaults to the wrapped function name
    :param level: notification level; defaults to notifier.ERROR
    """
    # TODO(sandy): Find a way to import nova.notifier.api so we don't have
    # to pass it in as a parameter. Otherwise we get a cyclic import of
    # nova.notifier.api -> nova.utils -> nova.exception :(
    # TODO(johannes): Also, it would be nice to use
    # utils.save_and_reraise_exception() without an import loop
    def inner(f):
        def wrapped(*args, **kw):
            try:
                return f(*args, **kw)
            except Exception, e:
                # Always re-raise; the context manager preserves the
                # original traceback while the notification is emitted.
                with excutils.save_and_reraise_exception():
                    if notifier:
                        payload = dict(args=args, exception=e)
                        payload.update(kw)
                        # Use a temp vars so we don't shadow
                        # our outer definitions.
                        temp_level = level
                        if not temp_level:
                            temp_level = notifier.ERROR
                        temp_type = event_type
                        if not temp_type:
                            # If f has multiple decorators, they must use
                            # functools.wraps to ensure the name is
                            # propagated.
                            temp_type = f.__name__
                        context = get_context_from_function_and_args(f,
                                                                     args,
                                                                     kw)
                        notifier.notify(context, publisher_id, temp_type,
                                        temp_level, payload)
        return functools.wraps(f)(wrapped)
    return inner
class NovaException(Exception):
    """Base Nova Exception

    To correctly use this class, inherit from it and define
    a 'message' property. That message will get printf'd
    with the keyword arguments provided to the constructor.
    """
    message = _("An unknown exception occurred.")

    def __init__(self, message=None, **kwargs):
        # Keep the raw kwargs so API layers can introspect the error.
        self.kwargs = kwargs
        if 'code' not in self.kwargs:
            # Mirror a subclass-level status code (e.g. NotAuthorized's
            # 403) into kwargs when the caller did not supply one.
            try:
                self.kwargs['code'] = self.code
            except AttributeError:
                pass
        if not message:
            try:
                message = self.message % kwargs
            except Exception as e:
                # kwargs doesn't match a variable in the message
                # log the issue and the kwargs
                LOG.exception(_('Exception in string format operation'))
                for name, value in kwargs.iteritems():
                    LOG.error("%s: %s" % (name, value))
                # at least get the core message out if something happened
                message = self.message
        super(NovaException, self).__init__(message)
class EC2APIError(NovaException):
    """Generic EC2 API error carrying an optional AWS error code."""

    message = _("Unknown")

    def __init__(self, message=None, code=None):
        self.msg = message
        self.code = code
        # Prefix the message with the code only when one was supplied.
        outstr = '%s: %s' % (code, message) if code else '%s' % message
        super(EC2APIError, self).__init__(outstr)
class DBError(NovaException):
    """Wraps an implementation specific exception."""
    def __init__(self, inner_exception=None):
        # Keep the original exception so callers can inspect the real
        # database error; the message is just its string form.
        self.inner_exception = inner_exception
        super(DBError, self).__init__(str(inner_exception))
# --- Assorted operation failures ------------------------------------------

class DecryptionFailure(NovaException):
    message = _("Failed to decrypt text")


class ImagePaginationFailed(NovaException):
    message = _("Failed to paginate through images from image service")


class VirtualInterfaceCreateException(NovaException):
    message = _("Virtual Interface creation failed")
class VirtualInterfaceMacAddressException(NovaException):
    """Raised after repeated attempts to pick a unique MAC address fail."""
    # The two adjacent literals previously concatenated without a space,
    # producing "...interfacewith...".
    message = _("5 attempts to create virtual interface "
                "with unique mac address failed")
class GlanceConnectionFailed(NovaException):
    message = _("Connection to glance failed") + ": %(reason)s"


class MelangeConnectionFailed(NovaException):
    message = _("Connection to melange failed") + ": %(reason)s"


# --- Authorization errors (surface HTTP 403 via the 'code' attribute) -----

class NotAuthorized(NovaException):
    message = _("Not authorized.")
    code = 403


class AdminRequired(NotAuthorized):
    message = _("User does not have admin privileges")


class PolicyNotAuthorized(NotAuthorized):
    message = _("Policy doesn't allow %(action)s to be performed.")


class ImageNotAuthorized(NovaException):
    message = _("Not authorized for image %(image_id)s.")
# --- Invalid-input errors (surface HTTP 400 via the 'code' attribute) -----

class Invalid(NovaException):
    message = _("Unacceptable parameters.")
    code = 400


class InvalidSnapshot(Invalid):
    message = _("Invalid snapshot") + ": %(reason)s"


class VolumeUnattached(Invalid):
    message = _("Volume %(volume_id)s is not attached to anything")


class InvalidKeypair(Invalid):
    message = _("Keypair data is invalid")


class SfJsonEncodeFailure(NovaException):
    message = _("Failed to load data into json format")


class InvalidRequest(Invalid):
    message = _("The request is invalid.")


class InvalidSignature(Invalid):
    message = _("Invalid signature %(signature)s for user %(user)s.")


class InvalidInput(Invalid):
    message = _("Invalid input received") + ": %(reason)s"


class InvalidInstanceType(Invalid):
    message = _("Invalid instance type %(instance_type)s.")


class InvalidVolumeType(Invalid):
    message = _("Invalid volume type") + ": %(reason)s"


class InvalidVolume(Invalid):
    message = _("Invalid volume") + ": %(reason)s"


class InvalidMetadata(Invalid):
    message = _("Invalid metadata") + ": %(reason)s"


class InvalidPortRange(Invalid):
    message = _("Invalid port range %(from_port)s:%(to_port)s. %(msg)s")


class InvalidIpProtocol(Invalid):
    message = _("Invalid IP protocol %(protocol)s.")


class InvalidContentType(Invalid):
    message = _("Invalid content type %(content_type)s.")


class InvalidCidr(Invalid):
    message = _("Invalid cidr %(cidr)s.")


class InvalidUnicodeParameter(Invalid):
    message = _("Invalid Parameter: "
                "Unicode is not supported by the current database.")


# Cannot be templated as the error syntax varies.
# msg needs to be constructed when raised.
class InvalidParameterValue(Invalid):
    message = _("%(err)s")


class InvalidAggregateAction(Invalid):
    message = _("Cannot perform action '%(action)s' on aggregate "
                "%(aggregate_id)s. Reason: %(reason)s.")


class InvalidGroup(Invalid):
    message = _("Group not valid. Reason: %(reason)s")
class InstanceInvalidState(Invalid):
message = _("Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot "
"%(method)s while the instance is in this state.")
class InstanceNotRunning(Invalid):
message = _("Instance %(instance_id)s is not running.")
class InstanceNotSuspended(Invalid):
message = _("Instance %(instance_id)s is not suspended.")
class InstanceNotInRescueMode(Invalid):
message = _("Instance %(instance_id)s is not in rescue mode")
class InstanceSuspendFailure(Invalid):
message = _("Failed to suspend instance") + ": %(reason)s"
class InstanceResumeFailure(Invalid):
message = _("Failed to resume server") + ": %(reason)s."
class InstanceRebootFailure(Invalid):
message = _("Failed to reboot instance") + ": %(reason)s"
class InstanceTerminationFailure(Invalid):
message = _("Failed to terminate instance") + ": %(reason)s"
class ServiceUnavailable(Invalid):
message = _("Service is unavailable at this time.")
class ComputeServiceUnavailable(ServiceUnavailable):
message = _("Compute service is unavailable at this time.")
class UnableToMigrateToSelf(Invalid):
message = _("Unable to migrate instance (%(instance_id)s) "
"to current host (%(host)s).")
class DestinationHostUnavailable(Invalid):
message = _("Destination compute host is unavailable at this time.")
class SourceHostUnavailable(Invalid):
message = _("Original compute host is unavailable at this time.")
class InvalidHypervisorType(Invalid):
message = _("The supplied hypervisor type of is invalid.")
class DestinationHypervisorTooOld(Invalid):
message = _("The instance requires a newer hypervisor version than "
"has been provided.")
class DestinationDiskExists(Invalid):
message = _("The supplied disk path (%(path)s) already exists, "
"it is expected not to exist.")
class InvalidDevicePath(Invalid):
message = _("The supplied device path (%(path)s) is invalid.")
class DeviceIsBusy(Invalid):
message = _("The supplied device (%(device)s) is busy.")
class InvalidCPUInfo(Invalid):
message = _("Unacceptable CPU info") + ": %(reason)s"
class InvalidIpAddressError(Invalid):
message = _("%(address)s is not a valid IP v4/6 address.")
class InvalidVLANTag(Invalid):
message = _("VLAN tag is not appropriate for the port group "
"%(bridge)s. Expected VLAN tag is %(tag)s, "
"but the one associated with the port group is %(pgroup)s.")
class InvalidVLANPortGroup(Invalid):
message = _("vSwitch which contains the port group %(bridge)s is "
"not associated with the desired physical adapter. "
"Expected vSwitch is %(expected)s, but the one associated "
"is %(actual)s.")
class InvalidDiskFormat(Invalid):
message = _("Disk format %(disk_format)s is not acceptable")
class ImageUnacceptable(Invalid):
message = _("Image %(image_id)s is unacceptable: %(reason)s")
class InstanceUnacceptable(Invalid):
message = _("Instance %(instance_id)s is unacceptable: %(reason)s")
class InvalidEc2Id(Invalid):
message = _("Ec2 id %(ec2_id)s is unacceptable.")
class InvalidUUID(Invalid):
    # BUG FIX: the original template read "%(uuid)." — an incomplete
    # %-format specifier missing the "s" conversion, which raises
    # ValueError ("incomplete format") as soon as the message is
    # interpolated with the uuid keyword.
    message = _("Expected a uuid but received %(uuid)s.")
# "Not found" failures map to HTTP 404.
class NotFound(NovaException):
    message = _("Resource could not be found.")
    code = 404
class FlagNotSet(NotFound):
    message = _("Required flag %(flag)s not set.")
class VolumeNotFound(NotFound):
    message = _("Volume %(volume_id)s could not be found.")
class SfAccountNotFound(NotFound):
    message = _("Unable to locate account %(account_name)s on "
                "Solidfire device")
class VolumeNotFoundForInstance(VolumeNotFound):
    message = _("Volume not found for instance %(instance_id)s.")
class VolumeMetadataNotFound(NotFound):
    message = _("Volume %(volume_id)s has no metadata with "
                "key %(metadata_key)s.")
class NoVolumeTypesFound(NotFound):
    message = _("Zero volume types found.")
class VolumeTypeNotFound(NotFound):
    message = _("Volume type %(volume_type_id)s could not be found.")
class VolumeTypeNotFoundByName(VolumeTypeNotFound):
    message = _("Volume type with name %(volume_type_name)s "
                "could not be found.")
class VolumeTypeExtraSpecsNotFound(NotFound):
    message = _("Volume Type %(volume_type_id)s has no extra specs with "
                "key %(extra_specs_key)s.")
class SnapshotNotFound(NotFound):
    message = _("Snapshot %(snapshot_id)s could not be found.")
class VolumeIsBusy(NovaException):
    message = _("deleting volume %(volume_name)s that has snapshot")
class SnapshotIsBusy(NovaException):
    message = _("deleting snapshot %(snapshot_name)s that has "
                "dependent volumes")
class ISCSITargetNotFoundForVolume(NotFound):
    message = _("No target id found for volume %(volume_id)s.")
class DiskNotFound(NotFound):
    message = _("No disk at %(location)s")
class VolumeDriverNotFound(NotFound):
    message = _("Could not find a handler for %(driver_type)s volume.")
class InvalidImageRef(Invalid):
    message = _("Invalid image href %(image_href)s.")
class ListingImageRefsNotSupported(Invalid):
    message = _("Some images have been stored via hrefs."
                " This version of the api does not support displaying image hrefs.")
class ImageNotFound(NotFound):
    message = _("Image %(image_id)s could not be found.")
class KernelNotFoundForImage(ImageNotFound):
    message = _("Kernel not found for image %(image_id)s.")
class UserNotFound(NotFound):
    message = _("User %(user_id)s could not be found.")
class ProjectNotFound(NotFound):
    message = _("Project %(project_id)s could not be found.")
class ProjectMembershipNotFound(NotFound):
    message = _("User %(user_id)s is not a member of project %(project_id)s.")
class UserRoleNotFound(NotFound):
    message = _("Role %(role_id)s could not be found.")
class StorageRepositoryNotFound(NotFound):
    message = _("Cannot find SR to read/write VDI.")
# Network-related failures.
class NetworkInUse(NovaException):
    message = _("Network %(network_id)s is still in use.")
class NetworkNotCreated(NovaException):
    message = _("%(req)s is required to create a network.")
class NetworkNotFound(NotFound):
    message = _("Network %(network_id)s could not be found.")
class NetworkNotFoundForBridge(NetworkNotFound):
    message = _("Network could not be found for bridge %(bridge)s")
class NetworkNotFoundForUUID(NetworkNotFound):
    message = _("Network could not be found for uuid %(uuid)s")
class NetworkNotFoundForCidr(NetworkNotFound):
    message = _("Network could not be found with cidr %(cidr)s.")
class NetworkNotFoundForInstance(NetworkNotFound):
    message = _("Network could not be found for instance %(instance_id)s.")
class NoNetworksFound(NotFound):
    message = _("No networks defined.")
class NetworkNotFoundForProject(NotFound):
    message = _("Either Network uuid %(network_uuid)s is not present or "
                "is not assigned to the project %(project_id)s.")
class NetworkHostNotSet(NovaException):
    message = _("Host is not set to the network (%(network_id)s).")
class NetworkBusy(NovaException):
    message = _("Network %(network)s has active ports, cannot delete.")
class DatastoreNotFound(NotFound):
    message = _("Could not find the datastore reference(s) which the VM uses.")
# Fixed-IP failures.
class FixedIpNotFound(NotFound):
    message = _("No fixed IP associated with id %(id)s.")
class FixedIpNotFoundForAddress(FixedIpNotFound):
    message = _("Fixed ip not found for address %(address)s.")
class FixedIpNotFoundForInstance(FixedIpNotFound):
    message = _("Instance %(instance_id)s has zero fixed ips.")
class FixedIpNotFoundForNetworkHost(FixedIpNotFound):
    message = _("Network host %(host)s has zero fixed ips "
                "in network %(network_id)s.")
class FixedIpNotFoundForSpecificInstance(FixedIpNotFound):
    message = _("Instance %(instance_id)s doesn't have fixed ip '%(ip)s'.")
class FixedIpNotFoundForHost(FixedIpNotFound):
    message = _("Host %(host)s has zero fixed ips.")
class FixedIpNotFoundForNetwork(FixedIpNotFound):
    message = _("Fixed IP address (%(address)s) does not exist in "
                "network (%(network_uuid)s).")
class FixedIpAlreadyInUse(NovaException):
    message = _("Fixed IP address %(address)s is already in use.")
class FixedIpInvalid(Invalid):
    message = _("Fixed IP address %(address)s is invalid.")
class NoMoreFixedIps(NovaException):
    message = _("Zero fixed ips available.")
class NoFixedIpsDefined(NotFound):
    message = _("Zero fixed ips could be found.")
# Floating-IP failures.
class FloatingIpNotFound(NotFound):
    message = _("Floating ip not found for id %(id)s.")
class FloatingIpDNSExists(Invalid):
    message = _("The DNS entry %(name)s already exists in domain %(domain)s.")
class FloatingIpNotFoundForAddress(FloatingIpNotFound):
    message = _("Floating ip not found for address %(address)s.")
class FloatingIpNotFoundForHost(FloatingIpNotFound):
    message = _("Floating ip not found for host %(host)s.")
class NoMoreFloatingIps(FloatingIpNotFound):
    message = _("Zero floating ips available.")
class FloatingIpAssociated(NovaException):
    message = _("Floating ip %(address)s is associated.")
class FloatingIpNotAssociated(NovaException):
    message = _("Floating ip %(address)s is not associated.")
class NoFloatingIpsDefined(NotFound):
    message = _("Zero floating ips exist.")
class NoFloatingIpInterface(NotFound):
    message = _("Interface %(interface)s not found.")
class KeypairNotFound(NotFound):
    message = _("Keypair %(name)s not found for user %(user_id)s")
class CertificateNotFound(NotFound):
    message = _("Certificate %(certificate_id)s not found.")
class ServiceNotFound(NotFound):
    message = _("Service %(service_id)s could not be found.")
class HostNotFound(NotFound):
    message = _("Host %(host)s could not be found.")
class ComputeHostNotFound(HostNotFound):
    message = _("Compute host %(host)s could not be found.")
class HostBinaryNotFound(NotFound):
    message = _("Could not find binary %(binary)s on host %(host)s.")
class AuthTokenNotFound(NotFound):
    message = _("Auth token %(token)s could not be found.")
class AccessKeyNotFound(NotFound):
    message = _("Access Key %(access_key)s could not be found.")
# Quota / reservation failures.
class InvalidReservationExpiration(Invalid):
    message = _("Invalid reservation expiration %(expire)s.")
class InvalidQuotaValue(Invalid):
    message = _("Change would make usage less than 0 for the following "
                "resources: %(unders)s")
class QuotaNotFound(NotFound):
    message = _("Quota could not be found")
class QuotaResourceUnknown(QuotaNotFound):
    message = _("Unknown quota resources %(unknown)s.")
class ProjectQuotaNotFound(QuotaNotFound):
    message = _("Quota for project %(project_id)s could not be found.")
class QuotaClassNotFound(QuotaNotFound):
    message = _("Quota class %(class_name)s could not be found.")
class QuotaUsageNotFound(QuotaNotFound):
    message = _("Quota usage for project %(project_id)s could not be found.")
class ReservationNotFound(QuotaNotFound):
    message = _("Quota reservation %(uuid)s could not be found.")
class OverQuota(NovaException):
    message = _("Quota exceeded for resources: %(overs)s")
# Security-group failures.
class SecurityGroupNotFound(NotFound):
    message = _("Security group %(security_group_id)s not found.")
class SecurityGroupNotFoundForProject(SecurityGroupNotFound):
    message = _("Security group %(security_group_id)s not found "
                "for project %(project_id)s.")
class SecurityGroupNotFoundForRule(SecurityGroupNotFound):
    message = _("Security group with rule %(rule_id)s not found.")
class SecurityGroupExistsForInstance(Invalid):
    message = _("Security group %(security_group_id)s is already associated"
                " with the instance %(instance_id)s")
class SecurityGroupNotExistsForInstance(Invalid):
    message = _("Security group %(security_group_id)s is not associated with"
                " the instance %(instance_id)s")
class MigrationNotFound(NotFound):
    message = _("Migration %(migration_id)s could not be found.")
class MigrationNotFoundByStatus(MigrationNotFound):
    message = _("Migration not found for instance %(instance_id)s "
                "with status %(status)s.")
# Console failures.
class ConsolePoolNotFound(NotFound):
    message = _("Console pool %(pool_id)s could not be found.")
class ConsolePoolNotFoundForHostType(NotFound):
    message = _("Console pool of type %(console_type)s "
                "for compute host %(compute_host)s "
                "on proxy host %(host)s not found.")
class ConsoleNotFound(NotFound):
    message = _("Console %(console_id)s could not be found.")
class ConsoleNotFoundForInstance(ConsoleNotFound):
    message = _("Console for instance %(instance_id)s could not be found.")
class ConsoleNotFoundInPoolForInstance(ConsoleNotFound):
    message = _("Console for instance %(instance_id)s "
                "in pool %(pool_id)s could not be found.")
class ConsoleTypeInvalid(Invalid):
    message = _("Invalid console type %(console_type)s ")
# Instance-type / scheduler failures.
class NoInstanceTypesFound(NotFound):
    message = _("Zero instance types found.")
class InstanceTypeNotFound(NotFound):
    message = _("Instance type %(instance_type_id)s could not be found.")
class InstanceTypeNotFoundByName(InstanceTypeNotFound):
    message = _("Instance type with name %(instance_type_name)s "
                "could not be found.")
class FlavorNotFound(NotFound):
    message = _("Flavor %(flavor_id)s could not be found.")
class CellNotFound(NotFound):
    message = _("Cell %(cell_id)s could not be found.")
class SchedulerHostFilterNotFound(NotFound):
    message = _("Scheduler Host Filter %(filter_name)s could not be found.")
class SchedulerCostFunctionNotFound(NotFound):
    message = _("Scheduler cost function %(cost_fn_str)s could"
                " not be found.")
class SchedulerWeightFlagNotFound(NotFound):
    message = _("Scheduler weight flag not found: %(flag_name)s")
class InstanceMetadataNotFound(NotFound):
    message = _("Instance %(instance_id)s has no metadata with "
                "key %(metadata_key)s.")
class InstanceSystemMetadataNotFound(NotFound):
    message = _("Instance %(instance_uuid)s has no system metadata with "
                "key %(metadata_key)s.")
class InstanceTypeExtraSpecsNotFound(NotFound):
    message = _("Instance Type %(instance_type_id)s has no extra specs with "
                "key %(extra_specs_key)s.")
# LDAP failures.
class LDAPObjectNotFound(NotFound):
    message = _("LDAP object could not be found")
class LDAPUserNotFound(LDAPObjectNotFound):
    message = _("LDAP user %(user_id)s could not be found.")
class LDAPGroupNotFound(LDAPObjectNotFound):
    message = _("LDAP group %(group_id)s could not be found.")
class LDAPGroupMembershipNotFound(NotFound):
    message = _("LDAP user %(user_id)s is not a member of group %(group_id)s.")
class FileNotFound(NotFound):
    message = _("File %(file_path)s could not be found.")
class NoFilesFound(NotFound):
    message = _("Zero files could be found.")
class SwitchNotFoundForNetworkAdapter(NotFound):
    message = _("Virtual switch associated with the "
                "network adapter %(adapter)s not found.")
class NetworkAdapterNotFound(NotFound):
    message = _("Network adapter %(adapter)s could not be found.")
class ClassNotFound(NotFound):
    message = _("Class %(class_name)s could not be found: %(exception)s")
class NotAllowed(NovaException):
    message = _("Action not allowed.")
class GlobalRoleNotAllowed(NotAllowed):
    message = _("Unable to use global role %(role_id)s")
class ImageRotationNotAllowed(NovaException):
    message = _("Rotation is not allowed for snapshots")
class RotationRequiredForBackup(NovaException):
    message = _("Rotation param is required for backup image_type")
#TODO(bcwaldon): EOL this exception!
class Duplicate(NovaException):
    pass
class KeyPairExists(Duplicate):
    message = _("Key pair %(key_name)s already exists.")
class UserExists(Duplicate):
    message = _("User %(user)s already exists.")
class LDAPUserExists(UserExists):
    message = _("LDAP user %(user)s already exists.")
class LDAPGroupExists(Duplicate):
    message = _("LDAP group %(group)s already exists.")
class LDAPMembershipExists(Duplicate):
    message = _("User %(uid)s is already a member of "
                "the group %(group_dn)s")
class ProjectExists(Duplicate):
    message = _("Project %(project)s already exists.")
class InstanceExists(Duplicate):
    message = _("Instance %(name)s already exists.")
class InstanceTypeExists(Duplicate):
    message = _("Instance Type %(name)s already exists.")
class VolumeTypeExists(Duplicate):
    message = _("Volume Type %(name)s already exists.")
class InvalidSharedStorage(NovaException):
    message = _("%(path)s is on shared storage: %(reason)s")
class MigrationError(NovaException):
    message = _("Migration error") + ": %(reason)s"
class MalformedRequestBody(NovaException):
    message = _("Malformed message body: %(reason)s")
class ConfigNotFound(NotFound):
    message = _("Could not find config at %(path)s")
class PasteAppNotFound(NotFound):
    message = _("Could not load paste app '%(name)s' from %(path)s")
class CannotResizeToSameSize(NovaException):
    message = _("When resizing, instances must change size!")
class ImageTooLarge(NovaException):
    message = _("Image is larger than instance type allows")
class ZoneRequestError(NovaException):
    message = _("1 or more Zones could not complete the request")
class InstanceTypeMemoryTooSmall(NovaException):
    message = _("Instance type's memory is too small for requested image.")
class InstanceTypeDiskTooSmall(NovaException):
    message = _("Instance type's disk is too small for requested image.")
class InsufficientFreeMemory(NovaException):
    message = _("Insufficient free memory on compute node to start %(uuid)s.")
class CouldNotFetchMetrics(NovaException):
    message = _("Could not fetch bandwidth/cpu/disk metrics for this host.")
class NoValidHost(NovaException):
    message = _("No valid host was found. %(reason)s")
class WillNotSchedule(NovaException):
    message = _("Host %(host)s is not up or doesn't exist.")
# Quota-limit failures.
class QuotaError(NovaException):
    message = _("Quota exceeded") + ": code=%(code)s"
class TooManyInstances(QuotaError):
    message = _("Quota exceeded: already used %(used)d of %(allowed)d"
                " instances")
class VolumeSizeTooLarge(QuotaError):
    message = _("Maximum volume size exceeded")
class FloatingIpLimitExceeded(QuotaError):
    message = _("Maximum number of floating ips exceeded")
class MetadataLimitExceeded(QuotaError):
    message = _("Maximum number of metadata items exceeds %(allowed)d")
class OnsetFileLimitExceeded(QuotaError):
    message = _("Personality file limit exceeded")
class OnsetFilePathLimitExceeded(QuotaError):
    message = _("Personality file path too long")
class OnsetFileContentLimitExceeded(QuotaError):
    message = _("Personality file content too long")
class KeypairLimitExceeded(QuotaError):
    message = _("Maximum number of key pairs exceeded")
# Host-aggregate failures.
class AggregateError(NovaException):
    message = _("Aggregate %(aggregate_id)s: action '%(action)s' "
                "caused an error: %(reason)s.")
class AggregateNotFound(NotFound):
    message = _("Aggregate %(aggregate_id)s could not be found.")
class AggregateNameExists(Duplicate):
    message = _("Aggregate %(aggregate_name)s already exists.")
class AggregateHostNotFound(NotFound):
    message = _("Aggregate %(aggregate_id)s has no host %(host)s.")
class AggregateMetadataNotFound(NotFound):
    message = _("Aggregate %(aggregate_id)s has no metadata with "
                "key %(metadata_key)s.")
class AggregateHostConflict(Duplicate):
    message = _("Host %(host)s already member of another aggregate.")
class AggregateHostExists(Duplicate):
    message = _("Aggregate %(aggregate_id)s already has host %(host)s.")
class DuplicateSfVolumeNames(Duplicate):
    message = _("Detected more than one volume with name %(vol_name)s")
class VolumeTypeCreateFailed(NovaException):
    message = _("Cannot create volume_type with "
                "name %(name)s and specs %(extra_specs)s")
class InstanceTypeCreateFailed(NovaException):
    message = _("Unable to create instance type")
# SolidFire driver failures.
class SolidFireAPIException(NovaException):
    message = _("Bad response from SolidFire API")
class SolidFireAPIStatusException(SolidFireAPIException):
    message = _("Error in SolidFire API response: status=%(status)s")
class SolidFireAPIDataException(SolidFireAPIException):
    message = _("Error in SolidFire API response: data=%(data)s")
class DuplicateVlan(Duplicate):
    message = _("Detected existing vlan with id %(vlan)d")
class InstanceNotFound(NotFound):
    message = _("Instance %(instance_id)s could not be found.")
class InvalidInstanceIDMalformed(Invalid):
    message = _("Invalid id: %(val)s (expecting \"i-...\").")
class CouldNotFetchImage(NovaException):
    message = _("Could not fetch image %(image)s")
def get_context_from_function_and_args(function, args, kwargs):
    """Find an arg of type RequestContext and return it.
    This is useful in a couple of decorators where we don't
    know much about the function we're wrapping.
    """
    # import here to avoid circularity:
    from nova import context
    # Keyword values are scanned before positional arguments.
    candidates = itertools.chain(kwargs.values(), args)
    for candidate in candidates:
        if isinstance(candidate, context.RequestContext):
            return candidate
    return None
| {
"content_hash": "dad5d26716fb16a4bf43dbfffb0a06e9",
"timestamp": "",
"source": "github",
"line_count": 1092,
"max_line_length": 79,
"avg_line_length": 27.686813186813186,
"alnum_prop": 0.668981940861282,
"repo_name": "josephsuh/extra-specs",
"id": "1e1818e7d044537cc25f3f51befbca938a97ac0b",
"size": "31011",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/exception.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "6005171"
},
{
"name": "Shell",
"bytes": "26155"
}
],
"symlink_target": ""
} |
"""subcmd_plot.py
Provides the plot subcommand for pdp
(c) The James Hutton Institute 2017-19
Author: Leighton Pritchard
Contact: leighton.pritchard@hutton.ac.uk
Leighton Pritchard,
Information and Computing Sciences,
James Hutton Institute,
Errol Road,
Invergowrie,
Dundee,
DD2 5DA,
Scotland,
UK
The MIT License
Copyright (c) 2017-19 The James Hutton Institute
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from diagnostic_primers import plot
from diagnostic_primers.scripts.tools import create_output_directory
def subcmd_plot(args, logger):
    """Generate graphical output for pdp."""
    # Make sure there is somewhere to write the plots.
    create_output_directory(args.outdir, args.pl_force, logger)
    # Marker amplicon scatterplot of distances, only when requested.
    if args.markerscatter is None:
        return
    logger.info("Generating scatterplot of marker distances")
    plot.markerscatter(args.markerscatter, args.outdir)
| {
"content_hash": "e7fb91c3bcb791872f56c95db9fe9a8d",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 77,
"avg_line_length": 35.24074074074074,
"alnum_prop": 0.7877036258539148,
"repo_name": "widdowquinn/find_differential_primers",
"id": "5caa479758737ee814d85dba26a890646726127e",
"size": "1950",
"binary": false,
"copies": "1",
"ref": "refs/heads/diagnostic_primers",
"path": "diagnostic_primers/scripts/subcommands/subcmd_plot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "416651"
},
{
"name": "Shell",
"bytes": "4171"
}
],
"symlink_target": ""
} |
""" Test cases for Usage Test Path
"""
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.utils import (cleanup_resources,
validateList,
verifyRouterState,
get_process_status)
from marvin.lib.base import (Account,
ServiceOffering,
VirtualMachine,
Template,
Iso,
DiskOffering,
Volume,
Snapshot,
PublicIPAddress,
LoadBalancerRule,
EgressFireWallRule,
Router,
VmSnapshot,
Usage,
Configurations,
FireWallRule,
NATRule,
StaticNATRule,
Network,
Vpn,
VpnUser,
VpcOffering,
VPC,
NetworkACL)
from marvin.lib.common import (get_domain,
get_zone,
get_template,
createEnabledNetworkOffering,
get_builtin_template_info,
findSuitableHostForMigration,
list_hosts,
list_volumes,
list_routers)
from marvin.codes import (PASS, FAIL, ERROR_NO_HOST_FOR_MIGRATION)
from marvin.sshClient import SshClient
import time
def CreateEnabledNetworkOffering(apiclient, networkServices):
    """Create network offering of given services and enable it"""
    # Helper returns [status, offering, reason].
    outcome = createEnabledNetworkOffering(apiclient, networkServices)
    status, offering = outcome[0], outcome[1]
    assert status == PASS, \
        "Network offering creation/enabling failed due to %s" % outcome[2]
    return offering
class TestUsage(cloudstackTestCase):
@classmethod
def setUpClass(cls):
    """One-time fixture: clients, zone/domain lookups, service and
    network offerings, and usage-job configuration.

    If the usage service is not running, only sets usageJobNotRunning
    so that setUp() can skip each test.
    """
    testClient = super(TestUsage, cls).getClsTestClient()
    cls.hypervisor = testClient.getHypervisorInfo()
    cls.apiclient = testClient.getApiClient()
    cls.testdata = testClient.getParsedTestDataConfig()
    cls._cleanup = []
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
    cls.mgtSvrDetails = cls.config.__dict__["mgtSvr"][0].__dict__
    isUsageJobRunning = cls.IsUsageJobRunning()
    cls.usageJobNotRunning = False
    if not isUsageJobRunning:
        cls.usageJobNotRunning = True
        return
    if cls.testdata["configurableData"][
            "setUsageConfigurationThroughTestCase"]:
        # Reconfigure the usage job ourselves and restart the servers
        # so the new settings take effect.
        cls.setUsageConfiguration()
        cls.RestartServers()
    else:
        # Usage already configured; just record today's date for
        # later usage-record queries.
        currentMgtSvrTime = cls.getCurrentMgtSvrTime()
        dateTimeSplit = currentMgtSvrTime.split("/")
        cls.curDate = dateTimeSplit[0]
    cls.hypervisor = testClient.getHypervisorInfo()
    cls.template = get_template(
        cls.apiclient,
        cls.zone.id,
        cls.testdata["ostype"])
    try:
        # If local storage is enabled, alter the offerings to use
        # localstorage
        if cls.zone.localstorageenable:
            cls.testdata["service_offering"]["storagetype"] = 'local'
        # Create 2 service offerings with different values
        # for cpunumber, cpuspeed, and memory
        cls.testdata["service_offering"]["cpunumber"] = "1"
        cls.testdata["service_offering"]["cpuspeed"] = "128"
        cls.testdata["service_offering"]["memory"] = "256"
        cls.service_offering = ServiceOffering.create(
            cls.apiclient,
            cls.testdata["service_offering"]
        )
        cls._cleanup.append(cls.service_offering)
        cls.testdata["service_offering"]["cpunumber"] = "2"
        cls.testdata["service_offering"]["cpuspeed"] = "256"
        cls.testdata["service_offering"]["memory"] = "512"
        cls.service_offering_2 = ServiceOffering.create(
            cls.apiclient,
            cls.testdata["service_offering"]
        )
        cls._cleanup.append(cls.service_offering_2)
        # Create isolated network offering
        cls.isolated_network_offering = CreateEnabledNetworkOffering(
            cls.apiclient,
            cls.testdata["isolated_network_offering"]
        )
        cls._cleanup.append(cls.isolated_network_offering)
        cls.isolated_network_offering_2 = CreateEnabledNetworkOffering(
            cls.apiclient,
            cls.testdata["isolated_network_offering"]
        )
        cls._cleanup.append(cls.isolated_network_offering_2)
        cls.isolated_network_offering_vpc = CreateEnabledNetworkOffering(
            cls.apiclient,
            cls.testdata["nw_offering_isolated_vpc"]
        )
        cls._cleanup.append(cls.isolated_network_offering_vpc)
        cls.testdata["shared_network_offering_all_services"][
            "specifyVlan"] = "True"
        cls.testdata["shared_network_offering_all_services"][
            "specifyIpRanges"] = "True"
        cls.shared_network_offering = CreateEnabledNetworkOffering(
            cls.apiclient,
            cls.testdata["shared_network_offering_all_services"]
        )
        cls._cleanup.append(cls.shared_network_offering)
        configs = Configurations.list(
            cls.apiclient,
            name='usage.stats.job.aggregation.range'
        )
        # Set the value for one more minute than
        # actual range to be on safer side
        cls.usageJobAggregationRange = (
            int(configs[0].value) + 1) * 60  # in seconds
    except Exception as e:
        cls.tearDownClass()
        raise e
    return
@classmethod
def tearDownClass(cls):
    """Remove every resource registered for class-level cleanup."""
    try:
        cleanup_resources(cls.apiclient, cls._cleanup)
    except Exception as exc:
        raise Exception("Warning: Exception during cleanup : %s" % exc)
def setUp(self):
    """Per-test fixture: API/DB clients plus a throwaway account.

    Skips the test when setUpClass found the usage job not running.
    """
    self.apiclient = self.testClient.getApiClient()
    self.dbclient = self.testClient.getDbConnection()
    self.cleanup = []
    if self.usageJobNotRunning:
        self.skipTest("Skipping test because usage job not running")
    # Create an account
    self.account = Account.create(
        self.apiclient,
        self.testdata["account"],
        domainid=self.domain.id
    )
    self.cleanup.append(self.account)
    # Create user api client of the account
    self.userapiclient = self.testClient.getUserApiClient(
        UserName=self.account.name,
        DomainName=self.account.domain
    )
def tearDown(self):
    """Remove per-test resources (the account created in setUp)."""
    try:
        cleanup_resources(self.apiclient, self.cleanup)
    except Exception as exc:
        raise Exception("Warning: Exception during cleanup : %s" % exc)
@classmethod
def setUsageConfiguration(cls):
    """Set the configuration parameters so that the usage job runs
    every 10 minutes, scheduled 5 minutes from the current
    management-server time."""
    Configurations.update(
        cls.apiclient,
        name="enable.usage.server",
        value="true"
    )
    Configurations.update(
        cls.apiclient,
        name="usage.aggregation.timezone",
        value="GMT"
    )
    Configurations.update(
        cls.apiclient,
        name="usage.execution.timezone",
        value="GMT"
    )
    Configurations.update(
        cls.apiclient,
        name="usage.stats.job.aggregation.range",
        value="10"
    )
    currentMgtSvrTime = cls.getCurrentMgtSvrTime()
    dateTimeSplit = currentMgtSvrTime.split("/")
    cls.curDate = dateTimeSplit[0]
    timeSplit = dateTimeSplit[1].split(":")
    # BUG FIX: the original did a plain "minutes += 5" with no carry,
    # producing invalid exec times such as "14:62" whenever the current
    # minute was >= 55, and left single-digit minutes unpadded ("14:7").
    # Roll overflow into the hour (mod 24) and zero-pad both fields so
    # the value is always a valid HH:MM string.
    hours = int(timeSplit[0])
    minutes = int(timeSplit[1]) + 5
    hours = (hours + minutes // 60) % 24
    minutes %= 60
    usageJobExecTime = "%02d:%02d" % (hours, minutes)
    Configurations.update(
        cls.apiclient,
        name="usage.stats.job.exec.time",
        value=usageJobExecTime
    )
    return
@classmethod
def getCurrentMgtSvrTime(cls, format='%Y-%m-%d/%H:%M'):
    """ Get the current time from Management Server """
    ssh = SshClient(
        cls.mgtSvrDetails["mgtSvrIp"],
        22,
        cls.mgtSvrDetails["user"],
        cls.mgtSvrDetails["passwd"]
    )
    # "date +<format>" prints the server's local time in that format.
    return ssh.execute("date +%s" % format)[0]
@classmethod
def RestartServers(cls):
    """ Restart management server and usage server """
    ssh = SshClient(
        cls.mgtSvrDetails["mgtSvrIp"],
        22,
        cls.mgtSvrDetails["user"],
        cls.mgtSvrDetails["passwd"]
    )
    # Management server first, then the usage server.
    for restart_command in ("service cloudstack-management restart",
                            "service cloudstack-usage restart"):
        ssh.execute(restart_command)
@classmethod
def IsUsageJobRunning(cls):
    """ Check that usage job is running on Management server or not"""
    ssh = SshClient(
        cls.mgtSvrDetails["mgtSvrIp"],
        22,
        cls.mgtSvrDetails["user"],
        cls.mgtSvrDetails["passwd"]
    )
    status = str(ssh.execute("service cloudstack-usage status")).lower()
    return "running" in status
def getLatestUsageJobExecutionTime(self):
    """Return [PASS, end_date] of the newest successful usage job,
    or [FAIL, exception] when the lookup fails."""
    try:
        rows = self.dbclient.execute(
            "SELECT max(end_date) FROM usage_job WHERE success=1;",
            db="cloud_usage")
        self.assertNotEqual(
            len(rows),
            0,
            "Check DB Query result set"
        )
        latestRunTime = rows[0][0]
        self.debug(
            "last usage job exec time: %s" %
            latestRunTime)
    except Exception as exc:
        return [FAIL, exc]
    return [PASS, latestRunTime]
def getEventCreatedDateTime(self, resourceName):
    """Return [PASS, created] timestamp for the named resource from the
    cloud_usage.usage_event table, or [FAIL, exception]."""
    try:
        # Checking exact entity creation time
        rows = self.dbclient.execute(
            "select created from usage_event where resource_name = '%s';" %
            str(resourceName), db="cloud_usage")
        self.assertNotEqual(
            len(rows),
            0,
            "Check DB Query result set"
        )
        createdTimestamp = rows[0][0]
    except Exception as exc:
        return [FAIL, exc]
    return [PASS, createdTimestamp]
def listUsageRecords(self, usagetype, apiclient=None, startdate=None,
                     enddate=None, account=None, sleep=True):
    """List and return the usage records for the given account and usage type.

    Args:
        usagetype: numeric CloudStack usage type to filter on.
        apiclient: API client to use; defaults to ``self.apiclient``.
        startdate/enddate: reporting window; default to ``self.curDate``.
        account: account to query; defaults to ``self.account``.
        sleep: when True, wait one usage-aggregation interval first so
            the usage job has run at least once after the operation.

    Returns:
        ``[PASS, records]`` on success, ``[FAIL, exception]`` otherwise.
    """
    if sleep:
        # Sleep till usage job has run at least once after the operation
        self.debug(
            "Sleeping for %s seconds" %
            self.usageJobAggregationRange)
        time.sleep(self.usageJobAggregationRange)
    if not startdate:
        startdate = self.curDate
    if not enddate:
        enddate = self.curDate
    if not account:
        account = self.account
    if not apiclient:
        # BUG FIX: original code read 'self.apiclient' without assigning
        # it, so the 'apiclient' argument was silently ignored and
        # self.apiclient was always used below.
        apiclient = self.apiclient
    Usage.generateRecords(
        apiclient,
        startdate=startdate,
        enddate=enddate)
    try:
        usageRecords = Usage.listRecords(
            apiclient,
            startdate=startdate,
            enddate=enddate,
            account=account.name,
            domainid=account.domainid,
            type=usagetype)
        self.assertEqual(
            validateList(usageRecords)[0],
            PASS,
            "usage records list validation failed")
        return [PASS, usageRecords]
    except Exception as e:
        return [FAIL, e]
def getCommandResultFromRouter(self, router, command):
    """Run *command* on the given router VM and return the result.

    On VMware/Hyper-V the router is reached through the management
    server; on other hypervisors the command is proxied through the
    host the router runs on.
    """
    hypervisor_name = self.hypervisor.lower()
    if hypervisor_name in ('vmware', 'hyperv'):
        return get_process_status(
            self.apiclient.connection.mgtSvr,
            22,
            self.apiclient.connection.user,
            self.apiclient.connection.passwd,
            router.linklocalip,
            command,
            hypervisor=self.hypervisor
        )
    # Other hypervisors: SSH via the host on which the router resides
    hosts = list_hosts(
        self.apiclient,
        id=router.hostid,
    )
    self.assertEqual(
        isinstance(hosts, list),
        True,
        "Check for list hosts response return valid data"
    )
    router_host = hosts[0]
    router_host.user = self.testdata["configurableData"]["host"]["username"]
    router_host.passwd = self.testdata["configurableData"]["host"]["password"]
    return get_process_status(
        router_host.ipaddress,
        22,
        router_host.user,
        router_host.passwd,
        router.linklocalip,
        command
    )
@attr(tags=["advanced"], required_hardware="True")
def test_01_positive_tests_usage(self):
    """ Positive test for usage test path

    # 1. Register a template and verify that usage is generated
         for correct size of template
    # 2. Register an ISO, verify usage is generated for the correct size
         of ISO
    # 3. Deploy a VM from the template and verify usage is generated
         for the VM with correct Service Offering and template id
    # 4. Delete template and iso
    # 5. Stop and start the VM
    # 6. Verify that allocated VM usage should be greater than
         running VM usage
    # 7. Destroy the Vm and recover it
    # 8. Verify that the running VM usage stays the same after delete
         and after recover operation
    # 9. Verify that allocated VM usage should be greater after recover
         operation than after destroy operation
    # 10. Change service offering of the VM
    # 11. Verify that VM usage is generated for the VM with correct
          service offering
    # 12. Start the VM
    # 13. Verify that the running VM usage after start operation is less
          than the allocated VM usage
    # 14. Verify that the running VM usage after start vm operation
          is greater than running VM usage after recover VM operation
    """
    # Step 1
    # Register a private template in the account
    builtin_info = get_builtin_template_info(
        self.apiclient,
        self.zone.id
    )
    self.testdata["privatetemplate"]["url"] = builtin_info[0]
    self.testdata["privatetemplate"]["hypervisor"] = builtin_info[1]
    self.testdata["privatetemplate"]["format"] = builtin_info[2]
    # Register new template
    template = Template.register(
        self.userapiclient,
        self.testdata["privatetemplate"],
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.account.domainid
    )
    self.cleanup.append(template)
    template.download(self.userapiclient)
    templates = Template.list(
        self.userapiclient,
        listall=True,
        id=template.id,
        templatefilter="self")
    self.assertEqual(
        validateList(templates)[0],
        PASS,
        "Templates list validation failed")
    # Checking template usage (usage type 7, per the checks below)
    response = self.listUsageRecords(usagetype=7)
    self.assertEqual(response[0], PASS, response[1])
    templateUsageRecords = [record for record in response[1]
                            if template.id == record.usageid]
    self.assertEqual(templateUsageRecords[0].virtualsize,
                     templates[0].size,
                     "The template size in the usage record and \
                does not match with the created template size")
    # Getting last usage job execution time
    response = self.getLatestUsageJobExecutionTime()
    self.assertEqual(response[0], PASS, response[1])
    lastUsageJobExecTime = response[1]
    # Checking exact template creation time
    response = self.getEventCreatedDateTime(template.name)
    self.assertEqual(response[0], PASS, response[1])
    templateCreatedDateTime = response[1]
    self.debug("Template creation date: %s" % templateCreatedDateTime)
    # We have to get the expected usage count in hours as the rawusage
    # returned by listUsageRecords is also in hours
    expectedUsage = format(
        ((lastUsageJobExecTime - templateCreatedDateTime).total_seconds() / 3600),
        ".2f")
    actualUsage = format(sum(float(record.rawusage)
                             for record in templateUsageRecords), ".2f")
    self.assertEqual(
        expectedUsage,
        actualUsage,
        "expected usage %s and actual usage %s not matching" %
        (expectedUsage,
         actualUsage))
    # step 2
    iso = Iso.create(
        self.userapiclient,
        self.testdata["iso"],
        account=self.account.name,
        domainid=self.account.domainid,
        zoneid=self.zone.id
    )
    self.cleanup.append(iso)
    iso.download(self.apiclient)
    isos = Iso.list(
        self.userapiclient,
        id=iso.id,
        listall=True)
    self.assertEqual(
        validateList(isos)[0],
        PASS,
        "Iso list validation failed"
    )
    # Checking usage for Iso (usage type 8)
    response = self.listUsageRecords(usagetype=8)
    self.assertEqual(response[0], PASS, response[1])
    isoUsageRecords = [record for record in response[1]
                       if iso.id == record.usageid]
    self.assertEqual(isoUsageRecords[0].size,
                     isos[0].size,
                     "The iso size in the usage record and \
                does not match with the created iso size")
    # Getting last usage job execution time
    response = self.getLatestUsageJobExecutionTime()
    self.assertEqual(response[0], PASS, response[1])
    lastUsageJobExecTime = response[1]
    # Checking exact Iso creation time
    response = self.getEventCreatedDateTime(iso.name)
    self.assertEqual(response[0], PASS, response[1])
    isoCreatedDateTime = response[1]
    self.debug("Iso creation date: %s" % isoCreatedDateTime)
    # We have to get the expected usage count in hours as the rawusage
    # returned by listUsageRecords is also in hours
    expectedUsage = format(
        ((lastUsageJobExecTime - isoCreatedDateTime).total_seconds() / 3600),
        ".2f")
    actualUsage = format(sum(float(record.rawusage)
                             for record in isoUsageRecords), ".2f")
    self.assertEqual(
        expectedUsage,
        actualUsage,
        "expected usage %s and actual usage %s not matching" %
        (expectedUsage,
         actualUsage))
    # step 3
    # Create VM in account
    vm = VirtualMachine.create(
        self.userapiclient,
        self.testdata["small"],
        templateid=template.id,
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        zoneid=self.zone.id
    )
    # Checking running VM usage (usage type 1)
    response = self.listUsageRecords(usagetype=1)
    self.assertEqual(response[0], PASS, response[1])
    vmRunningUsageRecords = [record for record in response[1]
                             if record.virtualmachineid == vm.id]
    vmRunningRawUsage = sum(float(record.rawusage)
                            for record in vmRunningUsageRecords)
    self.assertEqual(vmRunningUsageRecords[0].offeringid,
                     self.service_offering.id,
                     "The service offering id in the usage record\
                does not match with id of service offering\
                with which the VM was created")
    self.assertEqual(vmRunningUsageRecords[0].templateid,
                     template.id,
                     "The template id in the usage record\
                does not match with id of template\
                with which the VM was created")
    # Allocated VM usage (usage type 2); no extra sleep since the usage
    # job already ran for this interval in the previous call
    response = self.listUsageRecords(usagetype=2, sleep=False)
    self.assertEqual(response[0], PASS, response[1])
    vmAllocatedUsageRecords = [record for record in response[1]
                               if record.virtualmachineid == vm.id]
    vmAllocatedRawUsage = sum(float(record.rawusage)
                              for record in vmAllocatedUsageRecords)
    self.debug("running vm usage: %s" % vmRunningRawUsage)
    self.debug("allocated vm usage: %s" % vmAllocatedRawUsage)
    self.assertTrue(
        vmRunningRawUsage < vmAllocatedRawUsage,
        "Allocated VM usage should be greater than Running VM usage")
    # Getting last usage job execution time
    response = self.getLatestUsageJobExecutionTime()
    self.assertEqual(response[0], PASS, response[1])
    lastUsageJobExecTime = response[1]
    # Checking exact VM creation time
    response = self.getEventCreatedDateTime(vm.name)
    self.assertEqual(response[0], PASS, response[1])
    vmCreatedDateTime = response[1]
    self.debug("Vm creation date: %s" % vmCreatedDateTime)
    # We have to get the expected usage count in hours as the rawusage
    # returned by listUsageRecords is also in hours
    expectedUsage = format(
        ((lastUsageJobExecTime - vmCreatedDateTime).total_seconds() / 3600),
        ".2f")
    self.debug("VM expected usage: %s" % expectedUsage)
    actualUsage = format(vmAllocatedRawUsage, ".2f")
    self.assertEqual(
        expectedUsage,
        actualUsage,
        "expected usage %s and actual usage %s not matching" %
        (expectedUsage,
         actualUsage))
    # Step 4 - Deleting template and ISO
    template.delete(self.userapiclient)
    self.cleanup.remove(template)
    iso.delete(self.userapiclient)
    self.cleanup.remove(iso)
    # Verifying that usage for template and ISO is stopped:
    # take a first sample of totals right after deletion...
    response = self.listUsageRecords(usagetype=7)
    self.assertEqual(response[0], PASS, response[1])
    templateUsageRecords = response[1]
    usageForTemplateAfterDeletion_1 = sum(
        float(
            record.rawusage) for record in [
            record for record in templateUsageRecords
            if template.id == record.usageid])
    response = self.listUsageRecords(usagetype=8, sleep=False)
    self.assertEqual(response[0], PASS, response[1])
    isoUsageRecords = response[1]
    usageForIsoAfterDeletion_1 = sum(
        float(
            record.rawusage) for record in [
            record for record in isoUsageRecords
            if iso.id == record.usageid])
    # ...then sample again after another aggregation interval; the
    # totals must not grow once the resources are deleted
    response = self.listUsageRecords(usagetype=7)
    self.assertEqual(response[0], PASS, response[1])
    templateUsageRecords = response[1]
    usageForTemplateAfterDeletion_2 = sum(
        float(
            record.rawusage) for record in [
            record for record in templateUsageRecords
            if template.id == record.usageid])
    response = self.listUsageRecords(usagetype=8, sleep=False)
    self.assertEqual(response[0], PASS, response[1])
    isoUsageRecords = response[1]
    usageForIsoAfterDeletion_2 = sum(
        float(
            record.rawusage) for record in [
            record for record in isoUsageRecords
            if iso.id == record.usageid])
    self.assertTrue(usageForTemplateAfterDeletion_1 ==
                    usageForTemplateAfterDeletion_2,
                    "usage for template after deletion should remain the same\
                after specific intervals of time")
    self.assertTrue(usageForIsoAfterDeletion_1 ==
                    usageForIsoAfterDeletion_2,
                    "usage for iso after deletion should remain the same\
                after specific intervals of time")
    # Step 5
    vm.stop(self.userapiclient)
    # Sleep to get difference between allocated and running usage
    time.sleep(120)
    vm.start(self.userapiclient)
    # Step 6: Verifying allocated usage is greater than running usage
    response = self.listUsageRecords(usagetype=1)
    self.assertEqual(response[0], PASS, response[1])
    vmRunningUsageRecords = [record for record in response[1]
                             if record.virtualmachineid == vm.id]
    vmRunningRawUsage = sum(float(record.rawusage)
                            for record in vmRunningUsageRecords)
    response = self.listUsageRecords(usagetype=2, sleep=False)
    self.assertEqual(response[0], PASS, response[1])
    vmAllocatedUsageRecords = [record for record in response[1]
                               if record.virtualmachineid == vm.id]
    vmAllocatedRawUsage = sum(float(record.rawusage)
                              for record in vmAllocatedUsageRecords)
    self.debug("running vm usage: %s" % vmRunningRawUsage)
    self.debug("allocated vm usage: %s" % vmAllocatedRawUsage)
    self.assertTrue(
        vmRunningRawUsage < vmAllocatedRawUsage,
        "Allocated VM usage should be greater than Running VM usage")
    # Step 7
    vm.delete(self.userapiclient, expunge=False)
    response = self.listUsageRecords(usagetype=1, sleep=False)
    self.assertEqual(response[0], PASS, response[1])
    vmRunningUsageRecordAfterDestroy = sum(
        float(
            record.rawusage) for record in response[1] if
        record.virtualmachineid == vm.id)
    response = self.listUsageRecords(usagetype=2, sleep=False)
    self.assertEqual(response[0], PASS, response[1])
    vmAllocatedUsageRecordAfterDestroy = sum(
        float(
            record.rawusage) for record in response[1] if record.virtualmachineid == vm.id)
    vm.recover(self.apiclient)
    # Step 8
    response = self.listUsageRecords(usagetype=1)
    self.assertEqual(response[0], PASS, response[1])
    vmRunningUsageRecordAfterRecover = sum(
        float(
            record.rawusage) for record in response[1] if
        record.virtualmachineid == vm.id)
    response = self.listUsageRecords(usagetype=2, sleep=False)
    self.assertEqual(response[0], PASS, response[1])
    vmAllocatedUsageRecordAfterRecover = sum(
        float(
            record.rawusage) for record in response[1] if
        record.virtualmachineid == vm.id)
    self.debug(
        "running vm usage T1: %s" %
        vmRunningUsageRecordAfterDestroy)
    # NOTE(review): the label below says "allocated" but the value
    # logged is the *running* usage after recover — message looks wrong
    self.debug(
        "allocated vm usage T1: %s" %
        vmRunningUsageRecordAfterRecover)
    # Compare at one-decimal precision to tolerate small timing drift
    self.assertEqual(
        format(vmRunningUsageRecordAfterDestroy, ".1f"),
        format(vmRunningUsageRecordAfterRecover, ".1f"),
        "Running usage should remain the same")
    self.debug(
        "allocated vm usage T2: %s" %
        vmAllocatedUsageRecordAfterDestroy)
    self.debug(
        "allocated vm usage T2: %s" %
        vmAllocatedUsageRecordAfterRecover)
    # Step 9
    self.assertTrue(
        vmAllocatedUsageRecordAfterDestroy <
        vmAllocatedUsageRecordAfterRecover,
        "Allocated VM usage after recover should be greater than\
                before")
    # Step 10
    # Change service offering of VM and verify that it is changed
    vm.change_service_offering(
        self.userapiclient,
        serviceOfferingId=self.service_offering_2.id
    )
    response = self.listUsageRecords(usagetype=2)
    self.assertEqual(response[0], PASS, response[1])
    vmAllocatedUsageRecord = response[1][-1]
    # Step 11: Verifying vm usage for new service offering
    self.assertEqual(vmAllocatedUsageRecord.offeringid,
                     self.service_offering_2.id,
                     "The service offering id in the usage record\
                does not match with id of new service offering")
    # Step 12
    vm.start(self.userapiclient)
    response = self.listUsageRecords(usagetype=1)
    self.assertEqual(response[0], PASS, response[1])
    vmRunningUsageRecordAfterStart = sum(
        float(
            record.rawusage) for record in response[1] if
        record.virtualmachineid == vm.id)
    response = self.listUsageRecords(usagetype=2, sleep=False)
    self.assertEqual(response[0], PASS, response[1])
    vmAllocatedUsageRecordAfterStart = sum(
        float(
            record.rawusage) for record in response[1] if
        record.virtualmachineid == vm.id)
    self.debug("running vm usage T3: %s" % vmRunningUsageRecordAfterStart)
    self.debug(
        "allocated vm usage T3: %s" %
        vmAllocatedUsageRecordAfterStart)
    # Step 13
    self.assertTrue(
        vmRunningUsageRecordAfterStart <
        vmAllocatedUsageRecordAfterStart,
        "Allocated VM usage should be greater than Running usage")
    # Step 14
    self.assertTrue(
        vmRunningUsageRecordAfterRecover <
        vmRunningUsageRecordAfterStart,
        "Running VM usage after start VM should be greater than\
                that after recover operation")
    return
@attr(tags=["advanced"], required_hardware="true")
def test_02_positive_tests_usage(self):
    """ Positive test for usage test path

    # 1. Scale up VM and check that usage is generated for
         new cpu and ram value (Check in usage_vm_instance table)
    # 2. Scale down VM and check that usage is generated for
         new cpu and ram value (Check in usage_vm_instance table)
    # 3. Attach disk to VM and check that volume usage is
         generated for correct disk offering
    # 4. Detach volume and verify that usage for volume remains
         the same there afterwards
    # 5. Create snapshot of the root disk and verify correct usage is
         generated for snapshot with correct size
    # 6. Create template from root disk and check correct usage is
         generated for template with correct size
    # 7. Delete the template and verify that usage is stopped for
         template
    # 8. Create volume from snapshot and verify correct disk usage
         is generated
    # 9. Delete the volume and verify that the usage is stopped
    # 10. Create template from snapshot and verify correct usage
          is generated for the template with correct size
    """
    # Step 1
    # Create dynamic and static service offering; empty values make the
    # offering "custom" so cpu/memory are supplied at deploy time
    self.testdata["service_offering"]["cpunumber"] = ""
    self.testdata["service_offering"]["cpuspeed"] = ""
    self.testdata["service_offering"]["memory"] = ""
    serviceOffering_dynamic = ServiceOffering.create(
        self.apiclient,
        self.testdata["service_offering"]
    )
    self.cleanup.append(serviceOffering_dynamic)
    customcpunumber = 1
    customcpuspeed = 256
    custommemory = 128
    # Deploy VM with dynamic service offering
    virtualMachine = VirtualMachine.create(
        self.userapiclient,
        self.testdata["virtual_machine"],
        serviceofferingid=serviceOffering_dynamic.id,
        templateid=self.template.id,
        zoneid=self.zone.id,
        accountid=self.account.name,
        domainid=self.account.domainid,
        customcpunumber=customcpunumber,
        customcpuspeed=customcpuspeed,
        custommemory=custommemory
    )
    # Stop VM and verify that it is in stopped state
    virtualMachine.stop(self.userapiclient)
    scaledcpunumber = 2
    scaledcpuspeed = 512
    scaledmemory = 256
    # Scale up VM
    virtualMachine.scale(
        self.userapiclient,
        serviceOfferingId=serviceOffering_dynamic.id,
        customcpunumber=scaledcpunumber,
        customcpuspeed=scaledcpuspeed,
        custommemory=scaledmemory
    )
    # Wait for the usage job, then verify the scaled values landed in
    # the cloud_usage.usage_vm_instance table (latest row)
    self.listUsageRecords(usagetype=1)
    qresultset = self.dbclient.execute(
        "select cpu_cores, memory, cpu_speed from usage_vm_instance where vm_name = '%s';" %
        str(virtualMachine.name), db="cloud_usage")
    self.assertNotEqual(
        len(qresultset),
        0,
        "Check DB Query result set"
    )
    dbcpucores = qresultset[-1][0]
    dbmemory = qresultset[-1][1]
    dbcpuspeed = qresultset[-1][2]
    self.assertEqual(int(dbcpucores), scaledcpunumber,
                     "scaled cpu number not matching with db record")
    self.assertEqual(int(dbmemory), scaledmemory,
                     "scaled memory not matching with db record")
    self.assertEqual(int(dbcpuspeed), scaledcpuspeed,
                     "scaled cpu speed not matching with db record")
    scaledcpunumber = 1
    scaledcpuspeed = 512
    scaledmemory = 256
    # Step 2
    # Scale down VM
    virtualMachine.scale(
        self.userapiclient,
        serviceOfferingId=serviceOffering_dynamic.id,
        customcpunumber=scaledcpunumber,
        customcpuspeed=scaledcpuspeed,
        custommemory=scaledmemory
    )
    self.listUsageRecords(usagetype=1)
    qresultset = self.dbclient.execute(
        "select cpu_cores, memory, cpu_speed from usage_vm_instance where vm_name = '%s';" %
        str(virtualMachine.name), db="cloud_usage")
    self.assertNotEqual(
        len(qresultset),
        0,
        "Check DB Query result set"
    )
    dbcpucores = qresultset[-1][0]
    dbmemory = qresultset[-1][1]
    dbcpuspeed = qresultset[-1][2]
    self.assertEqual(int(dbcpucores), scaledcpunumber,
                     "scaled cpu number not matching with db record")
    self.assertEqual(int(dbmemory), scaledmemory,
                     "scaled memory not matching with db record")
    self.assertEqual(int(dbcpuspeed), scaledcpuspeed,
                     "scaled cpu speed not matching with db record")
    disk_offering = DiskOffering.create(
        self.apiclient,
        self.testdata["disk_offering"]
    )
    self.cleanup.append(disk_offering)
    # Step 3
    volume = Volume.create(
        self.userapiclient, self.testdata["volume"],
        zoneid=self.zone.id, account=self.account.name,
        domainid=self.account.domainid, diskofferingid=disk_offering.id
    )
    # Create VM in account
    virtual_machine = VirtualMachine.create(
        self.userapiclient,
        self.testdata["small"],
        templateid=self.template.id,
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        zoneid=self.zone.id
    )
    virtual_machine.attach_volume(self.userapiclient, volume=volume)
    # Verifying usage for Volume - START (usage type 6)
    response = self.listUsageRecords(usagetype=6)
    self.assertEqual(response[0], PASS, response[1])
    volumeUsageRecords = [record for record in response[1]
                          if volume.id == record.usageid]
    self.assertTrue(
        len(volumeUsageRecords) >= 1,
        "Volume usage record for attached volume is not generated")
    volumeRawUsageBeforeDetach = sum(float(record.rawusage) for
                                     record in [
                                     record for record in volumeUsageRecords
                                     if volume.id == record.usageid])
    # Getting last usage job execution time
    response = self.getLatestUsageJobExecutionTime()
    self.assertEqual(response[0], PASS, response[1])
    lastUsageJobExecTime = response[1]
    # Checking exact Volume creation time
    response = self.getEventCreatedDateTime(volume.name)
    self.assertEqual(response[0], PASS, response[1])
    volumeCreatedDateTime = response[1]
    self.debug("Volume creation date: %s" % volumeCreatedDateTime)
    # We have to get the expected usage count in hours as the rawusage
    # returned by listUsageRecords is also in hours
    expectedUsage = format(
        ((lastUsageJobExecTime - volumeCreatedDateTime).total_seconds() / 3600),
        ".2f")
    self.debug("Volume expected usage: %s" % expectedUsage)
    actualUsage = format(volumeRawUsageBeforeDetach, ".2f")
    self.assertEqual(
        expectedUsage,
        actualUsage,
        "expected usage %s and actual usage %s not matching" %
        (expectedUsage,
         actualUsage))
    # Verifying usage for Volume - END
    # Step 4
    virtual_machine.detach_volume(self.userapiclient, volume=volume)
    # Verifying usage for Volume after detaching - START
    # (sample twice; the raw usage should keep accruing after detach)
    response = self.listUsageRecords(usagetype=6)
    self.assertEqual(response[0], PASS, response[1])
    volumeUsageRecords = response[1]
    volumeRawUsageAfterDetach_time_1 = sum(float(record.rawusage) for
                                           record in [
                                           record for record in volumeUsageRecords
                                           if volume.id == record.usageid])
    response = self.listUsageRecords(usagetype=6)
    self.assertEqual(response[0], PASS, response[1])
    volumeUsageRecords = response[1]
    volumeRawUsageAfterDetach_time_2 = sum(float(record.rawusage) for
                                           record in [
                                           record for record in volumeUsageRecords
                                           if volume.id == record.usageid])
    self.debug(volumeRawUsageAfterDetach_time_1)
    self.debug(volumeRawUsageAfterDetach_time_2)
    self.assertTrue(
        volumeRawUsageAfterDetach_time_1 <
        volumeRawUsageAfterDetach_time_2,
        "Raw volume usage should continue running after detach operation"
    )
    # Verifying usage for Volume after detaching - END
    volumes = Volume.list(
        self.userapiclient,
        virtualmachineid=virtual_machine.id,
        type='ROOT',
        listall=True
    )
    self.assertEqual(
        validateList(volumes)[0],
        PASS,
        "Volumes list validation failed"
    )
    rootVolume = volumes[0]
    # Step 5
    # Create a snapshot from the ROOTDISK
    snapshotFromRootVolume = Snapshot.create(
        self.userapiclient,
        rootVolume.id)
    # Verifying usage for Snapshot - START (usage type 9)
    response = self.listUsageRecords(usagetype=9)
    self.assertEqual(response[0], PASS, response[1])
    snapshotUsageRecords = [record for record in response[1]
                            if snapshotFromRootVolume.id == record.usageid]
    self.assertEqual(snapshotUsageRecords[0].size,
                     snapshotFromRootVolume.physicalsize,
                     "The snapshot size in the usage record and \
                does not match with the created snapshot size")
    # Getting last usage job execution time
    response = self.getLatestUsageJobExecutionTime()
    self.assertEqual(response[0], PASS, response[1])
    lastUsageJobExecTime = response[1]
    # Checking exact snapshot creation time
    response = self.getEventCreatedDateTime(snapshotFromRootVolume.name)
    self.assertEqual(response[0], PASS, response[1])
    snapshotCreatedDateTime = response[1]
    self.debug("Snapshot creation date: %s" % snapshotCreatedDateTime)
    # We have to get the expected usage count in hours as the rawusage
    # returned by listUsageRecords is also in hours
    expectedUsage = format(
        ((lastUsageJobExecTime - snapshotCreatedDateTime).total_seconds() / 3600),
        ".2f")
    actualUsage = format(sum(float(record.rawusage)
                             for record in snapshotUsageRecords), ".2f")
    self.assertEqual(
        expectedUsage,
        actualUsage,
        "expected usage %s and actual usage %s not matching" %
        (expectedUsage,
         actualUsage))
    # Verifying usage for Snapshot - END
    virtual_machine.stop(self.userapiclient)
    # Step 6
    templateFromVolume = Template.create(
        self.userapiclient,
        self.testdata["templates"],
        rootVolume.id,
        self.account.name,
        self.account.domainid
    )
    templates = Template.list(
        self.userapiclient,
        listall=True,
        id=templateFromVolume.id,
        templatefilter="self"
    )
    self.assertEqual(
        validateList(templates)[0],
        PASS,
        "templates list validation failed"
    )
    # Verifying usage for Template - START
    response = self.listUsageRecords(usagetype=7)
    self.assertEqual(response[0], PASS, response[1])
    templateUsageRecords = [record for record in response[1]
                            if templateFromVolume.id == record.usageid]
    self.assertEqual(templateUsageRecords[0].virtualsize,
                     templates[-1].size,
                     "The template size in the usage record and \
                does not match with the created template size")
    templateRawUsage = sum(float(record.rawusage)
                           for record in templateUsageRecords)
    # Getting last usage job execution time
    response = self.getLatestUsageJobExecutionTime()
    self.assertEqual(response[0], PASS, response[1])
    lastUsageJobExecTime = response[1]
    # Checking exact Template creation time
    response = self.getEventCreatedDateTime(templateFromVolume.name)
    self.assertEqual(response[0], PASS, response[1])
    templateCreatedDateTime = response[1]
    self.debug("Template creation date: %s" % templateCreatedDateTime)
    # We have to get the expected usage count in hours as the rawusage
    # returned by listUsageRecords is also in hours
    expectedUsage = format(
        ((lastUsageJobExecTime - templateCreatedDateTime).total_seconds() / 3600),
        ".2f")
    self.debug("Template expected usage: %s" % expectedUsage)
    actualUsage = format(templateRawUsage, ".2f")
    self.assertEqual(
        expectedUsage,
        actualUsage,
        "expected usage %s and actual usage %s not matching" %
        (expectedUsage,
         actualUsage))
    # Verifying usage for Template - END
    # Step 7
    templateFromVolume.delete(self.userapiclient)
    # Verifying usage for Template is stopped after deleting it - START
    response = self.listUsageRecords(usagetype=7)
    self.assertEqual(response[0], PASS, response[1])
    templateUsageRecords = response[1]
    usageForTemplateFromVolumeAfterDeletion_1 = sum(
        float(
            record.rawusage) for record in [
            record for record in templateUsageRecords
            if templateFromVolume.id == record.usageid])
    response = self.listUsageRecords(usagetype=7)
    self.assertEqual(response[0], PASS, response[1])
    templateUsageRecords = response[1]
    usageForTemplateFromVolumeAfterDeletion_2 = sum(
        float(
            record.rawusage) for record in [
            record for record in templateUsageRecords
            if templateFromVolume.id == record.usageid])
    self.assertTrue(usageForTemplateFromVolumeAfterDeletion_1 ==
                    usageForTemplateFromVolumeAfterDeletion_2,
                    "usage for template after deletion should remain the same\
                after specific intervals of time")
    # Verifying usage for Template is stopped after deleting it - END
    # Step 8
    self.testdata["volume_from_snapshot"]["zoneid"] = self.zone.id
    volumeFromSnapshot = Volume.create_from_snapshot(
        self.userapiclient,
        snapshot_id=snapshotFromRootVolume.id,
        services=self.testdata["volume_from_snapshot"],
        account=self.account.name,
        domainid=self.account.domainid
    )
    # Verifying usage for Volume from Snapshot - START
    response = self.listUsageRecords(usagetype=6)
    self.assertEqual(response[0], PASS, response[1])
    volumeUsageRecords = response[1]
    usageForVolumeFromSnapshotBeforeDeletion = sum(
        float(
            record.rawusage) for record in [
            record for record in volumeUsageRecords
            if volumeFromSnapshot.id == record.usageid])
    self.debug(usageForVolumeFromSnapshotBeforeDeletion)
    # Getting last usage job execution time
    response = self.getLatestUsageJobExecutionTime()
    self.assertEqual(response[0], PASS, response[1])
    lastUsageJobExecTime = response[1]
    # Checking exact Volume creation time
    response = self.getEventCreatedDateTime(volumeFromSnapshot.name)
    self.assertEqual(response[0], PASS, response[1])
    volumeCreatedDateTime = response[1]
    self.debug("Volume creation date: %s" % volumeCreatedDateTime)
    # We have to get the expected usage count in hours as the rawusage
    # returned by listUsageRecords is also in hours
    expectedUsage = format(
        ((lastUsageJobExecTime - volumeCreatedDateTime).total_seconds() / 3600),
        ".2f")
    self.debug("Volume expected usage: %s" % expectedUsage)
    actualUsage = format(usageForVolumeFromSnapshotBeforeDeletion, ".2f")
    self.assertEqual(
        expectedUsage,
        actualUsage,
        "expected usage %s and actual usage %s not matching" %
        (expectedUsage,
         actualUsage))
    # Verifying usage for Volume from Snapshot - END
    # Step 9
    volumeFromSnapshot.delete(self.userapiclient)
    # Verifying usage for Volume from Snapshot is stopped after delete -
    # START
    response = self.listUsageRecords(usagetype=6)
    self.assertEqual(response[0], PASS, response[1])
    volumeUsageRecords = response[1]
    usageForVolumeFromSnapshotAfterDeletion_1 = sum(
        float(
            record.rawusage) for record in [
            record for record in volumeUsageRecords
            if volumeFromSnapshot.id == record.usageid])
    response = self.listUsageRecords(usagetype=6)
    self.assertEqual(response[0], PASS, response[1])
    volumeUsageRecords = response[1]
    usageForVolumeFromSnapshotAfterDeletion_2 = sum(
        float(
            record.rawusage) for record in [
            record for record in volumeUsageRecords
            if volumeFromSnapshot.id == record.usageid])
    self.debug(usageForVolumeFromSnapshotAfterDeletion_1)
    self.debug(usageForVolumeFromSnapshotAfterDeletion_2)
    self.assertTrue(usageForVolumeFromSnapshotAfterDeletion_1 ==
                    usageForVolumeFromSnapshotAfterDeletion_2,
                    "usage for volume after deletion should remain the same\
                after specific intervals of time")
    # Verifying usage for Volume from Snapshot is stopped after delete -
    # END
    # Step 10
    templateFromSnapshot = Template.create_from_snapshot(
        self.userapiclient,
        snapshotFromRootVolume,
        self.testdata["privatetemplate"]
    )
    templates = Template.list(
        self.userapiclient,
        listall=True,
        id=templateFromSnapshot.id,
        templatefilter="self"
    )
    self.assertEqual(
        validateList(templates)[0],
        PASS,
        "templates list validation failed"
    )
    # Verifying usage for Template from Snapshot - START
    response = self.listUsageRecords(usagetype=7)
    self.assertEqual(response[0], PASS, response[1])
    usageRecords = response[1]
    templateUsageRecords = [record for record in usageRecords
                            if templateFromSnapshot.id == record.usageid]
    self.assertTrue(len(templateUsageRecords) >= 1,
                    "template usage record list is empty")
    self.assertEqual(templateUsageRecords[-1].virtualsize,
                     templates[0].size,
                     "The template size in the usage record and \
                does not match with the created template size")
    templateRawUsage = sum(float(record.rawusage)
                           for record in templateUsageRecords)
    # Getting last usage job execution time
    response = self.getLatestUsageJobExecutionTime()
    self.assertEqual(response[0], PASS, response[1])
    lastUsageJobExecTime = response[1]
    # Checking exact Template creation time
    response = self.getEventCreatedDateTime(templateFromSnapshot.name)
    self.assertEqual(response[0], PASS, response[1])
    templateCreatedDateTime = response[1]
    self.debug("Template creation date: %s" % templateCreatedDateTime)
    # We have to get the expected usage count in hours as the rawusage
    # returned by listUsageRecords is also in hours
    expectedUsage = format(
        ((lastUsageJobExecTime - templateCreatedDateTime).total_seconds() / 3600),
        ".2f")
    self.debug("Template expected usage: %s" % expectedUsage)
    actualUsage = format(templateRawUsage, ".2f")
    self.assertEqual(
        expectedUsage,
        actualUsage,
        "expected usage %s and actual usage %s not matching" %
        (expectedUsage,
         actualUsage))
    # Verifying usage for Template from Snapshot - END
    templateFromSnapshot.delete(self.userapiclient)
    # Verifying usage for Template from Snapshot is stopped after delete -
    # START
    response = self.listUsageRecords(usagetype=7)
    self.assertEqual(response[0], PASS, response[1])
    templateUsageRecords = response[1]
    usageForTemplateAfterDeletion_1 = sum(
        float(
            record.rawusage) for record in [
            record for record in templateUsageRecords
            if templateFromSnapshot.id == record.usageid])
    response = self.listUsageRecords(usagetype=7)
    self.assertEqual(response[0], PASS, response[1])
    templateUsageRecords = response[1]
    usageForTemplateAfterDeletion_2 = sum(
        float(
            record.rawusage) for record in [
            record for record in templateUsageRecords
            if templateFromSnapshot.id == record.usageid])
    self.assertTrue(usageForTemplateAfterDeletion_1 ==
                    usageForTemplateAfterDeletion_2,
                    "usage for volume after deletion should remain the same\
                after specific intervals of time")
    # Verifying usage for Template from Snapshot is stopped after delete -
    # END
    snapshotFromRootVolume.delete(self.userapiclient)
    # Verifying usage for Snapshot from volume is stopped after delete -
    # START
    response = self.listUsageRecords(usagetype=9)
    self.assertEqual(response[0], PASS, response[1])
    templateUsageRecords = response[1]
    usageForSnapshotAfterDeletion_1 = sum(
        float(
            record.rawusage) for record in [
            record for record in templateUsageRecords
            if snapshotFromRootVolume.id == record.usageid])
    response = self.listUsageRecords(usagetype=9)
    self.assertEqual(response[0], PASS, response[1])
    templateUsageRecords = response[1]
    usageForSnapshotAfterDeletion_2 = sum(
        float(
            record.rawusage) for record in [
            record for record in templateUsageRecords
            if snapshotFromRootVolume.id == record.usageid])
    self.assertTrue(usageForSnapshotAfterDeletion_1 ==
                    usageForSnapshotAfterDeletion_2,
                    "usage for volume after deletion should remain the same\
                after specific intervals of time")
    # Verifying usage for Snapshot from volume is stopped after delete -
    # END
    return
@attr(tags=["advanced"], required_hardware="true")
def test_03_positive_tests_usage(self):
""" Positive test for usage test path T28 - T35
Steps:
# 1. Add an isolated network to VM and verify that network offering
usage is generated for account
Also verify that IP usage is generated for source NAT IP of
network
# 2. Enabled VPN on source nat IP of default network of VM
# 3. Add two VPN users and check that usage is generated for VPN users
# 4. Acquire public IP in the network and verify that IP usage
is generated for the acquired IP
# 5. Create two PF rules on this IP and verify that PF rules usage
is generated for the account
# 6. Acquire another IP and enabled static NAT on it and create
egress firewall rule on it
# 7. Verify IP usage is generated for above acquired IP
# 8. SSH to VM with above IP and ping to google.com
# 9. Verify that Network bytes usage is generated for account
and it matches with the actual number of bytes
# 10. Repeat the same for other acquired IP
# 11. Delete one of the PF rules and verify that usage is stopped for the PF rule
# 12. Also verify that usage is not stopped for other PF rule which
# is still present
"""
# Step 1
# Create VM in account
virtual_machine = VirtualMachine.create(
self.userapiclient,
self.testdata["small"],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
zoneid=self.zone.id
)
self.testdata["isolated_network"]["zoneid"] = self.zone.id
isolated_network = Network.create(
self.userapiclient,
self.testdata["isolated_network"],
self.account.name,
self.account.domainid,
networkofferingid=self.isolated_network_offering_2.id)
virtual_machine.add_nic(self.userapiclient, isolated_network.id)
# Usages for steps are checked together in batch after the operations are done
# to avoid waiting for usage job to run for each operation separately
# Listing source nat ip of newly added network
ipAddresses = PublicIPAddress.list(
self.apiclient,
associatednetworkid=isolated_network.id,
listall=True)
sourceNatIP = ipAddresses[0]
ipAddressesDefaultNetwork = PublicIPAddress.list(
self.apiclient,
associatednetworkid=virtual_machine.nic[0].networkid,
listall=True)
sourceNatIPDefaultNetwork = ipAddressesDefaultNetwork[0]
# Step 2
# Create VPN for source NAT ip
Vpn.create(self.apiclient,
sourceNatIPDefaultNetwork.id,
account=self.account.name,
domainid=self.account.domainid)
self.debug("Verifying the remote VPN access")
vpns = Vpn.list(self.apiclient,
publicipid=sourceNatIPDefaultNetwork.id,
listall=True)
self.assertEqual(
isinstance(vpns, list),
True,
"List VPNs shall return a valid response"
)
# Step 3:
vpnuser_1 = VpnUser.create(
self.apiclient,
self.testdata["vpn_user"]["username"],
self.testdata["vpn_user"]["password"],
account=self.account.name,
domainid=self.account.domainid,
rand_name=True
)
vpnuser_2 = VpnUser.create(
self.apiclient,
self.testdata["vpn_user"]["username"],
self.testdata["vpn_user"]["password"],
account=self.account.name,
domainid=self.account.domainid,
rand_name=True
)
# Step 4
public_ip_1 = PublicIPAddress.create(
self.userapiclient,
accountid=virtual_machine.account,
zoneid=virtual_machine.zoneid,
domainid=virtual_machine.domainid,
services=self.testdata["server"],
networkid=virtual_machine.nic[0].networkid
)
FireWallRule.create(
self.userapiclient,
ipaddressid=public_ip_1.ipaddress.id,
protocol=self.testdata["fwrule"]["protocol"],
cidrlist=[self.testdata["fwrule"]["cidr"]],
startport=self.testdata["fwrule"]["startport"],
endport=self.testdata["fwrule"]["endport"]
)
# Step 5
self.testdata["natrule"]["startport"] = 22
self.testdata["natrule"]["endport"] = 22
nat_rule_1 = NATRule.create(
self.userapiclient,
virtual_machine,
self.testdata["natrule"],
public_ip_1.ipaddress.id
)
self.testdata["natrule"]["privateport"] = 23
self.testdata["natrule"]["publicport"] = 23
nat_rule_2 = NATRule.create(
self.userapiclient,
virtual_machine,
self.testdata["natrule"],
public_ip_1.ipaddress.id
)
# Usages for above operations are checked here together
# Checking usage for source nat IP of added network
response = self.listUsageRecords(usagetype=13)
self.assertEqual(response[0], PASS, response[1])
usageRecords = response[1]
nwOfferingUsageRecords = [
record for record in usageRecords if self.isolated_network_offering_2.id == record.offeringid]
self.assertTrue(validateList(nwOfferingUsageRecords)[0] == PASS,
"IP usage record list validation failed")
self.assertTrue(float(nwOfferingUsageRecords[0].rawusage) > 0,
"Raw usage not started for source NAT ip")
# Checking usage for source nat IP of default VM network
response = self.listUsageRecords(usagetype=3, sleep=False)
self.assertEqual(response[0], PASS, response[1])
usageRecords = response[1]
ipUsageRecords = [record for record in usageRecords
if sourceNatIP.id == record.usageid]
self.assertTrue(validateList(ipUsageRecords)[0] == PASS,
"IP usage record list validation failed")
self.assertTrue(float(ipUsageRecords[0].rawusage) > 0,
"Raw usage not started for source NAT ip")
# Checking usage for acquired public IP
response = self.listUsageRecords(usagetype=3, sleep=False)
self.assertEqual(response[0], PASS, response[1])
usageRecords = response[1]
ipUsageRecords = [record for record
in usageRecords
if public_ip_1.ipaddress.id == record.usageid
]
self.assertTrue(validateList(ipUsageRecords)[0] == PASS,
"IP usage record list validation failed")
self.assertTrue(float(ipUsageRecords[0].rawusage) > 0,
"Raw usage not started for acquired public ip")
# Checking usage for NAT rules
response = self.listUsageRecords(usagetype=12, sleep=False)
self.assertEqual(response[0], PASS, response[1])
usageRecords = response[1]
natRuleUsageRecords = [record for record in usageRecords
if nat_rule_1.id == record.usageid]
self.assertTrue(validateList(natRuleUsageRecords)[0] == PASS,
"NAT rule usage record list validation failed")
self.assertTrue(float(natRuleUsageRecords[0].rawusage) > 0,
"Raw usage not started for nat rule")
natRuleUsageRecords = [record for record in usageRecords
if nat_rule_2.id == record.usageid]
self.assertTrue(validateList(natRuleUsageRecords)[0] == PASS,
"NAT rule usage record list validation failed")
self.assertTrue(float(natRuleUsageRecords[0].rawusage) > 0,
"Raw usage not started for nat rule")
# Checking VPN usage
response = self.listUsageRecords(usagetype=14, sleep=False)
self.assertEqual(response[0], PASS, response[1])
usageRecords = response[1]
vpnUserUsageRecords_1 = [record for record in usageRecords
if vpnuser_1.id == record.usageid]
self.assertTrue(validateList(vpnUserUsageRecords_1)[0] == PASS,
"VPN user usage record list validation failed")
vpnuser1_rawusage = sum(float(record.rawusage)
for record in vpnUserUsageRecords_1)
# Getting last usage job execution time
response = self.getLatestUsageJobExecutionTime()
self.assertEqual(response[0], PASS, response[1])
lastUsageJobExecTime = response[1]
# Checking exact VPN user creation time
response = self.getEventCreatedDateTime(vpnuser_1.username)
self.assertEqual(response[0], PASS, response[1])
vpnUserCreatedDateTime = response[1]
self.debug("VPN creation date: %s" % vpnUserCreatedDateTime)
# We have to get the expected usage count in hours as the rawusage returned by listUsageRecords
# is also in hours
expectedUsage = format(
((lastUsageJobExecTime - vpnUserCreatedDateTime).total_seconds() / 3600),
".2f")
self.debug("VPN user expected usage: %s" % expectedUsage)
actualUsage = format(vpnuser1_rawusage, ".2f")
self.assertEqual(
expectedUsage,
actualUsage,
"expected usage %s and actual usage %s not matching" %
(expectedUsage,
actualUsage))
vpnUserUsageRecords_2 = [record for record in usageRecords
if vpnuser_2.id == record.usageid]
self.assertTrue(validateList(vpnUserUsageRecords_2)[0] == PASS,
"VPN user usage record list validation failed")
vpnuser2_rawusage = sum(float(record.rawusage)
for record in vpnUserUsageRecords_2)
# Checking exact VPN user creation time
response = self.getEventCreatedDateTime(vpnuser_2.username)
self.assertEqual(response[0], PASS, response[1])
vpnUserCreatedDateTime = response[1]
self.debug("VPN creation date: %s" % vpnUserCreatedDateTime)
# We have to get the expected usage count in hours as the rawusage returned by listUsageRecords
# is also in hours
expectedUsage = format(
((lastUsageJobExecTime - vpnUserCreatedDateTime).total_seconds() / 3600),
".2f")
self.debug("VPN user expected usage: %s" % expectedUsage)
actualUsage = format(vpnuser2_rawusage, ".2f")
self.assertEqual(
expectedUsage,
actualUsage,
"expected usage %s and actual usage %s not matching" %
(expectedUsage,
actualUsage))
# Acquire another public IP and check usage
public_ip_2 = PublicIPAddress.create(
self.userapiclient,
accountid=virtual_machine.account,
zoneid=virtual_machine.zoneid,
domainid=virtual_machine.domainid,
services=self.testdata["server"],
networkid=virtual_machine.nic[0].networkid
)
# Step 6
# Enabling static Nat for Ip Address associated
StaticNATRule.enable(
self.userapiclient,
ipaddressid=public_ip_2.ipaddress.id,
virtualmachineid=virtual_machine.id,
)
# Step 7
response = self.listUsageRecords(usagetype=3)
self.assertEqual(response[0], PASS, response[1])
usageRecords = response[1]
ipUsageRecords = [record for record
in usageRecords
if public_ip_2.ipaddress.id == record.usageid
]
self.assertTrue(validateList(ipUsageRecords)[0] == PASS,
"IP usage record list validation failed")
self.assertTrue(float(ipUsageRecords[0].rawusage) > 0,
"Raw usage not started for public ip")
FireWallRule.create(
self.userapiclient,
ipaddressid=public_ip_2.ipaddress.id,
protocol=self.testdata["fwrule"]["protocol"],
cidrlist=[self.testdata["fwrule"]["cidr"]],
startport=self.testdata["fwrule"]["startport"],
endport=self.testdata["fwrule"]["endport"]
)
EgressFireWallRule.create(
self.userapiclient,
networkid=virtual_machine.nic[0].networkid,
protocol=self.testdata["icmprule"]["protocol"],
type=self.testdata["icmprule"]["icmptype"],
code=self.testdata["icmprule"]["icmpcode"],
cidrlist=self.testdata["icmprule"]["cidrlist"])
# Step 8:
ssh_client = virtual_machine.get_ssh_client(
ipaddress=public_ip_1.ipaddress.ipaddress
)
# Ping Internet and check the bytes received
res = ssh_client.execute("ping -c 1 www.google.com")
self.assertEqual(
str(res).count("1 received"),
1,
"Ping to outside world from VM should be successful"
)
routers = list_routers(
self.apiclient,
networkid=virtual_machine.nic[0].networkid,
listall=True
)
self.assertEqual(
validateList(routers)[0],
PASS,
"Routers list validation failed")
router = routers[0]
result = self.getCommandResultFromRouter(
router,
"iptables -L NETWORK_STATS -n -v -x")
self.debug("iptables -L NETWORK_STATS -n -v -x: %s" % result)
bytesReceivedIptableRows = [record for record in result if
"eth2 eth0" in record]
self.debug("bytes received rows: %s" % bytesReceivedIptableRows)
bytesReceivedOnRouter = sum(
int(record[1]) for record in [x.split() for x in bytesReceivedIptableRows])
self.debug(
"Bytes received extracted from router: %s" %
bytesReceivedOnRouter)
# Step 9:
# Verify that bytes received in usage are equal to
# as shown on router
response = self.listUsageRecords(usagetype=5)
self.assertEqual(response[0], PASS, response[1])
bytesReceivedUsage = sum(
int(record.rawusage) for record in response[1])
self.assertTrue(bytesReceivedUsage ==
bytesReceivedOnRouter,
"Total bytes received usage should be \
equal to bytes received on router")
# Step 10:
# Repeat the same for other public IP
ssh_client = virtual_machine.get_ssh_client(
ipaddress=public_ip_2.ipaddress.ipaddress
)
res = ssh_client.execute("ping -c 1 www.google.com")
self.assertEqual(
str(res).count("1 received"),
1,
"Ping to outside world from VM should be successful"
)
result = self.getCommandResultFromRouter(
router,
"iptables -L NETWORK_STATS -n -v -x")
self.debug("iptables -L NETWORK_STATS -n -v -x: %s" % result)
bytesReceivedIptableRows = [record for record in result if
"eth2 eth0" in record]
self.debug("bytes received rows: %s" % bytesReceivedIptableRows)
bytesReceivedOnRouter = sum(
int(record[1]) for record in [x.split() for x in bytesReceivedIptableRows])
self.debug(
"Bytes received extracted from router: %s" %
bytesReceivedOnRouter)
# Step 9:
# Verify that bytes received in usage are equal to
# as shown on router
response = self.listUsageRecords(usagetype=5)
self.assertEqual(response[0], PASS, response[1])
bytesReceivedUsage = sum(
int(record.rawusage) for record in response[1])
self.assertTrue(bytesReceivedUsage ==
bytesReceivedOnRouter,
"Total bytes received usage should be \
equal to bytes received on router")
# Step 11:
# Delete NAT rule and verify that usage is stopped for the NAT rule
nat_rule_1.delete(self.userapiclient)
response = self.listUsageRecords(usagetype=12, sleep=True)
self.assertEqual(response[0], PASS, response[1])
usageRecords = response[1]
natRule_1_Usage_t1 = sum(float(record.rawusage) for record
in [record for record in usageRecords
if nat_rule_1.id == record.usageid])
response = self.listUsageRecords(usagetype=12)
self.assertEqual(response[0], PASS, response[1])
usageRecords = response[1]
natRule_1_Usage_t2 = sum(float(record.rawusage) for record
in [record for record in usageRecords
if nat_rule_1.id == record.usageid])
self.assertTrue(
natRule_1_Usage_t1 == natRule_1_Usage_t2,
"NAT rule usage should be stopped once the rule is deleted")
# Also verify that usage for other nat rule is running
natRule_2_Usage_t1 = sum(float(record.rawusage) for record
in [record for record in usageRecords
if nat_rule_2.id == record.usageid])
# Step 12:
response = self.listUsageRecords(usagetype=12)
self.assertEqual(response[0], PASS, response[1])
usageRecords = response[1]
natRule_2_Usage_t2 = sum(float(record.rawusage) for record
in [record for record in usageRecords
if nat_rule_1.id == record.usageid])
self.assertTrue(natRule_2_Usage_t1 > natRule_2_Usage_t2,
"NAT rule usage for second rule should be running")
return
@attr(tags=["advanced"], required_hardware="true")
def test_04_positive_tests_usage(self):
""" Positive test for usage test path
Steps:
# 1. Create a VM in the account
# 2. Acquire public IP in VM network and verify correct usage
is generated for IP
# 3. Create LB rule for the IP address and verify LB rule usage
is generated for the account
# 4. Create another LB rule with different ports and verify
seperate usage is generated for new LB rule
# 5. Create egress firewall rule for VM and SSH to VM
# 6. Ping external network from the VM and verify that
network byte usage is genrated correctly
# 7. Delete one LB rule and verify that the usage
is stopped for the LB rule
# 8. Stop the network router and
# Verify iptables counters are reset when domR stops
# Verify current_bytes in user_statistics table are moved to
net_bytes
# Verify currnt_bytes becomes zero
# 9. Start the router and
# Verify iptables counters are reset when domR starts
# Verify a diff of total (current_bytes + net_bytes) in previous
aggregation period and current period will give the network usage
"""
# Step 1
# Create VM in account
virtual_machine = VirtualMachine.create(
self.userapiclient,
self.testdata["small"],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
zoneid=self.zone.id
)
# Step 2
public_ip_1 = PublicIPAddress.create(
self.userapiclient,
accountid=virtual_machine.account,
zoneid=virtual_machine.zoneid,
domainid=virtual_machine.domainid,
services=self.testdata["server"]
)
self.testdata["lbrule"]["privateport"] = 22
self.testdata["lbrule"]["publicport"] = 2222
publicport = self.testdata["lbrule"]["publicport"]
# Step 3
# Create LB Rule
lbrule_1 = LoadBalancerRule.create(
self.apiclient,
self.testdata["lbrule"],
ipaddressid=public_ip_1.ipaddress.id,
accountid=self.account.name,
networkid=virtual_machine.nic[0].networkid,
domainid=self.account.domainid)
self.testdata["lbrule"]["privateport"] = 23
self.testdata["lbrule"]["publicport"] = 2223
# Step 4
# Create another LB Rule
lbrule_2 = LoadBalancerRule.create(
self.apiclient,
self.testdata["lbrule"],
ipaddressid=public_ip_1.ipaddress.id,
accountid=self.account.name,
networkid=virtual_machine.nic[0].networkid,
domainid=self.account.domainid)
response = self.listUsageRecords(usagetype=3)
self.assertEqual(response[0], PASS, response[1])
usageRecords = response[1]
ipUsageRecords = [record for record in usageRecords
if public_ip_1.ipaddress.id == record.usageid
]
self.assertTrue(validateList(ipUsageRecords)[0] == PASS,
"IP usage record list validation failed")
self.assertTrue(float(ipUsageRecords[0].rawusage) > 0,
"Raw usage not started for ip address")
response = self.listUsageRecords(usagetype=11, sleep=False)
self.assertEqual(response[0], PASS, response[1])
usageRecords = response[1]
lbRule_1_UsageRecords = [record for record in usageRecords
if lbrule_1.id == record.usageid]
self.assertTrue(validateList(lbRule_1_UsageRecords)[0] == PASS,
"LB rule usage record list validation failed")
self.assertTrue(float(lbRule_1_UsageRecords[0].rawusage) > 0,
"LB usage not started for nat rule")
lbRule_2_UsageRecords = [record for record in usageRecords
if lbrule_2.id == record.usageid]
self.assertTrue(validateList(lbRule_2_UsageRecords)[0] == PASS,
"LB rule usage record list validation failed")
self.assertTrue(float(lbRule_2_UsageRecords[0].rawusage) > 0,
"LB usage not started for nat rule")
# Step 5
EgressFireWallRule.create(
self.userapiclient,
networkid=virtual_machine.nic[0].networkid,
protocol=self.testdata["icmprule"]["protocol"],
type=self.testdata["icmprule"]["icmptype"],
code=self.testdata["icmprule"]["icmpcode"],
cidrlist=self.testdata["icmprule"]["cidrlist"])
lbrule_1.assign(self.userapiclient, [virtual_machine])
ssh_client = virtual_machine.get_ssh_client(
ipaddress=public_ip_1.ipaddress.ipaddress,
port=publicport
)
# Step 6
res = ssh_client.execute("ping -c 1 www.google.com")
self.assertEqual(
str(res).count("1 received"),
1,
"Ping to outside world from VM should be successful"
)
# Verifying usage for bytes received - START
routers = list_routers(
self.apiclient,
networkid=virtual_machine.nic[0].networkid,
listall=True
)
self.assertEqual(
validateList(routers)[0],
PASS,
"Routers list validation failed")
router = routers[0]
result = self.getCommandResultFromRouter(
router,
"iptables -L NETWORK_STATS -n -v -x")
self.debug("iptables -L NETWORK_STATS -n -v -x: %s" % result)
bytesReceivedIptableRows = [record for record in result if
"eth2 eth0" in record]
self.debug("bytes received rows: %s" % bytesReceivedIptableRows)
bytesReceivedOnRouter = sum(
int(record[1]) for record in [x.split() for x in bytesReceivedIptableRows])
self.debug(
"Bytes received extracted from router: %s" %
bytesReceivedOnRouter)
# Verify that bytes received in usage are equal to
# as shown on router
response = self.listUsageRecords(usagetype=5)
self.assertEqual(response[0], PASS, response[1])
bytesReceivedUsage = sum(
int(record.rawusage) for record in response[1])
self.assertTrue(bytesReceivedUsage ==
bytesReceivedOnRouter,
"Total bytes received usage should be \
equal to bytes received on router")
# Verifying usage for bytes received - END
lbrule_1.delete(self.userapiclient)
# Step 7 Verify that usage is stopped for the LB rule
response = self.listUsageRecords(usagetype=11)
self.assertEqual(response[0], PASS, response[1])
lbUsageRecords = response[1]
usageForLbRuleAfterDeletion_t1 = sum(
float(
record.rawusage) for record in [
record for record in lbUsageRecords
if lbrule_1.id == record.usageid])
response = self.listUsageRecords(usagetype=11)
self.assertEqual(response[0], PASS, response[1])
lbUsageRecords = response[1]
usageForLbRuleAfterDeletion_t2 = sum(
float(
record.rawusage) for record in [
record for record in lbUsageRecords
if lbrule_1.id == record.usageid])
self.assertTrue(usageForLbRuleAfterDeletion_t1 ==
usageForLbRuleAfterDeletion_t2,
"usage for LB rule after deletion should remain the same\
after specific intervals of time")
qresultset = self.dbclient.execute(
"select id from account where account_name = '%s';"
% self.account.name
)
accountid = qresultset[0][0]
self.debug("accountid: %s" % accountid)
qresultset = self.dbclient.execute(
"select current_bytes_sent, current_bytes_received from user_statistics where account_id = '%s';" %
accountid,
db="cloud_usage")[0]
currentBytesSentBeforeRouterStop = qresultset[0]
currentBytesReceivedBeforeRouterStop = qresultset[1]
self.debug(currentBytesSentBeforeRouterStop)
self.debug(currentBytesReceivedBeforeRouterStop)
# Step 8
routers = Router.list(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid,
)
self.assertEqual(
validateList(routers)[0],
PASS,
"Check for list routers response return valid data"
)
router = routers[0]
# Stop the router
Router.stop(
self.apiclient,
id=router.id
)
response = verifyRouterState(
self.apiclient,
router.id,
"stopped")
self.assertEqual(response[0], PASS, response[1])
qresultset = self.dbclient.execute(
"select current_bytes_sent, current_bytes_received, net_bytes_sent, net_bytes_received from user_statistics where account_id = '%s';" %
accountid,
db="cloud_usage")[0]
currentBytesSentAfterRouterStop = int(qresultset[0])
currentBytesReceivedAfterRouterStop = int(qresultset[1])
netBytesSentAfterRouterStop = int(qresultset[0])
netBytesReceivedAfterRouterStop = int(qresultset[1])
self.debug(currentBytesSentAfterRouterStop)
self.debug(currentBytesReceivedAfterRouterStop)
self.debug(netBytesSentAfterRouterStop)
self.debug(netBytesReceivedAfterRouterStop)
self.assertTrue(
(currentBytesSentAfterRouterStop +
currentBytesReceivedAfterRouterStop) == 0,
"Current bytes should be 0")
self.assertTrue(
(currentBytesSentBeforeRouterStop +
currentBytesReceivedBeforeRouterStop) == (
netBytesSentAfterRouterStop +
netBytesReceivedAfterRouterStop),
"current bytes should be moved to net bytes")
# TODO: Verify iptables counters are reset when domR starts
# Step 9
# Start the router
Router.start(
self.apiclient,
id=router.id
)
response = verifyRouterState(
self.apiclient,
router.id,
"running")
self.assertEqual(response[0], PASS, response[1])
# TODO: Verify iptables counters are reset when domR starts
# Verify a diff of total (current_bytes + net_bytes) in previous
# aggregation period and current period will give the network usage
return
@attr(tags=["advanced"], required_hardware="true")
def test_05_positive_tests_usage(self):
""" Positive test for usage test path T61 - T62
Steps:
# 1. Deploy a VM
# 2. Take Vm snapshot and verify usage is generated for VM snapshot
# 3. Delete VM snapshot and verify that usage stops
"""
time.sleep(180)
if self.hypervisor.lower() in ['kvm', 'hyperv']:
self.skipTest("This feature is not supported on %s" %
self.hypervisor)
# Step 1
# Create VM in account
virtual_machine = VirtualMachine.create(
self.userapiclient,
self.testdata["small"],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
zoneid=self.zone.id
)
# Step 2
vmsnapshot = VmSnapshot.create(
self.userapiclient,
virtual_machine.id)
response = self.listUsageRecords(usagetype=25)
self.assertEqual(response[0], PASS, response[1])
# Step 3
VmSnapshot.deleteVMSnapshot(
self.userapiclient,
vmsnapshot.id
)
response = self.listUsageRecords(usagetype=25)
self.assertEqual(response[0], PASS, response[1])
vmSnapshotUsageRecords_t1 = response[1]
vmSnapshotUsage_t1 = sum(float(record.rawusage)
for record in vmSnapshotUsageRecords_t1)
response = self.listUsageRecords(usagetype=25)
self.assertEqual(response[0], PASS, response[1])
vmSnapshotUsageRecords_t2 = response[1]
vmSnapshotUsage_t2 = sum(float(record.rawusage)
for record in vmSnapshotUsageRecords_t2)
self.debug(vmSnapshotUsage_t1)
self.debug(vmSnapshotUsage_t2)
self.assertEqual(
vmSnapshotUsage_t1,
vmSnapshotUsage_t2,
"VmSnapshot usage should remain the same\
once snapshot is deleted")
return
@attr(tags=["advanced"], required_hardware="true")
def test_06_positive_tests_usage(self):
"""Migrate VM and verify usage"""
# Validate the following
# 1. Create a VM, and verify that usage is generated for it
# with correct service offering and template id
# 2. Migrate the VM to suitable host
# 3. Verify that after migration, VM usage continues to be running
if self.hypervisor.lower() in ['lxc']:
self.skipTest(
"vm migrate feature is not supported on %s" %
self.hypervisor.lower())
# Step 1:
self.vm = VirtualMachine.create(
self.userapiclient,
self.testdata["small"],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
zoneid=self.zone.id
)
response = self.listUsageRecords(usagetype=1)
self.assertEqual(response[0], PASS, response[1])
vmUsageRecord = response[1][0]
self.assertEqual(vmUsageRecord.offeringid,
self.service_offering.id,
"The service offering id in the usage record\
does not match with id of service offering\
with which the VM was created")
self.assertEqual(vmUsageRecord.templateid,
self.template.id,
"The template id in the usage record\
does not match with id of template\
with which the VM was created")
# Step 2:
host = findSuitableHostForMigration(self.apiclient, self.vm.id)
if host is None:
self.skipTest(ERROR_NO_HOST_FOR_MIGRATION)
try:
self.vm.migrate(self.apiclient, host.id)
except Exception as e:
self.fail("Failed to migrate instance: %s" % e)
# Step 3:
response = self.listUsageRecords(usagetype=1)
self.assertEqual(response[0], PASS, response[1])
vmUsageRecords_t1 = response[1]
vmUsage_t1 = sum(float(record.rawusage)
for record in vmUsageRecords_t1)
response = self.listUsageRecords(usagetype=1)
self.assertEqual(response[0], PASS, response[1])
vmUsageRecords_t2 = response[1]
vmUsage_t2 = sum(float(record.rawusage)
for record in vmUsageRecords_t2)
self.debug(vmUsage_t1)
self.debug(vmUsage_t2)
self.assertTrue(
vmUsage_t1 <
vmUsage_t2,
"Vm usage should be running after\
vm is migrated")
@attr(tags=["advanced"], required_hardware="true")
def test_07_positive_tests_usage(self):
"""
Steps:
# 1. Add VM in VPC network, verify that
# usage is genrated for source nat ip pf network in vpc
# 2. Acquire a public ip in VPC network and verify
usage is genrated for the public ip
# 3. Create multiple PF rule on this ip in VPC network,
and verify that usage is generated for both pf rules
# 4. Enable vpn on source nat ip in vpc network
# 5. Add 2 vpn user
And verify that usage is genrated for both the vpn users
# 6. Delete one VPn user, and verify that usage is stopped
for deleted user
# 7. Open Egress rules on this VPC network
# 8. Create network traffic on this network ping www.google.com,
and verify that usage is genrated for network traffic
# 9. Delete onePF rule in VPC network
And verify that usage is stopped for the pf rule
# 10. Stop router for VPC network
Verify iptables counters are reset when domR stops
# Verify current_bytes in user_statistics table are moved to
net_bytes
# Verify currnt_bytes becomes zero
# 11. Start router for VPC network
Verify iptables counters are reset when domR starts
# Verify a diff of total (current_bytes + net_bytes) in previous
aggregation period and current period will give the network usage
"""
# Step 1
# Create VM in account
vpc_off = VpcOffering.create(
self.apiclient,
self.testdata["vpc_offering"]
)
vpc_off.update(self.apiclient, state='Enabled')
self.testdata["vpc"]["cidr"] = '10.1.1.0/24'
vpc = VPC.create(
self.userapiclient,
self.testdata["vpc"],
vpcofferingid=vpc_off.id,
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid
)
self.testdata["isolated_network"]["zoneid"] = self.zone.id
isolated_network = Network.create(
self.userapiclient,
self.testdata["isolated_network"],
self.account.name,
self.account.domainid,
vpcid=vpc.id,
networkofferingid=self.isolated_network_offering_vpc.id,
gateway="10.1.1.1",
netmask="255.255.255.0")
# Create VM in account
virtual_machine = VirtualMachine.create(
self.userapiclient,
self.testdata["small"],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
zoneid=self.zone.id,
networkids=[isolated_network.id]
)
# Checking usage for newly added network in VPC
response = self.listUsageRecords(usagetype=13)
self.assertEqual(response[0], PASS, response[1])
usageRecords = response[1]
nwOfferingUsageRecords = [
record for record in usageRecords if self.isolated_network_offering_vpc.id == record.offeringid]
self.assertTrue(validateList(nwOfferingUsageRecords)[0] == PASS,
"Network Offering usage record list validation failed")
self.assertTrue(float(nwOfferingUsageRecords[0].rawusage) > 0,
"Raw usage not started for isolated network offering")
# Step 2 (Verification of usage is done together for
# multiple steps)
# Acquiring public IP
public_ip = PublicIPAddress.create(self.userapiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=isolated_network.id,
vpcid=vpc.id
)
# Step 3
# Create NAT rule
nat_rule_1 = NATRule.create(self.userapiclient,
virtual_machine,
self.testdata["natrule"],
ipaddressid=public_ip.ipaddress.id,
openfirewall=False,
networkid=isolated_network.id,
vpcid=vpc.id
)
self.debug("Adding NetworkACL rules to make NAT rule accessible")
NetworkACL.create(self.userapiclient,
networkid=isolated_network.id,
services=self.testdata["natrule"],
traffictype='Ingress'
)
# Step 7:
NetworkACL.create(self.userapiclient,
networkid=isolated_network.id,
services=self.testdata["natrule"],
traffictype='Egress'
)
self.testdata["natrule"]["privateport"] = 23
self.testdata["natrule"]["publicport"] = 23
nat_rule_2 = NATRule.create(self.userapiclient,
virtual_machine,
self.testdata["natrule"],
ipaddressid=public_ip.ipaddress.id,
openfirewall=False,
networkid=isolated_network.id,
vpcid=vpc.id
)
ipAddresses = PublicIPAddress.list(
self.userapiclient,
vpcid=vpc.id,
issourcenat=True,
listall=True,
forvirtualnetwork=True)
sourceNatIP = ipAddresses[0]
# Usage verification section
# Checking source nat IP usage
response = self.listUsageRecords(usagetype=3)
self.assertEqual(response[0], PASS, response[1])
usageRecords = response[1]
ipUsageRecords = [record for record in usageRecords
if sourceNatIP.id == record.usageid]
self.assertTrue(validateList(ipUsageRecords)[0] == PASS,
"IP usage record list validation failed")
self.assertTrue(float(ipUsageRecords[0].rawusage) > 0,
"Raw usage not started for source NAT ip")
# Checking public IP usage
ipUsageRecords = [record for record in usageRecords
if public_ip.ipaddress.id == record.usageid]
self.assertTrue(validateList(ipUsageRecords)[0] == PASS,
"IP usage record list validation failed")
self.assertTrue(float(ipUsageRecords[0].rawusage) > 0,
"Raw usage not started for source NAT ip")
# Verifying NAT rule usage
response = self.listUsageRecords(usagetype=12, sleep=False)
self.assertEqual(response[0], PASS, response[1])
usageRecords = response[1]
natRuleUsageRecords = [record for record in usageRecords
if nat_rule_1.id == record.usageid]
self.assertTrue(validateList(natRuleUsageRecords)[0] == PASS,
"NAT rule usage record list validation failed")
self.assertTrue(float(natRuleUsageRecords[0].rawusage) > 0,
"Raw usage not started for nat rule")
natRuleUsageRecords = [record for record in usageRecords
if nat_rule_2.id == record.usageid]
self.assertTrue(validateList(natRuleUsageRecords)[0] == PASS,
"NAT rule usage record list validation failed")
self.assertTrue(float(natRuleUsageRecords[0].rawusage) > 0,
"Raw usage not started for nat rule")
# Step 4:
# Create VPN for source NAT ip
Vpn.create(self.apiclient,
sourceNatIP.id,
account=self.account.name,
domainid=self.account.domainid)
self.debug("Verifying the remote VPN access")
vpns = Vpn.list(self.apiclient,
publicipid=sourceNatIP.id,
listall=True)
self.assertEqual(
isinstance(vpns, list),
True,
"List VPNs shall return a valid response"
)
# Step 5:
vpnuser_1 = VpnUser.create(
self.apiclient,
self.testdata["vpn_user"]["username"],
self.testdata["vpn_user"]["password"],
account=self.account.name,
domainid=self.account.domainid,
rand_name=True
)
vpnuser_2 = VpnUser.create(
self.apiclient,
self.testdata["vpn_user"]["username"],
self.testdata["vpn_user"]["password"],
account=self.account.name,
domainid=self.account.domainid,
rand_name=True
)
# Checking VPN usage
response = self.listUsageRecords(usagetype=14)
self.assertEqual(response[0], PASS, response[1])
usageRecords = response[1]
vpnUserUsageRecords_1 = [record for record in usageRecords
if vpnuser_1.id == record.usageid]
vpnuser1_rawusage = sum(float(record.rawusage)
for record in vpnUserUsageRecords_1)
# Getting last usage job execution time
response = self.getLatestUsageJobExecutionTime()
self.assertEqual(response[0], PASS, response[1])
lastUsageJobExecTime = response[1]
# Checking exact VPN user creation time
response = self.getEventCreatedDateTime(vpnuser_1.username)
self.assertEqual(response[0], PASS, response[1])
vpnUserCreatedDateTime = response[1]
self.debug("VPN creation date: %s" % vpnUserCreatedDateTime)
# We have to get the expected usage count in hours as the rawusage returned by listUsageRecords
# is also in hours
expectedUsage = format(
((lastUsageJobExecTime - vpnUserCreatedDateTime).total_seconds() / 3600),
".2f")
self.debug("VPN user expected usage: %s" % expectedUsage)
actualUsage = format(vpnuser1_rawusage, ".2f")
self.assertEqual(
expectedUsage,
actualUsage,
"expected usage %s and actual usage %s not matching" %
(expectedUsage,
actualUsage))
vpnUserUsageRecords_2 = [record for record in usageRecords
if vpnuser_2.id == record.usageid]
self.assertTrue(validateList(vpnUserUsageRecords_2)[0] == PASS,
"VPN user usage record list validation failed")
vpnuser2_rawusage = sum(float(record.rawusage)
for record in vpnUserUsageRecords_2)
# Checking exact VPN user creation time
response = self.getEventCreatedDateTime(vpnuser_2.username)
self.assertEqual(response[0], PASS, response[1])
vpnUserCreatedDateTime = response[1]
self.debug("VPN creation date: %s" % vpnUserCreatedDateTime)
# We have to get the expected usage count in hours as the rawusage returned by listUsageRecords
# is also in hours
expectedUsage = format(
((lastUsageJobExecTime - vpnUserCreatedDateTime).total_seconds() / 3600),
".2f")
self.debug("VPN user expected usage: %s" % expectedUsage)
actualUsage = format(vpnuser2_rawusage, ".2f")
self.assertEqual(
expectedUsage,
actualUsage,
"expected usage %s and actual usage %s not matching" %
(expectedUsage,
actualUsage))
# Step 6:
vpnuser_1.delete(self.apiclient)
# Verify that VPN usage for user stopped
response = self.listUsageRecords(usagetype=14)
self.assertEqual(response[0], PASS, response[1])
usageRecords = response[1]
vpnuser_1_Usage_t1 = sum(float(record.rawusage) for record
in [record for record in usageRecords
if vpnuser_1.id == record.usageid])
response = self.listUsageRecords(usagetype=14)
self.assertEqual(response[0], PASS, response[1])
usageRecords = response[1]
vpnuser_1_Usage_t2 = sum(float(record.rawusage) for record
in [record for record in usageRecords
if vpnuser_1.id == record.usageid])
self.assertTrue(
vpnuser_1_Usage_t1 == vpnuser_1_Usage_t2,
"vpn user usage should be stopped once the user is deleted")
# Step 7:
# Step 8:
ssh_client = virtual_machine.get_ssh_client(
ipaddress=public_ip.ipaddress.ipaddress
)
res = ssh_client.execute("ping -c 1 www.google.com")
self.assertEqual(
str(res).count("1 received"),
1,
"Ping to outside world from VM should be successful"
)
# Verifying usage for bytes received - START
routers = list_routers(
self.apiclient,
networkid=isolated_network.id,
listall=True
)
self.assertEqual(
validateList(routers)[0],
PASS,
"Routers list validation failed")
router = routers[0]
result = self.getCommandResultFromRouter(
router,
"iptables -L NETWORK_STATS -n -v -x")
self.debug("iptables -L NETWORK_STATS -n -v -x: %s" % result)
bytesReceivedIptableRows = [record for record in result if
"eth2 eth0" in record]
self.debug("bytes received rows: %s" % bytesReceivedIptableRows)
bytesReceivedOnRouter = sum(
int(record[1]) for record in [x.split() for x in bytesReceivedIptableRows])
self.debug(
"Bytes received extracted from router: %s" %
bytesReceivedOnRouter)
# Verify that bytes received in usage are equal to
# as shown on router
response = self.listUsageRecords(usagetype=5)
self.assertEqual(response[0], PASS, response[1])
bytesReceivedUsage = sum(
int(record.rawusage) for record in response[1])
self.assertTrue(bytesReceivedUsage ==
bytesReceivedOnRouter,
"Total bytes received usage should be \
equal to bytes received on router")
# Verifying usage for bytes received - END
# Step 9:
# Delete one NAT rule
nat_rule_2.delete(self.userapiclient)
response = self.listUsageRecords(usagetype=12)
self.assertEqual(response[0], PASS, response[1])
usageRecords = response[1]
natRule_2_Usage_t1 = sum(float(record.rawusage) for record
in [record for record in usageRecords
if nat_rule_2.id == record.usageid])
response = self.listUsageRecords(usagetype=12)
self.assertEqual(response[0], PASS, response[1])
usageRecords = response[1]
natRule_2_Usage_t2 = sum(float(record.rawusage) for record
in [record for record in usageRecords
if nat_rule_2.id == record.usageid])
self.assertTrue(
natRule_2_Usage_t1 == natRule_2_Usage_t2,
"NAT rule usage should be stopped once the rule is deleted")
# Step 10:
qresultset = self.dbclient.execute(
"select id from account where account_name = '%s';"
% self.account.name
)
accountid = qresultset[0][0]
self.debug("accountid: %s" % accountid)
qresultset = self.dbclient.execute(
"select current_bytes_sent, current_bytes_received from user_statistics where account_id = '%s';" %
accountid,
db="cloud_usage")[0]
currentBytesSentBeforeRouterStop = qresultset[0]
currentBytesReceivedBeforeRouterStop = qresultset[1]
self.debug(currentBytesSentBeforeRouterStop)
self.debug(currentBytesReceivedBeforeRouterStop)
# Stop the VPC Router
routers = Router.list(
self.api_client,
account=self.account.name,
domainid=self.account.domainid,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"List Routers should return a valid list"
)
router = routers[0]
self.debug("Stopping the router with ID: %s" % router.id)
Router.stop(
self.apiclient,
id=router.id
)
response = verifyRouterState(
self.apiclient,
router.id,
"stopped")
self.assertEqual(response[0], PASS, response[1])
# TODO: Verify iptables counters are reset when domR stops
qresultset = self.dbclient.execute(
"select current_bytes_sent, current_bytes_received, net_bytes_sent, net_bytes_received from user_statistics where account_id = '%s';" %
accountid,
db="cloud_usage")[0]
currentBytesSentAfterRouterStop = int(qresultset[0])
currentBytesReceivedAfterRouterStop = int(qresultset[1])
netBytesSentAfterRouterStop = int(qresultset[0])
netBytesReceivedAfterRouterStop = int(qresultset[1])
self.debug(currentBytesSentAfterRouterStop)
self.debug(currentBytesReceivedAfterRouterStop)
self.debug(netBytesSentAfterRouterStop)
self.debug(netBytesReceivedAfterRouterStop)
self.assertTrue(
(currentBytesSentAfterRouterStop +
currentBytesReceivedAfterRouterStop) == 0,
"Current bytes should be 0")
self.assertTrue(
(currentBytesSentBeforeRouterStop +
currentBytesReceivedBeforeRouterStop) == (
netBytesSentAfterRouterStop +
netBytesReceivedAfterRouterStop),
"current bytes should be moved to net bytes")
# Step 11:
# Start the router
Router.start(
self.apiclient,
id=router.id
)
response = verifyRouterState(
self.apiclient,
router.id,
"running")
self.assertEqual(response[0], PASS, response[1])
# TODO
# Verify iptables counters are reset when domR starts
# Verify a diff of total (current_bytes + net_bytes) in previous
# aggregation period and current period will give the network usage
return
@attr(tags=["advanced", "basic"], required_hardware="false")
def test_08_checkNewVolumein_listUsageRecords(self):
""" Test case to check if new volume crated after
restore VM is listed in listUsageRecords
# 1. Launch a VM
# 2. Restore the VM
# 3. Check if the new volume created is listed in listUsageRecords API
"""
# Step 1
vm = VirtualMachine.create(
self.userapiclient,
self.testdata["small"],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
zoneid=self.zone.id,
)
volumes_root_list = list_volumes(
self.apiclient,
virtualmachineid=vm.id,
type='ROOT',
listall=True
)
list_validation = validateList(volumes_root_list)
self.assertEqual(
list_validation[0],
PASS,
"Volume list validation failed due to %s" %
list_validation[2])
root_volume = volumes_root_list[0]
# Step 2
vm.restore(self.apiclient)
qresultset = self.dbclient.execute(
"select id from volumes where name='%s' and state='Ready';" %
root_volume.name)
db_list_validation = validateList(qresultset)
self.assertEqual(
db_list_validation[0],
PASS,
"Database list validation failed due to %s" %
db_list_validation[2])
self.assertNotEqual(
len(qresultset),
0,
"Check DB Query result set"
)
volumeCheck = "Volume Id: " + str(qresultset[0][0]) + " usage time"
response = self.listUsageRecords(usagetype=6)
self.assertEqual(response[0], PASS, response[1])
UsageRecords = [record for record in response[1]
if volumeCheck in record.description]
# Step 3
if not UsageRecords:
self.fail(
"listUsageRecords not returning usage for newly created volume")
class TestUsageDataAggregatior(cloudstackTestCase):
    """Tests for listing and validating usage types.

    NOTE(review): the class name carries a typo ("Aggregatior") but is kept
    unchanged so external references to the test class keep working.
    """

    @classmethod
    def setUpClass(cls):
        testClient = super(TestUsageDataAggregatior, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        return

    def setUp(self):
        self.apiclient = self.testClient.getApiClient()
        self.cleanup = []

    def listUsageTypes(self, apiclient=None):
        """List usage types.

        Returns [PASS, usage type list] on success or [FAIL, exception]
        on failure.
        """
        # BUGFIX: honour the optional apiclient argument (it used to be
        # silently ignored); also dropped the unreachable trailing return.
        if not apiclient:
            apiclient = self.apiclient
        try:
            usageTypes = Usage.listTypes(
                apiclient
            )
            self.assertEqual(
                validateList(usageTypes)[0],
                PASS,
                "usage types list validation failed")
            return [PASS, usageTypes]
        except Exception as e:
            return [FAIL, e]

    @attr(tags=["advanced"], required_hardware="true")
    def test_01_positive_tests_usagetypes_listTypes(self):
        """ 1. List Usage Types
            2. Verify Usage Id and Type mapping
        """
        # Expected id -> description mapping returned by listUsageTypes
        usageTypes = [
            {"usagetypeid": 1, "description": u'Running Vm Usage'},
            {"usagetypeid": 2, "description": u'Allocated Vm Usage'},
            {"usagetypeid": 3, "description": u'IP Address Usage'},
            {"usagetypeid": 4, "description": u'Network Usage (Bytes Sent)'},
            {"usagetypeid": 5, "description": u'Network Usage (Bytes Received)'},
            {"usagetypeid": 6, "description": u'Volume Usage'},
            {"usagetypeid": 7, "description": u'Template Usage'},
            {"usagetypeid": 8, "description": u'ISO Usage'},
            {"usagetypeid": 9, "description": u'Snapshot Usage'},
            {"usagetypeid": 11, "description": u'Load Balancer Usage'},
            {"usagetypeid": 12, "description": u'Port Forwarding Usage'},
            {"usagetypeid": 13, "description": u'Network Offering Usage'},
            {"usagetypeid": 14, "description": u'VPN users usage'}
        ]
        response = self.listUsageTypes()
        # BUGFIX: check the status element before using the payload
        self.assertEqual(response[0], PASS, response[1])
        listTypes = [{"usagetypeid": res.usagetypeid,
                      "description": res.description}
                     for res in response[1]]
        # Renamed loop variable to avoid shadowing the builtin `type`
        for usage_type in usageTypes:
            if usage_type not in listTypes:
                self.fail("Usage Type %s not present in list" % usage_type)
        return
class TestUsageDirectMeteringBasicZone(cloudstackTestCase):
    """Usage test path for a Basic Zone deployment."""

    @classmethod
    def setUpClass(cls):
        testClient = super(
            TestUsageDirectMeteringBasicZone,
            cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.testdata = testClient.getParsedTestDataConfig()
        cls._cleanup = []
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
        cls.mgtSvrDetails = cls.config.__dict__["mgtSvr"][0].__dict__
        isUsageJobRunning = cls.IsUsageJobRunning()
        cls.usageJobNotRunning = False
        if not isUsageJobRunning:
            # All tests will be skipped in setUp
            cls.usageJobNotRunning = True
            return
        if cls.testdata["configurableData"][
                "setUsageConfigurationThroughTestCase"]:
            cls.setUsageConfiguration()
            cls.RestartServers()
        else:
            currentMgtSvrTime = cls.getCurrentMgtSvrTime()
            dateTimeSplit = currentMgtSvrTime.split("/")
            cls.curDate = dateTimeSplit[0]
        cls.template = get_template(
            cls.apiclient,
            cls.zone.id,
            cls.testdata["ostype"])
        try:
            # If local storage is enabled, alter the offerings to use
            # localstorage
            if cls.zone.localstorageenable:
                cls.testdata["service_offering"]["storagetype"] = 'local'
            # Create a small service offering with fixed values for
            # cpunumber, cpuspeed, and memory
            cls.testdata["service_offering"]["cpunumber"] = "1"
            cls.testdata["service_offering"]["cpuspeed"] = "128"
            cls.testdata["service_offering"]["memory"] = "256"
            cls.service_offering = ServiceOffering.create(
                cls.apiclient,
                cls.testdata["service_offering"]
            )
            cls._cleanup.append(cls.service_offering)
            configs = Configurations.list(
                cls.apiclient,
                name='usage.stats.job.aggregation.range'
            )
            # Set the value for one more minute than
            # actual range to be on safer side
            cls.usageJobAggregationRange = (
                int(configs[0].value) + 1) * 60  # in seconds
        except Exception as e:
            cls.tearDownClass()
            raise e
        return

    @classmethod
    def tearDownClass(cls):
        try:
            cleanup_resources(cls.apiclient, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)

    def setUp(self):
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []
        if self.usageJobNotRunning:
            self.skipTest("Skipping test because usage job not running")
        # Create an account
        self.account = Account.create(
            self.apiclient,
            self.testdata["account"],
            domainid=self.domain.id
        )
        self.cleanup.append(self.account)
        # Create user api client of the account
        self.userapiclient = self.testClient.getUserApiClient(
            UserName=self.account.name,
            DomainName=self.account.domain
        )

    def tearDown(self):
        try:
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    @classmethod
    def setUsageConfiguration(cls):
        """ Set the configuration parameters so that usage job runs
        every 10 minutes """
        Configurations.update(
            cls.apiclient,
            name="enable.usage.server",
            value="true"
        )
        Configurations.update(
            cls.apiclient,
            name="usage.aggregation.timezone",
            value="GMT"
        )
        Configurations.update(
            cls.apiclient,
            name="usage.execution.timezone",
            value="GMT"
        )
        Configurations.update(
            cls.apiclient,
            name="usage.stats.job.aggregation.range",
            value="10"
        )
        currentMgtSvrTime = cls.getCurrentMgtSvrTime()
        dateTimeSplit = currentMgtSvrTime.split("/")
        cls.curDate = dateTimeSplit[0]
        timeSplit = dateTimeSplit[1].split(":")
        # BUGFIX: schedule the job 5 minutes ahead with proper hour/minute
        # rollover; the old code could produce invalid times such as "10:62"
        # when the current minute was >= 55.
        hours = int(timeSplit[0])
        minutes = int(timeSplit[1]) + 5
        if minutes >= 60:
            minutes -= 60
            hours = (hours + 1) % 24
        usageJobExecTime = "%02d:%02d" % (hours, minutes)
        Configurations.update(
            cls.apiclient,
            name="usage.stats.job.exec.time",
            value=usageJobExecTime
        )
        return

    @classmethod
    def getCurrentMgtSvrTime(cls, format='%Y-%m-%d/%H:%M'):
        """ Get the current time from Management Server """
        # NOTE: `format` shadows the builtin but is kept as a keyword
        # parameter name for backward compatibility with callers.
        sshClient = SshClient(
            cls.mgtSvrDetails["mgtSvrIp"],
            22,
            cls.mgtSvrDetails["user"],
            cls.mgtSvrDetails["passwd"]
        )
        command = "date +%s" % format
        return sshClient.execute(command)[0]

    @classmethod
    def RestartServers(cls):
        """ Restart management server and usage server """
        sshClient = SshClient(
            cls.mgtSvrDetails["mgtSvrIp"],
            22,
            cls.mgtSvrDetails["user"],
            cls.mgtSvrDetails["passwd"]
        )
        command = "service cloudstack-management restart"
        sshClient.execute(command)
        command = "service cloudstack-usage restart"
        sshClient.execute(command)
        return

    @classmethod
    def IsUsageJobRunning(cls):
        """ Check that usage job is running on Management server or not"""
        sshClient = SshClient(
            cls.mgtSvrDetails["mgtSvrIp"],
            22,
            cls.mgtSvrDetails["user"],
            cls.mgtSvrDetails["passwd"]
        )
        command = "service cloudstack-usage status"
        response = str(sshClient.execute(command)).lower()
        if "unknown" in response:
            return False
        return True

    def listUsageRecords(self, usagetype, apiclient=None, startdate=None,
                         enddate=None, account=None, sleep=True):
        """List and return the usage records for the given account
        and given usage type.

        Returns [PASS, records] on success or [FAIL, exception] on failure.
        """
        if sleep:
            # Sleep till usage job has run at least once after the operation
            self.debug(
                "Sleeping for %s seconds" %
                self.usageJobAggregationRange)
            time.sleep(self.usageJobAggregationRange)
        if not startdate:
            startdate = self.curDate
        if not enddate:
            enddate = self.curDate
        if not account:
            account = self.account
        # BUGFIX: the fallback used to be a bare no-op expression
        # ("self.apiclient" on its own line); assign the default and
        # actually use the chosen client below.
        if not apiclient:
            apiclient = self.apiclient
        Usage.generateRecords(
            apiclient,
            startdate=startdate,
            enddate=enddate)
        try:
            usageRecords = Usage.listRecords(
                apiclient,
                startdate=startdate,
                enddate=enddate,
                account=account.name,
                domainid=account.domainid,
                type=usagetype)
            self.assertEqual(
                validateList(usageRecords)[0],
                PASS,
                "usage records list validation failed")
            return [PASS, usageRecords]
        except Exception as e:
            return [FAIL, e]

    def getLatestUsageJobExecutionTime(self):
        """ Get the end time of latest usage job that has run successfully"""
        try:
            qresultset = self.dbclient.execute(
                "SELECT max(end_date) FROM usage_job WHERE success=1;",
                db="cloud_usage")
            self.assertNotEqual(
                len(qresultset),
                0,
                "Check DB Query result set"
            )
            lastUsageJobExecutionTime = qresultset[0][0]
            self.debug(
                "last usage job exec time: %s" %
                lastUsageJobExecutionTime)
            return [PASS, lastUsageJobExecutionTime]
        except Exception as e:
            return [FAIL, e]

    def getEventCreatedDateTime(self, resourceName):
        """ Get the created date/time of particular entity
        from cloud_usage.usage_event table """
        try:
            # Checking exact entity creation time
            qresultset = self.dbclient.execute(
                "select created from usage_event where resource_name = '%s';" %
                str(resourceName), db="cloud_usage")
            self.assertNotEqual(
                len(qresultset),
                0,
                "Check DB Query result set"
            )
            eventCreatedDateTime = qresultset[0][0]
        except Exception as e:
            return [FAIL, e]
        return [PASS, eventCreatedDateTime]

    @attr(tags=["basic"], required_hardware="true")
    def test_01_positive_tests_usage_basic_zone(self):
        """ Positive test for usage test path Basic Zone
        # 1. Deploy VM in basic zone and verify that VM usage is generated
             for the account with correct service offering
        # 2. SSH to VM and ping to external network
        # 3. Verify correct network byte usage is generated for the account
        """
        # Create VM in account
        vm = VirtualMachine.create(
            self.userapiclient,
            self.testdata["small"],
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            zoneid=self.zone.id,
            mode=self.zone.networktype
        )
        # Checking running VM usage (usage type 1)
        response = self.listUsageRecords(usagetype=1)
        self.assertEqual(response[0], PASS, response[1])
        vmRunningUsageRecords = [record for record in response[1]
                                 if record.virtualmachineid == vm.id]
        vmRunningRawUsage = sum(float(record.rawusage)
                                for record in vmRunningUsageRecords)
        self.assertEqual(vmRunningUsageRecords[0].offeringid,
                         self.service_offering.id,
                         "The service offering id in the usage record\
                         does not match with id of service offering\
                         with which the VM was created")
        self.assertEqual(vmRunningUsageRecords[0].templateid,
                         self.template.id,
                         "The template id in the usage record\
                         does not match with id of template\
                         with which the VM was created")
        # Allocated VM usage (usage type 2) starts at VM creation, so it
        # must exceed the running usage
        response = self.listUsageRecords(usagetype=2, sleep=False)
        self.assertEqual(response[0], PASS, response[1])
        vmAllocatedUsageRecords = [record for record in response[1]
                                   if record.virtualmachineid == vm.id]
        vmAllocatedRawUsage = sum(float(record.rawusage)
                                  for record in vmAllocatedUsageRecords)
        self.debug("running vm usage: %s" % vmRunningRawUsage)
        self.debug("allocated vm usage: %s" % vmAllocatedRawUsage)
        self.assertTrue(
            vmRunningRawUsage < vmAllocatedRawUsage,
            "Allocated VM usage should be greater than Running VM usage")
        # Getting last usage job execution time
        response = self.getLatestUsageJobExecutionTime()
        self.assertEqual(response[0], PASS, response[1])
        lastUsageJobExecTime = response[1]
        # Checking exact VM creation time
        response = self.getEventCreatedDateTime(vm.name)
        self.assertEqual(response[0], PASS, response[1])
        vmCreatedDateTime = response[1]
        self.debug("Vm creation date: %s" % vmCreatedDateTime)
        # We have to get the expected usage count in hours as the rawusage
        # returned by listUsageRecords is also in hours
        expectedUsage = format(
            ((lastUsageJobExecTime - vmCreatedDateTime).total_seconds() / 3600),
            ".2f")
        self.debug("VM expected usage: %s" % expectedUsage)
        actualUsage = format(vmAllocatedRawUsage, ".2f")
        self.assertEqual(
            expectedUsage,
            actualUsage,
            "expected usage %s and actual usage %s not matching" %
            (expectedUsage,
             actualUsage))
        # TODO: Add traffic sentinel, because it is needed in basic zone
        # to gather network traffic values
        """ssh_client = vm.get_ssh_client()
        res = ssh_client.execute("ping -c 1 www.google.com")
        result = str(res)
        self.assertEqual(
            result.count("1 received"),
            1,
            "Ping to outside world from VM should be successful"
        )
        result = str(res[1])
        bytesReceived = int(result.split("bytes", 1)[0])
        response = self.listUsageRecords(usagetype=5)
        self.assertEqual(response[0], PASS, response[1])
        bytesReceivedUsageRecord = sum(
            int(record.rawusage) for record in response[1])
        self.assertTrue(bytesReceivedUsageRecord >=
                        bytesReceived,
                        "Total bytes received usage should be greater than\
                        or equal to bytes received by pinging\
                        www.google.com")"""
        return
| {
"content_hash": "93dc6262c19d1e257ac7afc40fe2ad7e",
"timestamp": "",
"source": "github",
"line_count": 3397,
"max_line_length": 147,
"avg_line_length": 37.2346187812776,
"alnum_prop": 0.5799456066284016,
"repo_name": "jcshen007/cloudstack",
"id": "5f36c694d7c7ebbe487f67bd44ba2092d9c32781",
"size": "127271",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "test/integration/testpaths/testpath_usage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "1451"
},
{
"name": "Batchfile",
"bytes": "11926"
},
{
"name": "C#",
"bytes": "2356211"
},
{
"name": "CSS",
"bytes": "336634"
},
{
"name": "FreeMarker",
"bytes": "4917"
},
{
"name": "Groovy",
"bytes": "153137"
},
{
"name": "HTML",
"bytes": "151248"
},
{
"name": "Java",
"bytes": "34084304"
},
{
"name": "JavaScript",
"bytes": "7687141"
},
{
"name": "Python",
"bytes": "11154323"
},
{
"name": "Ruby",
"bytes": "896"
},
{
"name": "Shell",
"bytes": "770550"
}
],
"symlink_target": ""
} |
"""Ops to use variables as resources."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import variable_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.eager import tape
from tensorflow.python.framework import cpp_shape_inference_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_resource_variable_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.util import compat
def get_resource_handle_data(graph_op):
  """Returns the HandleData proto recorded for a resource tensor.

  Only valid when the C shape-inference path is active and `graph_op` is a
  plain graph `ops.Tensor` (not a subclass such as an eager tensor).
  """
  assert ops._USE_C_SHAPES  # pylint: disable=protected-access
  assert type(graph_op) == ops.Tensor  # pylint: disable=unidiomatic-typecheck

  serialized = pywrap_tensorflow.GetResourceHandleShapeAndType(
      graph_op.graph._c_graph, graph_op._as_tf_output())  # pylint: disable=protected-access

  handle_proto = cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData
  return handle_proto.FromString(compat.as_bytes(serialized))
def _eager_safe_variable_handle(shape, dtype, shared_name, name, graph_mode):
  """Creates a variable handle with information to do shape inference."""
  container = ops.get_default_graph()._container  # pylint: disable=protected-access
  if container is None:
    container = ""
  handle = gen_resource_variable_ops.var_handle_op(
      shape=shape,
      dtype=dtype,
      shared_name=shared_name,
      name=name,
      container=container)
  if graph_mode:
    return handle

  # Eager-mode path. We do not want two distinct ResourceVariable objects
  # for the same underlying resource in the runtime. (In graph mode this is
  # ensured by always generating different variable names.)
  if gen_resource_variable_ops.var_is_initialized_op(handle):
    raise ValueError("variable object with name '%s' already created. Use "
                     "get_variable() if reuse is desired." %
                     shared_name)
  with context.graph_mode(), ops.Graph().as_default() as shadow_graph:
    # Build a shadow graph handle whose op carries the shape/dtype metadata.
    shadow_handle = gen_resource_variable_ops.var_handle_op(
        shape=shape,
        dtype=dtype,
        shared_name=shared_name,
        name=name,
        container=container)

    # Tensor._handle_data holds the information the shape-inference code
    # needs about the variable a handle points to. Shape inference does not
    # run in eager mode, so copy that data onto the eager handle here for
    # when it is captured by an eager-mode function.
    # pylint: disable=protected-access
    if ops._USE_C_SHAPES:
      handle._handle_data = get_resource_handle_data(shadow_handle)
    else:
      if shadow_handle._handle_data is None:
        ops.set_shape_and_handle_data_for_outputs(shadow_handle.op)
      handle._handle_data = shadow_handle._handle_data
    # pylint: enable=protected-access
  # Clean up op->graph->op reference cycles.
  ops.dismantle_graph(shadow_graph)
  return handle
@contextlib.contextmanager
def _handle_graph(handle):
  """Makes `handle`'s graph the default graph inside the context.

  Note: we might have an eager tensor without executing eagerly when
  building functions; and if a default graph is already set, or we are
  executing eagerly, no graph switch is needed.
  """
  no_switch_needed = (context.executing_eagerly()
                      or isinstance(handle, ops.EagerTensor)
                      or ops.has_default_graph())
  if no_switch_needed:
    yield
  else:
    with handle.graph.as_default():
      yield
class EagerResourceDeleter(object):
  """An object which cleans up a resource handle.

  This is an alternative to defining a __del__ method directly on an object
  that owns a resource. The intended use is for a ResourceVariable (or any
  object holding a resource handle) to keep a single reference to one of
  these; when the parent object is garbage-collected, so is this one — and
  because the parent is then collectable even as part of a reference cycle,
  the cycle itself remains collectable.
  """

  def __init__(self, handle, handle_device):
    if not isinstance(handle, ops.Tensor):
      raise ValueError(
          ("Passed handle=%s to EagerResourceDeleter. Was expecting a handle "
           "Tensor." % (handle,)))
    self._handle = handle
    self._handle_device = handle_device

  def __del__(self):
    # Resources follow object-identity when executing eagerly, so deleting
    # the resource behind our handle is safe here.
    try:
      # The resource was created in eager mode, but this destructor may run
      # in graph mode (notably during unit tests). Switch back into eager
      # mode temporarily so the destroy op executes.
      with context.eager_mode(), ops.device(self._handle_device):
        gen_resource_variable_ops.destroy_resource_op(
            self._handle, ignore_lookup_error=True)
    except TypeError:
      # Swallow errors that occur during module teardown: the context
      # module may already be unloaded, self._handle._handle_data may no
      # longer be valid, etc. Exceptions raised from __del__ are printed as
      # warnings to stderr, which would just be noise here.
      pass  # 'NoneType' object is not callable when the handle has been
      # partially unloaded.
    except AttributeError:
      pass  # 'NoneType' object has no attribute 'eager_mode' when context
      # has been unloaded. Will catch other module unloads as well.
def shape_safe_assign_variable_handle(handle, shape, value, name=None):
  """Assigns `value` to the variable behind `handle`, checking `shape` first.

  Raises a shape-incompatibility error (via `assert_is_compatible_with`)
  before emitting the assign op.
  """
  with _handle_graph(handle):
    tensor_value = ops.convert_to_tensor(value)
    shape.assert_is_compatible_with(tensor_value.shape)
    return gen_resource_variable_ops.assign_variable_op(
        handle, tensor_value, name=name)
# TODO(apassos) make this be variables.Variable
class ResourceVariable(variables.RefVariable):
"""Variable based on resource handles.
See the [Variables How To](https://tensorflow.org/guide/variables)
for a high level overview.
A `ResourceVariable` allows you to maintain state across subsequent calls to
session.run.
The `ResourceVariable` constructor requires an initial value for the variable,
which can be a `Tensor` of any type and shape. The initial value defines the
type and shape of the variable. After construction, the type and shape of
the variable are fixed. The value can be changed using one of the assign
methods.
Just like any `Tensor`, variables created with
`tf.Variable(use_resource=True)` can be used as inputs for other Ops in the
graph. Additionally, all the operators overloaded for the `Tensor` class are
carried over to variables, so you can also add nodes to the graph by just
doing arithmetic on variables.
Unlike ref-based variables, a ResourceVariable has well-defined semantics. Each
usage of a ResourceVariable in a TensorFlow graph adds a read_value operation
to the graph. The Tensors returned by a read_value operation are guaranteed to
see all modifications to the value of the variable which happen in any
operation on which the read_value depends (either directly, indirectly, or
via a control dependency) and guaranteed to not see any modification to the
value of the variable from operations that depend on the read_value operation.
Updates from operations that have no dependency relationship to the read_value
operation might or might not be visible to read_value.
For example, if there is more than one assignment to a ResourceVariable in
a single session.run call there is a well-defined value for each operation
which uses the variable's value if the assignments and the read are connected
by edges in the graph. Consider the following example, in which two writes
can cause tf.Variable and tf.ResourceVariable to behave differently:
```python
a = tf.Variable(1.0, use_resource=True)
a.initializer.run()
assign = a.assign(2.0)
with tf.control_dependencies([assign]):
b = a.read_value()
with tf.control_dependencies([b]):
other_assign = a.assign(3.0)
with tf.control_dependencies([other_assign]):
# Will print 2.0 because the value was read before other_assign ran. If
# `a` was a tf.Variable instead, 2.0 or 3.0 could be printed.
tf.Print(b, [b]).eval()
```
"""
def __init__(self,
             initial_value=None,
             trainable=True,
             collections=None,
             validate_shape=True,
             caching_device=None,
             name=None,
             dtype=None,
             variable_def=None,
             import_scope=None,
             constraint=None):
  """Creates a variable.

  Args:
    initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
      which is the initial value for the Variable. The initial value must have
      a shape specified unless `validate_shape` is set to False. Can also be a
      callable with no argument that returns the initial value when called.
      (Note that initializer functions from init_ops.py must first be bound
      to a shape before being used here.)
    trainable: If `True`, the default, also adds the variable to the graph
      collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
      the default list of variables to use by the `Optimizer` classes.
    collections: List of graph collections keys. The new variable is added to
      these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
    validate_shape: Ignored. Provided for compatibility with tf.Variable.
    caching_device: Optional device string or function describing where the
      Variable should be cached for reading. Defaults to the Variable's
      device. If not `None`, caches on another device. Typical use is to
      cache on the device where the Ops using the Variable reside, to
      deduplicate copying through `Switch` and other conditional statements.
    name: Optional name for the variable. Defaults to `'Variable'` and gets
      uniquified automatically.
    dtype: If set, initial_value will be converted to the given type.
      If None, either the datatype will be kept (if initial_value is
      a Tensor) or float32 will be used (if it is a Python object convertible
      to a Tensor).
    variable_def: `VariableDef` protocol buffer. If not None, recreates the
      `ResourceVariable` object with its contents. `variable_def` and other
      arguments (except for import_scope) are mutually exclusive.
    import_scope: Optional `string`. Name scope to add to the
      ResourceVariable. Only used when `variable_def` is provided.
    constraint: An optional projection function to be applied to the variable
      after being updated by an `Optimizer` (e.g. used to implement norm
      constraints or value constraints for layer weights). The function must
      take as input the unprojected Tensor representing the value of the
      variable and return the Tensor for the projected value
      (which must have the same shape). Constraints are not safe to
      use when doing asynchronous distributed training.

  Raises:
    ValueError: If the initial value is not specified, or does not have a
      shape and `validate_shape` is `True`.

  @compatibility(eager)
  When Eager Execution is enabled, the default for the `collections` argument
  is `None`, which signifies that this `Variable` will not be added to any
  collections.
  @end_compatibility
  """
  # Two mutually exclusive construction paths: rebuild from a serialized
  # VariableDef proto, or build fresh from the given arguments.
  if variable_def:
    if initial_value is not None:
      raise ValueError("variable_def and initial_value are mutually "
                       "exclusive.")
    if context.executing_eagerly():
      raise ValueError("Creating ResourceVariable from variable_def is "
                       "not supported when eager execution is enabled.")
    self._init_from_proto(variable_def, import_scope=import_scope)
  else:
    self._init_from_args(
        initial_value=initial_value,
        trainable=trainable,
        collections=collections,
        validate_shape=validate_shape,
        caching_device=caching_device,
        name=name,
        dtype=dtype,
        constraint=constraint)
# pylint: disable=unused-argument
def _init_from_args(self,
                    initial_value=None,
                    trainable=True,
                    collections=None,
                    validate_shape=True,
                    caching_device=None,
                    name=None,
                    dtype=None,
                    constraint=None):
  """Creates a variable.

  Args:
    initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
      which is the initial value for the Variable. The initial value must have
      a shape specified unless `validate_shape` is set to False. Can also be a
      callable with no argument that returns the initial value when called.
      (Note that initializer functions from init_ops.py must first be bound
      to a shape before being used here.)
    trainable: If `True`, the default, also adds the variable to the graph
      collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
      the default list of variables to use by the `Optimizer` classes.
    collections: List of graph collections keys. The new variable is added to
      these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
    validate_shape: Ignored. Provided for compatibility with tf.Variable.
    caching_device: Optional device string or function describing where the
      Variable should be cached for reading. Defaults to the Variable's
      device. If not `None`, caches on another device. Typical use is to
      cache on the device where the Ops using the Variable reside, to
      deduplicate copying through `Switch` and other conditional statements.
    name: Optional name for the variable. Defaults to `'Variable'` and gets
      uniquified automatically.
    dtype: If set, initial_value will be converted to the given type.
      If None, either the datatype will be kept (if initial_value is
      a Tensor) or float32 will be used (if it is a Python object convertible
      to a Tensor).
    constraint: An optional projection function to be applied to the variable
      after being updated by an `Optimizer` (e.g. used to implement norm
      constraints or value constraints for layer weights). The function must
      take as input the unprojected Tensor representing the value of the
      variable and return the Tensor for the projected value
      (which must have the same shape). Constraints are not safe to
      use when doing asynchronous distributed training.

  Raises:
    ValueError: If the initial value is not specified, or does not have a
      shape and `validate_shape` is `True`.

  @compatibility(eager)
  When Eager Execution is enabled, variables are never added to collections.
  It is not implicitly added to the `GLOBAL_VARIABLES` or
  `TRAINABLE_VARIABLES` collections, and the `collections` argument is
  ignored.
  @end_compatibility
  """
  if initial_value is None:
    raise ValueError("initial_value must be specified.")
  init_from_fn = callable(initial_value)
  if isinstance(initial_value, ops.Tensor) and hasattr(
      initial_value, "graph") and initial_value.graph.building_function:
    raise ValueError("Tensor-typed variable initializers must either be "
                     "wrapped in an init_scope or callable "
                     "(e.g., `tf.Variable(lambda : "
                     "tf.truncated_normal([10, 40]))`) when building "
                     "functions. Please file a feature request if this "
                     "restriction inconveniences you.")
  if collections is None:
    collections = [ops.GraphKeys.GLOBAL_VARIABLES]
  if not isinstance(collections, (list, tuple, set)):
    raise ValueError(
        "collections argument to Variable constructor must be a list, tuple, "
        "or set. Got %s of type %s" % (collections, type(collections)))
  if constraint is not None and not callable(constraint):
    raise ValueError("The `constraint` argument must be a callable.")
  # Unwrap checkpoint-restored initial values, remembering the restore uid.
  if isinstance(initial_value, checkpointable.CheckpointInitialValue):
    self._maybe_initialize_checkpointable()
    self._update_uid = initial_value.checkpoint_position.restore_uid
    initial_value = initial_value.wrapped_value
  self._trainable = trainable
  if trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
    collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]
  self._save_slice_info = None
  # Store the graph key so optimizers know how to only retrieve variables from
  # this graph.
  self._graph_key = ops.get_default_graph()._graph_key  # pylint: disable=protected-access
  with ops.init_scope():
    self._in_graph_mode = not context.executing_eagerly()
    with ops.name_scope(name, "Variable", []
                        if init_from_fn else [initial_value]) as name:
      # pylint: disable=protected-access
      handle_name = ops._name_from_scope_name(name)
      if self._in_graph_mode:
        shared_name = handle_name
      else:
        # When in eager mode use a uid for the shared_name, to prevent
        # accidental sharing.
        shared_name = "%s_%d" % (handle_name, ops.uid())
      if init_from_fn:
        # Use attr_scope and device(None) to simulate the behavior of
        # colocate_with when the variable we want to colocate with doesn't
        # yet exist.
        if self._in_graph_mode:
          attr = attr_value_pb2.AttrValue(
              list=attr_value_pb2.AttrValue.ListValue(
                  s=[compat.as_bytes("loc:@%s" % handle_name)]))
          with ops.get_default_graph()._attr_scope({"_class": attr}):
            with ops.name_scope("Initializer"), ops.device(None):
              initial_value = ops.convert_to_tensor(
                  initial_value(), name="initial_value", dtype=dtype)
            self._handle = _eager_safe_variable_handle(
                shape=initial_value.get_shape(),
                dtype=initial_value.dtype.base_dtype,
                shared_name=shared_name,
                name=name,
                graph_mode=self._in_graph_mode)
            self._shape = initial_value.get_shape()
        else:
          # Eager mode: call the initializer immediately.
          initial_value = initial_value()
          with ops.name_scope("Initializer"):
            initial_value = ops.convert_to_tensor(
                initial_value, name="initial_value", dtype=dtype)
          self._handle = _eager_safe_variable_handle(
              shape=initial_value.get_shape(),
              dtype=initial_value.dtype.base_dtype,
              shared_name=shared_name,
              name=name,
              graph_mode=False)
          self._shape = initial_value.get_shape()
        # pylint: enable=protected-access
      # Or get the initial value from a Tensor or Python object.
      else:
        with ops.name_scope("Initializer"):
          initial_value = ops.convert_to_tensor(
              initial_value, name="initial_value", dtype=dtype)
        # pylint: disable=protected-access
        if (self._in_graph_mode and initial_value is not None and
            initial_value.op._get_control_flow_context() is not None):
          raise ValueError(
              "Initializer for variable %s is from inside a control-flow "
              "construct, such as a loop or conditional. When creating a "
              "variable inside a loop or conditional, use a lambda as the "
              "initializer." % name)
        # pylint: enable=protected-access
        self._handle = _eager_safe_variable_handle(
            shape=initial_value.get_shape(),
            dtype=initial_value.dtype.base_dtype,
            shared_name=shared_name,
            name=name,
            graph_mode=self._in_graph_mode)
        self._shape = initial_value.get_shape()
      self._unique_id = shared_name
      self._initial_value = initial_value if self._in_graph_mode else None
      self._handle_name = handle_name + ":0"
      self._dtype = initial_value.dtype.base_dtype
      self._constraint = constraint
      if self._in_graph_mode:
        with ops.name_scope("IsInitialized"):
          self._is_initialized_op = (
              gen_resource_variable_ops.var_is_initialized_op(self._handle))
        if initial_value is not None:
          with ops.name_scope("Assign") as n, ops.colocate_with(self._handle):
            self._initializer_op = (
                gen_resource_variable_ops.assign_variable_op(
                    self._handle,
                    self._try_guard_against_uninitialized_dependencies(
                        initial_value),
                    name=n))
        with ops.name_scope("Read"), ops.colocate_with(self._handle):
          # Manually assign reads to the handle's device to avoid log
          # messages.
          with ops.device(self._handle.device):
            value = self._read_variable_op()
          self._graph_element = value
          if caching_device is not None:
            # Variables may be created in a tf.device() or ops.colocate_with()
            # context. At the same time, users would expect caching device to
            # be independent of this context, and/or would not expect the
            # current device context to be merged with the caching device
            # spec. Therefore we reset the colocation stack before creating
            # the cached value. Note that resetting the colocation stack will
            # also reset the device stack.
            with ops.colocate_with(None, ignore_existing=True):
              with ops.device(caching_device):
                self._cached_value = array_ops.identity(value)
          else:
            self._cached_value = None
      else:
        # Eager mode: assign the initial value immediately; no graph ops.
        gen_resource_variable_ops.assign_variable_op(self._handle,
                                                     initial_value)
        self._is_initialized_op = None
        self._initializer_op = None
        self._graph_element = None
        if caching_device:
          with ops.device(caching_device):
            self._cached_value = self._read_variable_op()
        else:
          self._cached_value = None
      if not context.executing_eagerly():
        # Eager variables are only added to collections if they are part of an
        # eager variable store (otherwise in an interactive session they would
        # hog memory and cause OOM). This is done in ops/variable_scope.py.
        ops.add_to_collections(collections, self)
      elif ops.GraphKeys.GLOBAL_STEP in collections:
        ops.add_to_collections(ops.GraphKeys.GLOBAL_STEP, self)
  if not self._in_graph_mode:
    # After the handle has been created, set up a way to clean it up when
    # executing eagerly. We'll hold the only reference to the deleter, so that
    # when this object is garbage collected the deleter will be too. This
    # means ResourceVariables can be part of reference cycles without those
    # cycles being uncollectable, and means that no __del__ will be defined at
    # all in graph mode.
    self._handle_deleter = EagerResourceDeleter(
        handle=self._handle, handle_device=self._handle.device)
  self._cached_shape_as_list = None
def _init_from_proto(self, variable_def, import_scope=None):
  """Initializes from `VariableDef` proto."""
  # Note that init_from_proto is currently not supported in Eager mode.
  assert not context.executing_eagerly()
  self._in_graph_mode = True
  assert isinstance(variable_def, variable_pb2.VariableDef)
  if not variable_def.is_resource:
    raise ValueError("Trying to restore Variable as ResourceVariable.")
  # Create from variable_def.
  g = ops.get_default_graph()
  self._handle = g.as_graph_element(
      ops.prepend_name_scope(
          variable_def.variable_name, import_scope=import_scope))
  self._shape = tensor_shape.TensorShape(
      self._handle.op.get_attr("shape"))
  self._handle_name = self._handle.name
  self._unique_id = self._handle_name
  self._initializer_op = g.as_graph_element(
      ops.prepend_name_scope(
          variable_def.initializer_name, import_scope=import_scope))
  # Check whether initial_value_name exists for backwards compatibility.
  if (hasattr(variable_def, "initial_value_name") and
      variable_def.initial_value_name):
    self._initial_value = g.as_graph_element(
        ops.prepend_name_scope(variable_def.initial_value_name,
                               import_scope=import_scope))
  else:
    self._initial_value = None
  self._trainable = getattr(variable_def, "trainable", True)
  if variable_def.snapshot_name:
    snapshot = g.as_graph_element(
        ops.prepend_name_scope(
            variable_def.snapshot_name, import_scope=import_scope))
    self._cached_value = snapshot
    # Walk back from the snapshot to the underlying ReadVariableOp, which
    # serves as this variable's graph element.
    while snapshot.op.type != "ReadVariableOp":
      snapshot = snapshot.op.inputs[0]
    self._graph_element = snapshot
  else:
    self._cached_value = None
    # Legacy case for protos without the snapshot name; assume it's the
    # following.
    self._graph_element = g.get_tensor_by_name(
        self._handle.op.name + "/Read/ReadVariableOp:0")
  if variable_def.HasField("save_slice_info_def"):
    self._save_slice_info = variables.Variable.SaveSliceInfo(
        save_slice_info_def=variable_def.save_slice_info_def,
        import_scope=import_scope)
  else:
    self._save_slice_info = None
  self._caching_device = None
  self._dtype = dtypes.as_dtype(self._handle.op.get_attr("dtype"))
  self._constraint = None
  self._cached_shape_as_list = None
@contextlib.contextmanager
def _assign_dependencies(self):
  """Makes assignments depend on the cached value, if any.

  This prevents undefined behavior with reads not ordered wrt writes.

  Yields:
    None.
  """
  if self._cached_value is None:
    yield
  else:
    with ops.control_dependencies([self._cached_value]):
      yield
def __nonzero__(self):
  # Python 2 truth-value protocol; delegate to the Python 3 __bool__.
  return self.__bool__()
def __bool__(self):
  # Truthiness is determined by the variable's current value.
  return bool(self.read_value())
def __copy__(self):
  # A shallow copy shares the underlying resource, so return self unchanged.
  return self
def __deepcopy__(self, memo):
  """Makes an independent copy of this variable (eager mode only)."""
  if not context.executing_eagerly():
    raise NotImplementedError(
        "__deepcopy__() is only available when eager execution is enabled.")
  # NOTE(review): relies on self._shared_name being set elsewhere — verify it
  # is always present before deepcopy is used.
  clone = ResourceVariable(
      initial_value=self.read_value(),
      trainable=self._trainable,
      constraint=self._constraint,
      dtype=self._dtype,
      name=self._shared_name + "_copy")
  memo[self._unique_id] = clone
  return clone
@property
def dtype(self):
  """The `DType` of this variable's elements."""
  return self._dtype
@property
def device(self):
  """The device this variable is on."""
  # Placement is determined by the handle tensor.
  return self._handle.device
@property
def graph(self):
  """The `Graph` of this variable."""
  # The handle tensor determines which graph owns the variable.
  return self._handle.graph
@property
def name(self):
  """The name of the handle for this variable (ends with ":0")."""
  return self._handle_name
@property
def shape(self):
  """The static `TensorShape` of this variable, fixed at construction."""
  return self._shape
def _shape_as_list(self):
  """Returns this variable's shape as a Python list, caching the result.

  Returns None when the rank itself is unknown.
  """
  cached = self._cached_shape_as_list
  if cached:
    return cached
  if self.shape.ndims is None:
    return None
  as_list = [dim.value for dim in self.shape.dims]
  self._cached_shape_as_list = as_list
  return as_list
def _shape_tuple(self):
  """Returns the shape as a tuple of ints, or None if the rank is unknown."""
  as_list = self._shape_as_list()
  return None if as_list is None else tuple(as_list)
@property
def create(self):
  """The op responsible for initializing this variable (graph mode only)."""
  # Behaves the same as `initializer`, but guarded against eager execution.
  if self._in_graph_mode:
    return self._initializer_op
  raise RuntimeError("Calling create is not supported when eager execution"
                     " is enabled.")
@property
def handle(self):
  """The handle by which this variable can be accessed."""
  return self._handle
def value(self):
  """A cached operation which reads the value of this variable."""
  if self._cached_value is not None:
    return self._cached_value
  # Reset inherited colocation constraints so the read is placed on the
  # handle's device rather than wherever the caller happens to be.
  with ops.colocate_with(None, ignore_existing=True):
    with ops.device(self._handle.device):
      return self._read_variable_op()
def _as_graph_element(self):
  """Conversion function for Graph.as_graph_element()."""
  # The graph element is the read op created at construction time.
  return self._graph_element
@property
def initializer(self):
  """The op responsible for initializing this variable."""
  # None when executing eagerly (the variable is initialized on creation).
  return self._initializer_op
@property
def initial_value(self):
  """Returns the Tensor used as the initial value for the variable."""
  if context.executing_eagerly():
    raise RuntimeError("initial_value not supported in EAGER mode.")
  return self._initial_value
@property
def constraint(self):
  """Returns the constraint function associated with this variable.

  Returns:
    The constraint function that was passed to the variable constructor.
    Can be `None` if no constraint was passed.
  """
  return self._constraint
@property
def op(self):
  """The op for this variable."""
  # The op that produced the handle tensor.
  return self._handle.op
def eval(self, session=None):
  """Evaluates and returns the value of this variable.

  Args:
    session: The session in which to evaluate; the default session if None.
  """
  if context.executing_eagerly():
    raise RuntimeError("Trying to eval in EAGER mode")
  return self._graph_element.eval(session=session)
def numpy(self):
  """Returns the variable's current value as a NumPy array (eager only)."""
  if not context.executing_eagerly():
    raise NotImplementedError(
        "numpy() is only available when eager execution is enabled.")
  return self.read_value().numpy()
def count_up_to(self, limit):
  """Increments this variable until it reaches `limit`.

  When that Op is run it tries to increment the variable by `1`. If
  incrementing the variable would bring it above `limit` then the Op raises
  the exception `OutOfRangeError`.

  If no error is raised, the Op outputs the value of the variable before
  the increment.

  This is essentially a shortcut for `tf.count_up_to(self, limit)`.

  Args:
    limit: value at which incrementing the variable raises an error.

  Returns:
    A `Tensor` that will hold the variable value before the increment. If no
    other Op modifies this variable, the values produced will all be
    distinct.
  """
  return gen_state_ops.resource_count_up_to(self.handle, limit=limit,
                                            T=self.dtype)
def _set_save_slice_info(self, save_slice_info):
  """Sets the slice info for this `ResourceVariable`.

  Args:
    save_slice_info: A `Variable.SaveSliceInfo` object.
  """
  self._save_slice_info = save_slice_info
def _get_save_slice_info(self):
  """Returns the `Variable.SaveSliceInfo` for this variable, if any."""
  return self._save_slice_info
def _read_variable_op(self):
  """Reads the variable's value, notifying the gradient tape if trainable."""
  if self.trainable:
    tape.variable_accessed(self)
  result = gen_resource_variable_ops.read_variable_op(self._handle,
                                                      self._dtype)
  if not context.executing_eagerly():
    # Note that if a control flow context is active the input of the read op
    # might not actually be the handle. This line bypasses it.
    tape.record_operation(
        "ReadVariableOp", [result], [self._handle], lambda x: [x])
  return result
def read_value(self):
  """Constructs an op which reads the value of this variable.

  Should be used when there are multiple reads, or when it is desirable to
  read the value only after some condition is true.

  Returns:
    the read operation.
  """
  with ops.name_scope("Read"):
    # Ensure we read the variable in the same device as the handle.
    with ops.device(self._handle.device):
      value = self._read_variable_op()
  # Return an identity so it can get placed on whatever device the context
  # specifies instead of the device where the variable is.
  return array_ops.identity(value)
def sparse_read(self, indices, name=None):
  """Reads the value of this variable sparsely, using `gather`.

  Args:
    indices: Indices of the elements to gather.
    name: Optional name for the gather op.

  Returns:
    A `Tensor` with the gathered values.
  """
  with ops.name_scope("Gather" if name is None else name) as name:
    if self.trainable:
      # Let the gradient tape know the variable was read.
      tape.variable_accessed(self)
    value = gen_resource_variable_ops.resource_gather(
        self._handle, indices, dtype=self._dtype, name=name)
  # Identity lets the result be placed by the surrounding device context.
  return array_ops.identity(value)
def to_proto(self, export_scope=None):
  """Converts a `ResourceVariable` to a `VariableDef` protocol buffer.

  Args:
    export_scope: Optional `string`. Name scope to remove.

  Raises:
    RuntimeError: If run in EAGER mode.

  Returns:
    A `VariableDef` protocol buffer, or `None` if the `Variable` is not
    in the specified name scope.
  """
  if context.executing_eagerly():
    raise RuntimeError("to_proto not supported in EAGER mode.")
  if export_scope is None or self.handle.name.startswith(export_scope):
    var_def = variable_pb2.VariableDef()
    var_def.variable_name = ops.strip_name_scope(self.handle.name,
                                                 export_scope)
    if self._initial_value is not None:
      # This is inside an if-statement for backwards compatibility, since
      # self._initial_value might be None for variables constructed from old
      # protos.
      var_def.initial_value_name = ops.strip_name_scope(
          self._initial_value.name, export_scope)
    var_def.initializer_name = ops.strip_name_scope(self.initializer.name,
                                                    export_scope)
    if self._cached_value is not None:
      var_def.snapshot_name = ops.strip_name_scope(self._cached_value.name,
                                                   export_scope)
    else:
      # Store the graph_element here
      var_def.snapshot_name = ops.strip_name_scope(self._graph_element.name,
                                                   export_scope)
    var_def.is_resource = True
    var_def.trainable = self.trainable
    if self._save_slice_info:
      var_def.save_slice_info_def.MergeFrom(
          self._save_slice_info.to_proto(export_scope=export_scope))
    return var_def
  else:
    return None
@staticmethod
def from_proto(variable_def, import_scope=None):
  """Reconstructs a `ResourceVariable` from a `VariableDef` proto."""
  if context.executing_eagerly():
    raise RuntimeError("from_proto not supported in EAGER mode.")
  return ResourceVariable(variable_def=variable_def,
                          import_scope=import_scope)
@staticmethod
def _OverloadAllOperators():  # pylint: disable=invalid-name
  """Register overloads for all operators."""
  # Each overloadable operator is delegated to ops.Tensor's implementation
  # via _OverloadOperator.
  for operator in ops.Tensor.OVERLOADABLE_OPERATORS:
    ResourceVariable._OverloadOperator(operator)
  # For slicing, bind getitem differently than a tensor (use SliceHelperVar
  # instead)
  # pylint: disable=protected-access
  setattr(ResourceVariable, "__getitem__", array_ops._SliceHelperVar)
def _AsTensor(self):
  """Converts this variable to a Tensor by reading its value."""
  return self.value()
def _ref(self):
  """Unsupported.

  Raises:
    NotImplementedError: always; resource variables have no ref edge.
  """
  raise NotImplementedError("ResourceVariable does not implement _ref()")
def set_shape(self, shape):
  """Unsupported.

  Raises:
    NotImplementedError: always; the shape is fixed at construction.
  """
  raise NotImplementedError("ResourceVariable does not implement set_shape()")
@staticmethod
def _OverloadOperator(operator):  # pylint: disable=invalid-name
  """Defer an operator overload to `ops.Tensor`.

  We pull the operator out of ops.Tensor dynamically to avoid ordering issues.

  Args:
    operator: string. The operator name.
  """
  tensor_oper = getattr(ops.Tensor, operator)
  def _run_op(a, *args):
    # pylint: disable=protected-access
    # Read the variable's value and apply the Tensor operator to it.
    value = a._AsTensor()
    return tensor_oper(value, *args)
  # Propagate __doc__ to wrapper
  try:
    _run_op.__doc__ = tensor_oper.__doc__
  except AttributeError:
    pass
  setattr(ResourceVariable, operator, _run_op)
__array_priority__ = 100
def is_initialized(self, name=None):
  """Checks whether a resource variable has been initialized.

  Outputs boolean scalar indicating whether the tensor has been initialized.

  Args:
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.
  """
  return gen_resource_variable_ops.var_is_initialized_op(self.handle, name)
def assign_sub(self, delta, use_locking=None, name=None, read_value=True):
  """Subtracts a value from this variable.

  Args:
    delta: A `Tensor`. The value to subtract from this variable.
    use_locking: If `True`, use locking during the operation. Note: accepted
      for API compatibility but unused here.
    name: The name to use for the operation.
    read_value: A `bool`. Whether to read and return the new value of the
      variable or not.

  Returns:
    If `read_value` is `True`, this method will return the new value of the
    variable after the assignment has completed. Otherwise, when in graph mode
    it will return the `Operation` that does the assignment, and when in eager
    mode it will return `None`.
  """
  # TODO(apassos): this here and below is not atomic. Consider making it
  # atomic if there's a way to do so without a performance cost for those who
  # don't need it.
  with _handle_graph(self.handle), self._assign_dependencies():
    assign_sub_op = gen_resource_variable_ops.assign_sub_variable_op(
        self.handle, ops.convert_to_tensor(delta, dtype=self.dtype),
        name=name)
  if read_value:
    return self._lazy_read(assign_sub_op)
  return assign_sub_op
def assign_add(self, delta, use_locking=None, name=None, read_value=True):
  """Adds a value to this variable.

  Args:
    delta: A `Tensor`. The value to add to this variable.
    use_locking: If `True`, use locking during the operation. Note: accepted
      for API compatibility but unused here.
    name: The name to use for the operation.
    read_value: A `bool`. Whether to read and return the new value of the
      variable or not.

  Returns:
    If `read_value` is `True`, this method will return the new value of the
    variable after the assignment has completed. Otherwise, when in graph mode
    it will return the `Operation` that does the assignment, and when in eager
    mode it will return `None`.
  """
  with _handle_graph(self.handle), self._assign_dependencies():
    assign_add_op = gen_resource_variable_ops.assign_add_variable_op(
        self.handle, ops.convert_to_tensor(delta, dtype=self.dtype),
        name=name)
  if read_value:
    return self._lazy_read(assign_add_op)
  return assign_add_op
def _lazy_read(self, op):
  """Wraps an assignment op so its result reads as the updated variable."""
  if self.trainable:
    tape.variable_accessed(self)
  return _UnreadVariable(
      handle=self._handle, dtype=self.dtype, shape=self._shape,
      in_graph_mode=self._in_graph_mode,
      deleter=self._handle_deleter if not self._in_graph_mode else None,
      parent_op=op, unique_id=self._unique_id)
def assign(self, value, use_locking=None, name=None, read_value=True):
  """Assigns a new value to this variable.

  Args:
    value: A `Tensor`. The new value for this variable.
    use_locking: If `True`, use locking during the assignment. Note: accepted
      for API compatibility but unused here.
    name: The name to use for the assignment.
    read_value: A `bool`. Whether to read and return the new value of the
      variable or not.

  Returns:
    If `read_value` is `True`, this method will return the new value of the
    variable after the assignment has completed. Otherwise, when in graph mode
    it will return the `Operation` that does the assignment, and when in eager
    mode it will return `None`.
  """
  # Note: not depending on the cached value here since this can used to
  # initialize the variable.
  with _handle_graph(self.handle):
    value_tensor = ops.convert_to_tensor(value, dtype=self.dtype)
    # The new value must match the variable's static shape.
    self._shape.assert_is_compatible_with(value_tensor.shape)
    assign_op = gen_resource_variable_ops.assign_variable_op(
        self.handle, value_tensor, name=name)
  if read_value:
    return self._lazy_read(assign_op)
  return assign_op
def __reduce__(self):
  # Pickle by value: reconstruct from the current numpy value. Only usable in
  # eager mode, since numpy() raises NotImplementedError otherwise.
  return (ResourceVariable, (self.numpy(),))
def scatter_sub(self, sparse_delta, use_locking=False, name=None):
  """Subtracts `IndexedSlices` from this variable.

  Args:
    sparse_delta: `IndexedSlices` to be subtracted from this variable.
    use_locking: If `True`, use locking during the operation. Note: accepted
      for API compatibility but unused here.
    name: the name of the operation.

  Returns:
    A `Tensor` that will hold the new value of this variable after
    the scattered subtraction has completed.

  Raises:
    ValueError: if `sparse_delta` is not an `IndexedSlices`.
  """
  if not isinstance(sparse_delta, ops.IndexedSlices):
    raise ValueError("sparse_delta is not IndexedSlices: %s" % sparse_delta)
  return self._lazy_read(gen_resource_variable_ops.resource_scatter_sub(
      self.handle, sparse_delta.indices,
      ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name))
def scatter_add(self, sparse_delta, use_locking=False, name=None):
  """Adds `IndexedSlices` to this variable.

  Args:
    sparse_delta: `IndexedSlices` to be added to this variable.
    use_locking: If `True`, use locking during the operation. Note: accepted
      for API compatibility but unused here.
    name: the name of the operation.

  Returns:
    A `Tensor` that will hold the new value of this variable after
    the scattered addition has completed.

  Raises:
    ValueError: if `sparse_delta` is not an `IndexedSlices`.
  """
  if not isinstance(sparse_delta, ops.IndexedSlices):
    raise ValueError("sparse_delta is not IndexedSlices: %s" % sparse_delta)
  return self._lazy_read(gen_resource_variable_ops.resource_scatter_add(
      self.handle, sparse_delta.indices,
      ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name))
def scatter_update(self, sparse_delta, use_locking=False, name=None):
  """Assigns `IndexedSlices` to this variable.

  Args:
    sparse_delta: `IndexedSlices` to be assigned to this variable.
    use_locking: If `True`, use locking during the operation. Note: accepted
      for API compatibility but unused here.
    name: the name of the operation.

  Returns:
    A `Tensor` that will hold the new value of this variable after
    the scattered assignment has completed.

  Raises:
    ValueError: if `sparse_delta` is not an `IndexedSlices`.
  """
  if not isinstance(sparse_delta, ops.IndexedSlices):
    raise ValueError("sparse_delta is not IndexedSlices: %s" % sparse_delta)
  return self._lazy_read(gen_resource_variable_ops.resource_scatter_update(
      self.handle, sparse_delta.indices,
      ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name))
def scatter_nd_sub(self, indices, updates, name=None):
  """Applies sparse subtraction to individual values or slices in a Variable.

  `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.

  `indices` must be integer tensor, containing indices into `ref`.
  It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

  The innermost dimension of `indices` (with length `K`) corresponds to
  indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
  dimension of `ref`.

  `updates` is `Tensor` of rank `Q-1+P-K` with shape:

  ```
  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
  ```

  For example, say we want to subtract 4 scattered elements from a rank-1
  tensor with 8 elements. In Python, that update would look like this:

  ```python
      ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
      indices = tf.constant([[4], [3], [1] ,[7]])
      updates = tf.constant([9, 10, 11, 12])
      op = ref.scatter_nd_sub(indices, updates)
      with tf.Session() as sess:
        print sess.run(op)
  ```

  The resulting update to ref would look like this:

      [1, -9, 3, -6, -4, 6, 7, -4]

  See `tf.scatter_nd` for more details about how to make updates to
  slices.

  Args:
    indices: The indices to be used in the operation.
    updates: The values to be used in the operation.
    name: the name of the operation.

  Returns:
    A `Tensor` that will hold the new value of this variable after
    the scattered subtraction has completed.
  """
  return self._lazy_read(gen_state_ops.resource_scatter_nd_sub(
      self.handle, indices, ops.convert_to_tensor(updates, self.dtype),
      name=name))
def scatter_nd_add(self, indices, updates, name=None):
  """Applies sparse addition to individual values or slices in a Variable.

  `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.

  `indices` must be integer tensor, containing indices into `ref`.
  It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

  The innermost dimension of `indices` (with length `K`) corresponds to
  indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
  dimension of `ref`.

  `updates` is `Tensor` of rank `Q-1+P-K` with shape:

  ```
  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
  ```

  For example, say we want to add 4 scattered elements to a rank-1 tensor
  with 8 elements. In Python, that update would look like this:

  ```python
      ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
      indices = tf.constant([[4], [3], [1] ,[7]])
      updates = tf.constant([9, 10, 11, 12])
      add = ref.scatter_nd_add(indices, updates)
      with tf.Session() as sess:
        print sess.run(add)
  ```

  The resulting update to ref would look like this:

      [1, 13, 3, 14, 14, 6, 7, 20]

  See `tf.scatter_nd` for more details about how to make updates to
  slices.

  Args:
    indices: The indices to be used in the operation.
    updates: The values to be used in the operation.
    name: the name of the operation.

  Returns:
    A `Tensor` that will hold the new value of this variable after
    the scattered addition has completed.
  """
  return self._lazy_read(gen_state_ops.resource_scatter_nd_add(
      self.handle, indices, ops.convert_to_tensor(updates, self.dtype),
      name=name))
def scatter_nd_update(self, indices, updates, name=None):
  """Applies sparse assignment to individual values or slices in a Variable.

  `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.

  `indices` must be integer tensor, containing indices into `ref`.
  It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

  The innermost dimension of `indices` (with length `K`) corresponds to
  indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
  dimension of `ref`.

  `updates` is `Tensor` of rank `Q-1+P-K` with shape:

  ```
  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
  ```

  For example, say we want to assign 4 scattered elements of a rank-1 tensor
  with 8 elements. In Python, that update would look like this:

  ```python
      ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
      indices = tf.constant([[4], [3], [1] ,[7]])
      updates = tf.constant([9, 10, 11, 12])
      op = ref.scatter_nd_update(indices, updates)
      with tf.Session() as sess:
        print sess.run(op)
  ```

  The resulting update to ref would look like this:

      [1, 11, 3, 10, 9, 6, 7, 12]

  See `tf.scatter_nd` for more details about how to make updates to
  slices.

  Args:
    indices: The indices to be used in the operation.
    updates: The values to be used in the operation.
    name: the name of the operation.

  Returns:
    A `Tensor` that will hold the new value of this variable after
    the scattered assignment has completed.
  """
  return self._lazy_read(gen_state_ops.resource_scatter_nd_update(
      self.handle, indices, ops.convert_to_tensor(updates, self.dtype),
      name=name))
def _strided_slice_assign(self, begin, end, strides, value, name, begin_mask,
                          end_mask, ellipsis_mask, new_axis_mask,
                          shrink_axis_mask):
  """Assigns `value` to a strided slice of this variable.

  The begin/end/strides tensors and the mask arguments follow the
  strided-slice protocol (same encoding as `Tensor.__getitem__`).
  Returns a lazily-read `Tensor` holding the updated value.
  """
  # Build the op in the handle's graph and sequence it after pending assigns.
  with _handle_graph(self.handle), self._assign_dependencies():
    return self._lazy_read(
        gen_array_ops.resource_strided_slice_assign(
            ref=self.handle,
            begin=begin,
            end=end,
            strides=strides,
            # Cast/convert the payload to the variable's dtype up front.
            value=ops.convert_to_tensor(value, dtype=self.dtype),
            name=name,
            begin_mask=begin_mask,
            end_mask=end_mask,
            ellipsis_mask=ellipsis_mask,
            new_axis_mask=new_axis_mask,
            shrink_axis_mask=shrink_axis_mask))
def __int__(self):
  """Converts an integer-typed variable to a Python `int`."""
  if self.dtype not in (dtypes.int32, dtypes.int64):
    raise TypeError("Non-integer variable can't be converted to integer.")
  return int(self.value().numpy())
def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
  """Tensor-conversion hook used by `ops.convert_to_tensor`."""
  del name  # Unused: the read op names itself.
  # Only same-dtype conversion is supported; anything else is declined.
  if dtype is not None and dtype != self.dtype:
    return NotImplemented
  return self.read_value().op.inputs[0] if as_ref else self.value()
def __iadd__(self, unused_other):
  """Blocks `+=`; resource variables require an explicit `assign_add`."""
  msg = ("Variable += value not supported. Use "
         "variable.assign_add(value) to modify the variable "
         "value and variable = variable + value to get a new "
         "Tensor object.")
  raise RuntimeError(msg)
def __isub__(self, unused_other):
  """Blocks `-=`; resource variables require an explicit `assign_sub`."""
  msg = ("Variable -= value not supported. Use "
         "variable.assign_sub(value) to modify the variable "
         "value and variable = variable - value to get a new "
         "Tensor object.")
  raise RuntimeError(msg)
def __imul__(self, unused_other):
  """Blocks `*=`; in-place arithmetic must go through `assign`."""
  msg = ("Variable *= value not supported. Use "
         "`var.assign(var * value)` to modify the variable or "
         "`var = var * value` to get a new Tensor object.")
  raise RuntimeError(msg)
def __idiv__(self, unused_other):
  """Blocks Python 2 `/=`; in-place arithmetic must go through `assign`."""
  msg = ("Variable /= value not supported. Use "
         "`var.assign(var / value)` to modify the variable or "
         "`var = var / value` to get a new Tensor object.")
  raise RuntimeError(msg)
def __itruediv__(self, unused_other):
  """Blocks true-division `/=`; in-place arithmetic must go through `assign`."""
  msg = ("Variable /= value not supported. Use "
         "`var.assign(var / value)` to modify the variable or "
         "`var = var / value` to get a new Tensor object.")
  raise RuntimeError(msg)
def __irealdiv__(self, unused_other):
  """Blocks real-division `/=`; in-place arithmetic must go through `assign`."""
  msg = ("Variable /= value not supported. Use "
         "`var.assign(var / value)` to modify the variable or "
         "`var = var / value` to get a new Tensor object.")
  raise RuntimeError(msg)
def __ipow__(self, unused_other):
  """Blocks `**=`; in-place arithmetic must go through `assign`."""
  msg = ("Variable **= value not supported. Use "
         "`var.assign(var ** value)` to modify the variable or "
         "`var = var ** value` to get a new Tensor object.")
  raise RuntimeError(msg)
# Tell the C API about ResourceVariable so eager mode can special-case it,
# and hand math_ops a reference without creating a circular import.
pywrap_tensorflow.TFE_Py_RegisterResourceVariableType(ResourceVariable)
math_ops._resource_variable_type = ResourceVariable  # pylint: disable=protected-access
def _dense_var_to_tensor(var, dtype=None, name=None, as_ref=False):
  """Tensor-conversion function that delegates to the variable itself."""
  return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref)  # pylint: disable=protected-access
class _UnreadVariable(ResourceVariable):
  """Represents a future for a read of a variable.

  Pretends to be the tensor if anyone looks.
  """

  def __init__(self, handle, dtype,  # pylint: disable=super-init-not-called
               shape, in_graph_mode, deleter, parent_op, unique_id):
    """Wraps an existing variable handle whose read follows `parent_op`.

    Args:
      handle: Resource handle of the underlying variable.
      dtype: `DType` of values read from the variable.
      shape: Static shape recorded for the variable.
      in_graph_mode: True when constructed in graph (non-eager) mode.
      deleter: Object responsible for destroying the handle's resource.
      parent_op: Op that every read of this object is sequenced after.
      unique_id: Identifier shared with the parent variable.
    """
    # We do not call super init on purpose.
    self._trainable = False
    self._save_slice_info = None
    self._graph_key = ops.get_default_graph()._graph_key  # pylint: disable=protected-access
    self._in_graph_mode = in_graph_mode
    self._handle = handle
    self._shape = shape
    self._initial_value = None
    # Eager handles carry no graph name to mirror.
    if isinstance(self._handle, ops.EagerTensor):
      self._handle_name = ""
    else:
      self._handle_name = self._handle.name
    self._unique_id = unique_id
    self._dtype = dtype
    self._constraint = None
    self._cached_value = None
    self._is_initialized_op = None
    self._initializer_op = None
    self._parent_op = parent_op
    # In graph mode, materialize a graph element now so this object can be
    # used wherever a graph tensor is expected. NOTE: read_value() relies on
    # the attributes assigned above, so this must stay near the end.
    if context.executing_eagerly():
      self._graph_element = None
    else:
      self._graph_element = self.read_value()
    self._handle_deleter = deleter

  @property
  def name(self):
    # In graph mode the parent op's name identifies this delayed read.
    if self._in_graph_mode:
      return self._parent_op.name
    else:
      return "UnreadVariable"

  def value(self):
    """Returns the variable's value, read after `parent_op` runs."""
    return self._read_variable_op()

  def read_value(self):
    """Returns the variable's value, read after `parent_op` runs."""
    return self._read_variable_op()

  def _read_variable_op(self):
    # Sequence the read after the parent op: this is what makes the object a
    # "future" for the post-`parent_op` value.
    with ops.control_dependencies([self._parent_op]):
      return gen_resource_variable_ops.read_variable_op(self._handle,
                                                        self._dtype)

  def set_shape(self, shape):
    """Overrides the static shape and invalidates the cached shape list."""
    self._shape = shape
    self._cached_shape_as_list = None

  @property
  def op(self):
    """The op for this variable."""
    return self._parent_op
# Let _UnreadVariable instances be used wherever tensors are expected.
ops.register_tensor_conversion_function(_UnreadVariable, _dense_var_to_tensor)
ops.register_dense_tensor_like_type(_UnreadVariable)
class _MixedPrecisionVariable(ResourceVariable):
  """Represents a variable that can return in desired dtype when read.

  In mixed precision training, it is usually desirable to use different dtypes
  for variables and computation. This class will be used to wrap created
  ResourceVariable when mixed precision training is enabled. It allows layers to
  perform computation in a different dtype than their variable dtypes, in order
  to achieve higher performance without causing quality loss.
  """

  def __init__(self, var, read_dtype):
    """Creates a MixedPrecisionVariable.

    Args:
      var: A ResourceVariable instance.
      read_dtype: A tf.DType, the returned dtype when read, default to None.
        Casting is performed if read_dtype is not None and differs from
        var.dtype.

    Returns:
      An MixedPrecisionVariable instance.

    Raises:
      ValueError: if var is not a ResourceVariable instance, or read_dtype is
        not a tf.DType instance.
    """
    # pylint: disable=super-init-not-called
    # We do not call super init on purpose.
    if not isinstance(var, ResourceVariable):
      raise ValueError("InvalidArgument: var must be a ResourceVariable type.")
    if not isinstance(read_dtype, dtypes.DType):
      raise ValueError("InvalidArgument: read_dtype must be a tf.DType type.")
    self._var = var
    # Mirror the wrapped variable's bookkeeping so this object can stand in
    # for a plain ResourceVariable.
    self._trainable = var.trainable
    self._save_slice_info = None
    self._graph_key = ops.get_default_graph()._graph_key  # pylint: disable=protected-access
    self._in_graph_mode = var._in_graph_mode  # pylint: disable=protected-access
    self._handle = var.handle
    self._shape = var.shape
    self._initial_value = None
    # Eager handles carry no graph name to mirror.
    if isinstance(self.handle, ops.EagerTensor):
      self._handle_name = ""
    else:
      self._handle_name = self.handle.name
    self._unique_id = var._unique_id  # pylint: disable=protected-access
    self._dtype = var.dtype
    self._constraint = None
    self._cached_value = None
    self._is_initialized_op = var._is_initialized_op  # pylint: disable=protected-access
    self._initializer_op = var._initializer_op  # pylint: disable=protected-access
    # This needs to be set before read_value() is called.
    self._read_dtype = read_dtype
    if context.executing_eagerly():
      self._graph_element = None
    else:
      self._graph_element = self.read_value()
    # Only eager handles own their deleter; graph-mode cleanup is elsewhere.
    self._handle_deleter = (
        var._handle_deleter if not self._in_graph_mode  # pylint: disable=protected-access
        else None)
    # pylint: enable=super-init-not-called

  @property
  def name(self):
    """Name of the wrapped variable."""
    return self._var.name

  def value(self):
    """Returns the variable's value, cast to `read_dtype` when they differ."""
    return self._read_variable_op()

  def read_value(self):
    """Reads the variable's value, cast to `read_dtype` when they differ."""
    return self._read_variable_op()

  def _read_variable_op(self):
    with ops.colocate_with(self._handle):
      res = gen_resource_variable_ops.read_variable_op(self._handle,
                                                       self._dtype)
      # Cast only when the requested read dtype differs from storage dtype.
      if self._read_dtype != self._dtype:
        return math_ops.cast(res, self._read_dtype)
      else:
        return res

  def set_shape(self, shape):
    """Overrides the static shape and invalidates the cached shape list."""
    self._shape = shape
    self._cached_shape_as_list = None

  @property
  def op(self):
    """The op for this variable."""
    return self._var.op

  @property
  def read_dtype(self):
    """The dtype of the returned tensor when reading the var."""
    return self._read_dtype

  def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
    # Tensor-conversion hook: only non-ref conversion to read_dtype works.
    del name
    dtype = dtype or self.read_dtype
    if dtype != self.read_dtype or as_ref:
      return NotImplemented
    else:
      res = self.value()
    return res

  def _should_act_as_resource_variable(self):
    """To pass resource_variable_ops.is_resource_variable check."""
    pass
# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.
# Note: registering for Variable after ResourceVariable because inheritance will
# otherwise lead to the wrong behavior.
ops.register_tensor_conversion_function(ResourceVariable, _dense_var_to_tensor)
ops.register_tensor_conversion_function(
    variables.Variable, variables.Variable._TensorConversionFunction)  # pylint: disable=protected-access

# pylint: disable=protected-access
# Install the arithmetic/comparison operator overloads and advertise the
# class as dense-tensor-like.
ResourceVariable._OverloadAllOperators()
ops.register_dense_tensor_like_type(ResourceVariable)
@ops.RegisterGradient("ReadVariableOp")
def _ReadGrad(_, grad):
  """Gradient for read op: reading is the identity, so pass `grad` through."""
  return grad
@ops.RegisterGradient("ResourceGather")
def _GatherGrad(op, grad):
  """Gradient for gather op."""
  # Build appropriately shaped IndexedSlices
  handle = op.inputs[0]
  indices = op.inputs[1]
  params_shape = gen_resource_variable_ops.variable_shape(handle)
  # The slices' values have shape [num_indices] + params_shape[1:].
  size = array_ops.expand_dims(array_ops.size(indices), 0)
  values_shape = array_ops.concat([size, params_shape[1:]], 0)
  values = array_ops.reshape(grad, values_shape)
  indices = array_ops.reshape(indices, size)
  # The trailing None is the (non-existent) gradient for the indices input.
  return (ops.IndexedSlices(values, indices, params_shape), None)
def _to_proto_fn(v, export_scope=None):
  """Converts Variable and ResourceVariable to VariableDef for collections."""
  proto = v.to_proto(export_scope=export_scope)
  return proto
def _from_proto_fn(v, import_scope=None):
  """Creates Variable or ResourceVariable from VariableDef as needed."""
  # Pick the concrete class from the proto's is_resource flag.
  factory = ResourceVariable if v.is_resource else variables.Variable
  return factory.from_proto(v, import_scope=import_scope)
# Register VariableDef proto (de)serialization for every standard variable
# collection, in the same order the individual calls were made.
for _proto_collection_key in (ops.GraphKeys.GLOBAL_VARIABLES,
                              ops.GraphKeys.TRAINABLE_VARIABLES,
                              ops.GraphKeys.MOVING_AVERAGE_VARIABLES,
                              ops.GraphKeys.LOCAL_VARIABLES,
                              ops.GraphKeys.MODEL_VARIABLES,
                              ops.GraphKeys.GLOBAL_STEP):
  ops.register_proto_function(
      _proto_collection_key,
      proto_type=variable_pb2.VariableDef,
      to_proto=_to_proto_fn,
      from_proto=_from_proto_fn)
del _proto_collection_key  # Keep the module namespace clean.
def is_resource_variable(var):
  """Returns True if `var` is to be considered a ResourceVariable."""
  # Duck-typed opt-in: wrappers define _should_act_as_resource_variable.
  return isinstance(var, ResourceVariable) or hasattr(
      var, "_should_act_as_resource_variable")
| {
"content_hash": "84c4cc27550f5e65fa2f584dd4448e02",
"timestamp": "",
"source": "github",
"line_count": 1526,
"max_line_length": 108,
"avg_line_length": 40.07273918741809,
"alnum_prop": 0.6543147291131789,
"repo_name": "AnishShah/tensorflow",
"id": "55c2eb5fa487e96b62201511fdd7e82162c2bee5",
"size": "61840",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/resource_variable_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1286"
},
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "337393"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "48452986"
},
{
"name": "CMake",
"bytes": "195768"
},
{
"name": "Dockerfile",
"bytes": "36400"
},
{
"name": "Go",
"bytes": "1210238"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "834103"
},
{
"name": "Jupyter Notebook",
"bytes": "2584246"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "52618"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "40782103"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "458367"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
"""
Functions for de-spiking LA-ICPMS data (outlier removal).
(c) Oscar Branson : https://github.com/oscarbranson
"""
import numpy as np
# Despiking functions
def noise_despike(sig, win=3, nlim=24., maxiter=4):
    """
    Apply standard deviation filter to remove anomalous values.

    Assumes count statistics, i.e. the standard deviation of the signal
    is approximated as sqrt(signal).

    Parameters
    ----------
    sig : np.ndarray
        The signal to despike. Modified in place.
    win : int
        The window used to calculate rolling statistics. Must be odd so
        the window is centred on a point; even values are incremented.
    nlim : float
        The number of standard deviations above the rolling
        mean above which data are considered outliers.
    maxiter : int
        The maximum number of despiking passes.

    Returns
    -------
    np.ndarray
        The despiked signal (the same array as `sig`, modified in place).
    """
    if win % 2 != 1:
        win += 1  # win must be odd
    kernel = np.ones(win) / win  # make convolution kernel
    npad = (win - 1) // 2  # edge width dropped by the 'valid' convolution
    over = np.ones(len(sig), dtype=bool)  # initialize bool array
    # pad edges to avoid edge-effects: the rolling mean does not cover them.
    # BUGFIX: the previous `over[-npad:] = False` zeroed the WHOLE array when
    # npad == 0 (`over[-0:]` is `over[0:]`), silently disabling the filter.
    if npad > 0:
        over[:npad] = False
        over[-npad:] = False
    core = slice(npad, len(sig) - npad)  # region covered by the rolling mean
    # set up monitoring
    nloops = 0
    # do the despiking; repeat until no more removed or maxiter reached.
    while over.any() and nloops < maxiter:
        rmean = np.convolve(sig, kernel, 'valid')  # mean by convolution
        rstd = rmean**0.5  # std = sqrt(signal), because count statistics
        # identify where signal > mean + std * nlim
        over[core] = sig[core] > rmean + nlim * rstd
        # if any are over, replace them with the local rolling mean
        if over.any():
            sig[core][over[core]] = rmean[over[core]]
        nloops += 1
    return sig
def expdecay_despike(sig, expdecay_coef, tstep, maxiter=3):
    """
    Apply exponential decay filter to remove physically impossible data based on instrumental washout.

    The filter is re-applied until no more points are removed, or maxiter is reached.

    Parameters
    ----------
    sig : np.ndarray
        The signal to despike. Modified in place.
    expdecay_coef : float
        The exponential decay coefficient characterising instrument washout.
    tstep : float
        The time increment between data points.
    maxiter : int
        The maximum number of times the filter should be applied.

    Returns
    -------
    np.ndarray
        The despiked signal (the same array as `sig`, modified in place).
    """
    # determine rms noise of data
    noise = np.std(sig[:5])  # initially, calculated based on first 5 points
    # expand the selection up to 50 points, unless it dramatically increases
    # the std (i.e. catches the 'laser on' region)
    for i in [10, 20, 30, 50]:
        inoise = np.std(sig[:i])
        if inoise < 1.5 * noise:
            noise = inoise
    rms_noise3 = 3 * noise
    i = 0
    f = True
    while (i < maxiter) and f:
        # calculate low and high possible values based on exponential decay
        siglo = np.roll(sig * np.exp(tstep * expdecay_coef), 1)
        sighi = np.roll(sig * np.exp(-tstep * expdecay_coef), -1)
        # identify points that are outside these limits, beyond what might be
        # explained by noise in the data
        loind = (sig < siglo - rms_noise3) & (sig < np.roll(sig, -1) - rms_noise3)
        hiind = (sig > sighi + rms_noise3) & (sig > np.roll(sig, 1) + rms_noise3)
        # replace all such values with their preceding value
        sig[loind] = sig[np.roll(loind, -1)]
        sig[hiind] = sig[np.roll(hiind, -1)]
        # stop early once a pass removes nothing
        f = any(np.concatenate([loind, hiind]))
        i += 1
    return sig
"content_hash": "b96a460e37bcef7917096aec57452e35",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 102,
"avg_line_length": 33.398058252427184,
"alnum_prop": 0.5976744186046512,
"repo_name": "oscarbranson/latools",
"id": "bc1e21c17cb93636a7f03cac340a712519263042",
"size": "3440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "latools/processes/despiking.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "9176"
},
{
"name": "Jupyter Notebook",
"bytes": "8871326"
},
{
"name": "Makefile",
"bytes": "222"
},
{
"name": "Python",
"bytes": "492106"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup
def readreq(filename):
    """Parse a pip-style requirements file into a list of requirements.

    Three kinds of lines are handled:

    * editable/URL requirements (``-e ...``, ``http:`` or ``https:``):
      the project name is extracted from the ``#egg=`` fragment when
      present; lines without one are kept (whitespace-stripped).
    * blank or comment-only lines: skipped.
    * ordinary requirement lines: trailing ``#`` comments are stripped.

    :param filename: path of the requirements file to read.
    :returns: list of requirement strings suitable for setuptools.
    """
    result = []
    with open(filename) as f:
        for req in f:
            req = req.lstrip()
            # BUGFIX: 'https:' URLs were previously unrecognised and fell
            # through to the comment-stripping branch, which truncated them
            # at the '#egg=' fragment.
            if req.startswith(('-e ', 'http:', 'https:')):
                idx = req.find('#egg=')
                if idx >= 0:
                    # Keep only the project name from the egg fragment.
                    req = req[idx + 5:].partition('#')[0].strip()
                else:
                    # No egg fragment: keep the line, minus the trailing
                    # newline the old code accidentally preserved.
                    req = req.strip()
            else:
                # Strip trailing comments from a normal requirement line.
                req = req.partition('#')[0].strip()
            if not req:
                continue
            result.append(req)
    return result
def readfile(filename):
    """Return the entire contents of *filename* as a single string."""
    with open(filename) as stream:
        return stream.read()
# Package metadata and installation configuration for the `nutjob` module.
setup(
    name='nutjob',
    version='0.1.0',
    author='Kevin L. Mitchell',
    author_email='kevin.mitchell@rackspace.com',
    url='https://github.com/klmitch/nutjob',
    description="Redis/nutcracker client for Turnstile",
    long_description=readfile('README.rst'),
    license='Apache License (2.0)',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP',
    ],
    py_modules=['nutjob'],
    # Runtime and test dependencies live in dot-files parsed by readreq().
    install_requires=readreq('.requires'),
    tests_require=readreq('.test-requires'),
    # Expose the client to Turnstile via its redis_client entry-point group.
    entry_points={
        'turnstile.redis_client': [
            'nutjob = nutjob:NutJobStrictRedis',
        ],
    },
)
| {
"content_hash": "fbd7d03010b96dbc30cee0646e7dfc09",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 65,
"avg_line_length": 27.436363636363637,
"alnum_prop": 0.5400927766732936,
"repo_name": "klmitch/nutjob",
"id": "8b205fbcf29eafccd748180ca807ea3d3ec907cc",
"size": "1532",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9924"
}
],
"symlink_target": ""
} |
import gevent.monkey ; gevent.monkey.patch_all()
import os, sys, traceback as tb, bottle, json
from bottle import route, run, template
from uuid import uuid4
from cors import add_headers
from auth import *
@route('/')
def index():
    """Root endpoint: returns a fixed placeholder body."""
    return 'xxx'
@route('/hello/<name>')
def index(name):
    """Greets *name* with a templated HTML snippet."""
    # NOTE(review): this rebinds the module-level `index` defined for '/'.
    # bottle keeps both routes registered, but the name shadowing is fragile.
    return template('<b>Hello {{name}}</b>!', name=name)
@bottle.error(404)
def error404(err):
    """404 handler that still attaches the CORS headers."""
    add_headers(bottle.response)
    return 'Nothing here, sorry'
@route('/auth/login', method=['HEAD', 'OPTIONS'])
def login():
    """Answers HEAD probes and CORS preflight for the login endpoint."""
    add_headers(bottle.response)
    return ''
@route('/auth/login',method=['POST'])
def login():
add_headers(bottle.response)
#print "AUTH LOGIN", dict(bottle.request.params)
#print "AUTH LOGIN", (bottle.request.body)
d = bottle.request.body.read()
#print "AUTH LOGIN", repr(d)
j = json.loads(d)
print "AUTH LOGIN", repr(j)
inp = dict( data=j )
print "AUTH LOGIN INPUT", repr(inp)
ret = login_user( j['u'], j['p'] )
print "AUTH LOGIN -RET-", repr(ret)
return dict(result=ret)
if __name__ == '__main__':
    # Serve on the port given as the first CLI argument, using gevent.
    run(host='localhost', port=sys.argv[1], server='gevent')
| {
"content_hash": "5a1fb06b588ae986bc64a8bc1694948a",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 79,
"avg_line_length": 24.695652173913043,
"alnum_prop": 0.6452464788732394,
"repo_name": "val314159/old.authsvr",
"id": "a869f75219dd932e5214616590615620f74b9aca",
"size": "1136",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ws.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1134"
},
{
"name": "Python",
"bytes": "4021"
},
{
"name": "Shell",
"bytes": "182"
}
],
"symlink_target": ""
} |
from getpass import getpass

from jsonrpc import ServiceProxy

# Connect to the wallet's local JSON-RPC endpoint.
access = ServiceProxy("http://127.0.0.1:10011")
# SECURITY FIX: getpass does not echo the passphrase to the terminal,
# unlike the raw_input call it replaces.
pwd = getpass("Enter wallet passphrase: ")
# Unlock the wallet for 60 seconds.
access.walletpassphrase(pwd, 60)
| {
"content_hash": "44d42b2986a167155d063364640a80c7",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 47,
"avg_line_length": 39.75,
"alnum_prop": 0.7610062893081762,
"repo_name": "HundredCoinProject/HundredCoin",
"id": "7f8fca6675422156b0eec20a12b9a09abb6bd3ea",
"size": "159",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "contrib/wallettools/walletunlock.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "91288"
},
{
"name": "C++",
"bytes": "2437951"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Objective-C++",
"bytes": "5711"
},
{
"name": "Python",
"bytes": "37266"
},
{
"name": "Shell",
"bytes": "2562"
},
{
"name": "TypeScript",
"bytes": "5248950"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.