| text (string, lengths 4 to 1.02M) | meta (dict) |
|---|---|
from setuptools import setup
setup(
name='libipa',
version='0.0.6',
author='Andrew Udvare',
author_email='audvare@gmail.com',
packages=['ipa'],
scripts=['bin/ipa-unzip-bin', 'bin/ipa-dump-info'],
url='https://github.com/Tatsh/libipa',
license='LICENSE.txt',
description='Library to read IPA files (iOS application archives).',
test_suite='ipa.test',
long_description='No description.',
install_requires=[
'biplist>=0.7',
'six>=1.7.3',
],
)
|
{
"content_hash": "c6ae7567928dabfd04b2b16223bd76c5",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 72,
"avg_line_length": 26.789473684210527,
"alnum_prop": 0.6149312377210217,
"repo_name": "Tatsh/libipa",
"id": "58d26b88136b4d1353a37d29d4efe1664eb4183e",
"size": "555",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19327"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from words import *
from nltk.wordnet import *
from operator import itemgetter
import nltk
import re
from string import join
def build_word_associations():
cfd = nltk.ConditionalFreqDist()
# get a list of all English stop words
stopwords_list = nltk.corpus.stopwords.words('english')
# count words that occur within a window of size 5 ahead of other words
for sentence in nltk.corpus.brown.tagged_sents():
sentence = [(token.lower(), tag) for (token, tag) in sentence if token.lower() not in stopwords_list]
for (index, (token, tag)) in enumerate(sentence):
if token not in stopwords_list:
window = sentence[index+1:index+5]
for (window_token, window_tag) in window:
if window_token not in stopwords_list and window_tag[0] == tag[0]:
cfd[token].inc(window_token)
return cfd
def associate():
while True:
word = raw_input("Enter a word: ")
for i in range(100):
next = cfd[word].max()
if next:
print("->", next,)
word = next
else:
break
print()
def build_word_contexts(words):
contexts_to_words = {}
words = [w.lower() for w in words]
for i in range(1,len(words)-1):
context = words[i-1]+"_"+words[i+1]
if context not in contexts_to_words:
contexts_to_words[context] = []
contexts_to_words[context].append(words[i])
# inverted structure, tracking frequency
words_to_contexts = {}
for context in contexts_to_words:
for word in contexts_to_words[context]:
if word not in words_to_contexts:
words_to_contexts[word] = []
words_to_contexts[word].append(context)
return words_to_contexts, contexts_to_words
def search_contexts(words):
words_to_contexts, contexts_to_words = build_word_contexts(words)
while True:
hits = []
word = raw_input("word> ")
if word not in words_to_contexts:
print("Word not found")
continue
contexts = words_to_contexts[word]
for w in words_to_contexts: # all words
for context in words_to_contexts[w]:
if context in contexts:
hits.append(w)
hit_freqs = count_words(hits).items()
sorted_hits = sorted(hit_freqs, key=itemgetter(1), reverse=True)
words = [word for (word, count) in sorted_hits[1:] if count > 1]
print(join(words))
def lookup(word):
for category in [N, V, ADJ, ADV]:
if word in category:
for synset in category[word]:
print(category[word], ":", synset.gloss)
############################################
# Simple Tagger
############################################
# map brown pos tags
# http://khnt.hit.uib.no/icame/manuals/brown/INDEX.HTM
def map1(tag):
tag = re.sub(r'fw-', '', tag) # foreign words
tag = re.sub(r'-[th]l', '', tag) # headlines, titles
tag = re.sub(r'-nc', '', tag) # cited
tag = re.sub(r'ber?', 'vb', tag) # verb "to be"
tag = re.sub(r'hv', 'vb', tag) # verb "to have"
tag = re.sub(r'do', 'vb', tag) # verb "to do"
tag = re.sub(r'nc', 'nn', tag) # cited word
tag = re.sub(r'z', '', tag) # third-person singular
return tag
def map2(tag):
tag = re.sub(r'\bj[^-+]*', 'J', tag) # adjectives
tag = re.sub(r'\bp[^-+]*', 'P', tag) # pronouns
tag = re.sub(r'\bm[^-+]*', 'M', tag) # modals
tag = re.sub(r'\bq[^-+]*', 'Q', tag) # qualifiers
tag = re.sub(r'\babl', 'Q', tag) # qualifiers
tag = re.sub(r'\bab[nx]', 'D', tag) # determiners
tag = re.sub(r'\bap', 'D', tag) # determiners
tag = re.sub(r'\bd[^-+]*', 'D', tag) # determiners
tag = re.sub(r'\bat', 'D', tag) # determiners
tag = re.sub(r'\bw[^-+]*', 'W', tag) # wh words
tag = re.sub(r'\br[^-+]*', 'R', tag) # adverbs
tag = re.sub(r'\bto', 'T', tag) # "to"
tag = re.sub(r'\bc[cs]', 'C', tag) # conjunctions
tag = re.sub(r's', '', tag) # plurals
tag = re.sub(r'\bin', 'I', tag) # prepositions
tag = re.sub(r'\buh', 'U', tag) # interjections (uh)
tag = re.sub(r'\bex', 'E', tag) # existential "there"
tag = re.sub(r'\bvbn', 'VN', tag) # past participle
tag = re.sub(r'\bvbd', 'VD', tag) # past tense
tag = re.sub(r'\bvbg', 'VG', tag) # gerund
tag = re.sub(r'\bvb', 'V', tag) # verb
tag = re.sub(r'\bnn', 'N', tag) # noun
tag = re.sub(r'\bnp', 'NP', tag) # proper noun
tag = re.sub(r'\bnr', 'NR', tag) # adverbial noun
tag = re.sub(r'\bex', 'E', tag) # existential "there"
tag = re.sub(r'\bod', 'OD', tag) # ordinal
tag = re.sub(r'\bcd', 'CD', tag) # cardinal
tag = re.sub(r'-t', '', tag) # misc
tag = re.sub(r'[a-z\*]', '', tag) # misc
return tag
def map(tag):
return map2(map1(tag.lower()))
# print(sorted(set(map2(map1(tag)) for s in brown.tagged() for w,tag in s)))
def load_brown_corpus(sections):
global map
corpus = nltk.corpus.brown.tagged_sents(tuple(sections))
return [[(w.lower(), map(t)) for w, t in sent] for sent in corpus]
def train_tagger(corpus):
t0 = nltk.tag.Default('N')
t1 = nltk.tag.Unigram(cutoff=0, backoff=t0)
t2 = nltk.tag.Bigram(cutoff=0, backoff=t1)
t3 = nltk.tag.Trigram(cutoff=1, backoff=t2)
t1.train(corpus, verbose=True)
t2.train(corpus, verbose=True)
t3.train(corpus, verbose=True)
return t3
def tag(corpus):
print("Training tagger...")
tagger = train_tagger(corpus)
while True:
text = raw_input("sentence> ")
words = text.split()
print(join(word+"/"+tag for word, tag in tagger.tag(words)))
WORD_OR_TAG = '[^/ ]+'
BOUNDARY = r'\b'
def process(pattern):
new = []
for term in pattern.split():
if re.match('[A-Z]+$', term):
new.append(BOUNDARY + WORD_OR_TAG + '/' + term + BOUNDARY)
elif '/' in term:
new.append(BOUNDARY + term + BOUNDARY)
else:
new.append(BOUNDARY + term + '/' + WORD_OR_TAG + BOUNDARY)
return join(new)
def search(corpus, num=25):
print("Loading corpus...")
strings = [join(w+'/'+t for (w,t) in sent) for sent in corpus]
while True:
pattern = ""
while not pattern:
pattern = raw_input("search> ")
pattern = process(pattern)
i = 0
for sent in strings:
m = re.search(pattern, sent)
if m:
sent = ' '*35 + sent + ' '*45
print(sent[m.start():m.start()+80])
i += 1
if i > num:
break
############################################
# Wordnet Browser
# now incorporated into NLTK as wordnet.browse
############################################
############################################
# Mad Libs
############################################
madlib = """Britney Spears will meet up with her %(NP)s label for
crisis talks about the future of her %(N)s this week reports Digital Spy.
%(NP)s Records plan to tell Spears to stop %(VG)s and take more
care of her %(J)s image if she wants to avoid being %(VD)s by the noun.
The news %(V)s shortly after Britney posted a message on her
website promising a new album and tour. The last couple of years
have been quite a ride for me, the media has criticized %(P)s every
noun %(C)s printed a skewed perception of who I really am as a human
being, she wrote in a letter posted %(NR)s."""
# mapping = {}
# mapping['NP'] =
# mapping['N'] =
# mapping['VG'] =
# mapping['J'] =
# mapping['VD'] =
# mapping['V'] =
# mapping['P'] =
# mapping['C'] =
# mapping['NR'] =
# print(madlib % mapping)
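# A small self-check for build_word_contexts() above, guarded so it only runs when the
# file is executed directly (and assuming the Python 2 / old-NLTK environment the
# module-level imports require); the toy sentence is purely illustrative.
if __name__ == '__main__':
    toy = "the cat sat on the mat because the cat was tired".split()
    w2c, c2w = build_word_contexts(toy)
    print(w2c['cat'])      # contexts 'cat' appears in: ['the_sat', 'the_was']
    print(c2w['the_sat'])  # words seen between 'the' and 'sat': ['cat']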
|
{
"content_hash": "105550d13ff855dffdbb4cfbddda90b2",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 109,
"avg_line_length": 35.62162162162162,
"alnum_prop": 0.5403388973191705,
"repo_name": "nltk/nltk_teach",
"id": "72352ae38e5c597d4ecc6a7e92b4ef213070d79b",
"size": "7908",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/school/categories.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "702"
},
{
"name": "Python",
"bytes": "18470"
}
],
"symlink_target": ""
}
|
"""
Crypto Utils
"""
import base64
import math
from Crypto.Hash import SHA256
from Crypto.Random.random import StrongRandom
random = StrongRandom()
def random_mpz_lt(maximum, strong_random=random):
n_bits = int(math.floor(math.log(maximum, 2)))
res = strong_random.getrandbits(n_bits)
while res >= maximum:
res = strong_random.getrandbits(n_bits)
return res
random.mpz_lt = random_mpz_lt
def hash_b64(s):
"""
hash the string using sha256 and produce a base64 output
removes the trailing "="
"""
hasher = SHA256.new(s.encode('utf-8'))
result = base64.b64encode(hasher.digest())[:-1].decode('ascii')
return result
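# A minimal sketch exercising the helpers above; run the module directly to try it.
if __name__ == '__main__':
    # base64 of the 32-byte SHA-256 digest with the trailing '=' stripped (43 chars)
    print(hash_b64('hello world'))
    # random.mpz_lt was attached to the StrongRandom instance above; it keeps
    # drawing until the value falls strictly below the bound
    assert 0 <= random.mpz_lt(2 ** 255) < 2 ** 255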
|
{
"content_hash": "705364651f76be7e588b370f3fa319f9",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 67,
"avg_line_length": 21.677419354838708,
"alnum_prop": 0.6770833333333334,
"repo_name": "benadida/helios-server",
"id": "2fcce307f37bc81f0727f851c7ecbdaa60ace05c",
"size": "672",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "helios/crypto/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13437"
},
{
"name": "HTML",
"bytes": "162385"
},
{
"name": "Java",
"bytes": "2271"
},
{
"name": "JavaScript",
"bytes": "307826"
},
{
"name": "Procfile",
"bytes": "119"
},
{
"name": "Python",
"bytes": "483554"
},
{
"name": "Shell",
"bytes": "402"
}
],
"symlink_target": ""
}
|
import io, os.path, re, sys
from setuptools import setup
# environment markers require a recent setuptools and/or pip version
if sys.version_info >= (3, 3) or 'bdist_wheel' in sys.argv:
install_requires = []
elif sys.version_info >= (3, 0):
install_requires = ['ipaddress>=1.0.7']
else:
install_requires = ['ipaddress>=1.0.6']
with io.open(os.path.join(os.path.dirname(__file__), 'urilib', '__init__.py'),
encoding='utf8') as f:
metadata = dict(re.findall(r"__([a-z]+)__ = '([^']+)", f.read()))
setup(
name='urilib',
version=metadata['version'],
author='Thomas Kemmer, IGARASHI Masanao',
author_email='tkemmer@computer.org, syoux2@gmail.com',
url='https://github.com/masayuko/urilib/',
license='MIT',
description='Encode like a browser does, Unicode-aware, scheme-agnostic replacement for urlparse',
long_description=open('README.rst').read(),
keywords='uri url urlparse urlsplit urljoin urldefrag',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet',
'Topic :: Software Development :: Libraries :: Python Modules'
],
packages=['urilib'],
install_requires=install_requires,
extras_require={
':python_version == "2.7"': ['ipaddress>=1.0.6'],
':python_version == "3.2"': ['ipaddress>=1.0.7']
},
test_suite='tests'
)
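# A quick illustration (kept as a comment so it never runs at install time) of the
# metadata regex used above, applied to a toy string rather than the real
# urilib/__init__.py:
#   >>> re.findall(r"__([a-z]+)__ = '([^']+)", "__version__ = '1.0'\n__author__ = 'a. person'")
#   [('version', '1.0'), ('author', 'a. person')]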
|
{
"content_hash": "24bbd69a49b8465dc0e4393d797904fa",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 99,
"avg_line_length": 37.6,
"alnum_prop": 0.6106382978723405,
"repo_name": "masayuko/urilib",
"id": "e695f88d1b63a5e3fa37afd4cd20d864faec0eb0",
"size": "1880",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "66603"
}
],
"symlink_target": ""
}
|
from pmxbot import quotes
def test_MongoDBQuotes(mongodb_uri):
q = quotes.Quotes.from_URI(mongodb_uri)
q.lib = 'test'
clean = lambda: q.db.remove({'library': 'test'})
clean()
try:
q.quoteAdd('who would ever say such a thing')
q.quoteAdd('go ahead, take my pay')
q.quoteAdd("let's do the Time Warp again")
q.quoteLookup('time warp')
q.quoteLookup('nonexistent')
finally:
clean()
|
{
"content_hash": "2d5cb891155158251fe6d765f90f3769",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 49,
"avg_line_length": 25.875,
"alnum_prop": 0.6618357487922706,
"repo_name": "jamwt/diesel-pmxbot",
"id": "49f2116fd7a86b79102b0b8d4b5bb19ac9b6c8a9",
"size": "414",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/unit/test_quotes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "162520"
},
{
"name": "Shell",
"bytes": "131"
}
],
"symlink_target": ""
}
|
import sys, os, re
import phedexApi
# setup definitions
if not os.environ.get('DETOX_DB'):
print '\n ERROR - DETOX environment not defined: source setup.sh\n'
sys.exit(0)
# make sure we start in the right directory
os.chdir(os.environ.get('DETOX_BASE'))
deprecated = {}
siteDsets = {}
siteSize2Del = {}
siteLCPerc = {}
#===================================================================================================
# H E L P E R S
#===================================================================================================
def readDeprecated():
filename = 'DeprecatedSets.txt'
inputFile = open(os.environ['DETOX_DB'] + '/' + os.environ['DETOX_STATUS'] + '/'
+ filename, 'r')
for line in inputFile.xreadlines():
name,status = line.split()
deprecated[name] = 1
inputFile.close()
def readMatchPhedex():
filename = os.environ['DETOX_PHEDEX_CACHE']
inputFile = open(os.environ['DETOX_DB'] + '/' + os.environ['DETOX_STATUS'] + '/'
+ filename, "r")
for line in inputFile.xreadlines():
line = line.rstrip()
items = line.split()
# print items
datasetName = items[0]
group = items[1]
if datasetName not in deprecated:
continue
if group != 'AnalysisOps':
continue
#if group != 'caf-comm':
# continue
size = float(items[3])
site = items[5]
print site
if site not in siteSize2Del:
siteSize2Del[site] = 0
siteSize2Del[site] = siteSize2Del[site] + size
if site not in siteDsets:
siteDsets[site] = [datasetName]
else:
siteDsets[site].append(datasetName)
inputFile.close()
def requestDeletions(site,dsetNames):
phedex = phedexApi.phedexApi(logPath='./')
# compose data for deletion request
check,data = phedex.xmlData(datasets=dsetNames,instance='prod',level='block')
if check:
print " ERROR - phedexApi.xmlData failed"
sys.exit(1)
# here the request is really sent
message = 'IntelROCCS -- Cache Release Request -- Deprecated Datasets'
check,response = phedex.delete(node=site,data=data,comments=message,instance='prod')
if check:
print " ERROR - phedexApi.delete failed"
print site
print response
sys.exit(1)
respo = response.read()
matchObj = re.search(r'"id":"(\d+)"',respo)
reqid = int(matchObj.group(1))
del phedex
# here we brute force deletion to be approved
phedex = phedexApi.phedexApi(logPath='./')
check,response = phedex.updateRequest(decision='approve',request=reqid,node=site,instance='prod')
if check:
print " ERROR - phedexApi.updateRequest failed - reqid="+ str(reqid)
print response
del phedex
#====================================================================================================
# M A I N
#====================================================================================================
# find deprecated datasets
readDeprecated()
# match deprecated datasets against phedex data
readMatchPhedex()
total = 0
for site in sorted(siteSize2Del):
print ("%-7.1f %-18s" %(siteSize2Del[site],site))
total = total + siteSize2Del[site]
print ("Total size to be deleted = %8.1f GB" %total)
#submit deletion requests for deprecated datasets
print "\n You want do continue with those deleteions? [Y/N]"
line = sys.stdin.readline()
line = line.rstrip()
if line == 'Y':
for site in sorted(siteDsets):
#if 'T2_' not in site: continue
#if site.startswith('T2_DE'):
# continue
if site.startswith('T2_CH'):
continue
setsToDelete = siteDsets[site]
print site
print siteSize2Del[site]
for dset in setsToDelete:
print dset
#print setsToDelete
requestDeletions(site,setsToDelete)
|
{
"content_hash": "074964b8e9c4bca1b040cd074088c551",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 101,
"avg_line_length": 31.125,
"alnum_prop": 0.5522088353413654,
"repo_name": "sidnarayanan/IntelROCCS",
"id": "a03db776ef4b0758d989b05f301b8fbd1b039974",
"size": "4521",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Detox/deleteDeprecated.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "338"
},
{
"name": "CSS",
"bytes": "824"
},
{
"name": "HTML",
"bytes": "11295"
},
{
"name": "JavaScript",
"bytes": "6131"
},
{
"name": "Python",
"bytes": "500361"
},
{
"name": "Shell",
"bytes": "40798"
}
],
"symlink_target": ""
}
|
from flask import Flask
from flask.ext import admin
# Views
class FirstView(admin.BaseView):
@admin.expose('/')
def index(self):
return self.render('first.html')
class SecondView(admin.BaseView):
@admin.expose('/')
def index(self):
return self.render('second.html')
# Create flask app
app = Flask(__name__, template_folder='templates')
# Flask views
@app.route('/')
def index():
return '<a href="/admin1">Click me to get to Admin 1</a><br/><a href="/admin2">Click me to get to Admin 2</a>'
if __name__ == '__main__':
# Create first administrative interface under /admin1
admin1 = admin.Admin(app, url='/admin1')
admin1.add_view(FirstView())
# Create second administrative interface under /admin2
admin2 = admin.Admin(app, url='/admin2', endpoint='admin2')
admin2.add_view(SecondView())
# Start app
app.debug = True
app.run()
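# To try this example, run the file directly (it lives at examples/multi/multi.py) and
# open the two admin instances in a browser; Flask's development server defaults to
# http://127.0.0.1:5000/:
#   python multi.py     -> /admin1 serves FirstView, /admin2 serves SecondView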
|
{
"content_hash": "f72aa41c3d3b44651315bcefd90db85a",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 114,
"avg_line_length": 22.75,
"alnum_prop": 0.6472527472527473,
"repo_name": "sfermigier/flask-admin",
"id": "84acdf79b36e8536a555b66a05d946ef5092fb2c",
"size": "910",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/multi/multi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "74962"
},
{
"name": "Python",
"bytes": "205544"
},
{
"name": "Shell",
"bytes": "130"
}
],
"symlink_target": ""
}
|
import django
from django import forms
from django.contrib.admin.forms import AdminAuthenticationForm
from django.contrib.admin.sites import AdminSite
from .forms import OTPAuthenticationFormMixin
def _admin_template_for_django_version():
minor_django_version = django.VERSION[:2]
if minor_django_version <= (1, 4):
return 'otp/admin14/login.html'
elif minor_django_version == (1, 5):
return 'otp/admin15/login.html'
elif minor_django_version == (1, 6):
return 'otp/admin16/login.html'
elif minor_django_version == (1, 7):
return 'otp/admin17/login.html'
elif minor_django_version == (1, 8):
return 'otp/admin18/login.html'
else:
return 'otp/admin19/login.html'
class OTPAdminAuthenticationForm(AdminAuthenticationForm, OTPAuthenticationFormMixin):
"""
An :class:`~django.contrib.admin.forms.AdminAuthenticationForm` subclass
that solicits an OTP token. This has the same behavior as
:class:`~django_otp.forms.OTPAuthenticationForm`.
"""
otp_device = forms.CharField(required=False, widget=forms.Select)
otp_token = forms.CharField(required=False)
# This is a placeholder field that allows us to detect when the user clicks
# the otp_challenge submit button.
otp_challenge = forms.CharField(required=False)
def __init__(self, *args, **kwargs):
super(OTPAdminAuthenticationForm, self).__init__(*args, **kwargs)
# A little extra cheese to make it prettier.
minor_django_version = django.VERSION[:2]
if minor_django_version < (1, 6):
self.fields['otp_token'].widget.attrs['style'] = 'width: 14em;'
def clean(self):
self.cleaned_data = super(OTPAdminAuthenticationForm, self).clean()
self.clean_otp(self.get_user())
return self.cleaned_data
class OTPAdminSite(AdminSite):
"""
This is an :class:`~django.contrib.admin.AdminSite` subclass that requires
two-factor authentication. Only users that can be verified by a registered
OTP device will be authorized for this admin site. Unverified users will be
treated as if :attr:`~django.contrib.auth.models.User.is_staff` is
``False``.
"""
#: The default instance name of this admin site. You should instantiate
#: this class as ``OTPAdminSite(OTPAdminSite.name)`` to make sure the admin
#: templates render the correct URLs.
name = 'otpadmin'
login_form = OTPAdminAuthenticationForm
#: We automatically select a modified login template based on your Django
#: version. If it doesn't look right, your version may not be supported, in
#: which case feel free to replace it.
login_template = _admin_template_for_django_version()
def has_permission(self, request):
"""
In addition to the default requirements, this only allows access to
users who have been verified by a registered OTP device.
"""
return super(OTPAdminSite, self).has_permission(request) and request.user.is_verified()
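# A minimal wiring sketch (commented out) for a hypothetical project's urls.py, following
# the instantiation advice in the OTPAdminSite docstring above; it uses the Django 1.8-era
# URL style this module targets:
#
#   from django.conf.urls import include, url
#   from django_otp.admin import OTPAdminSite
#
#   otp_admin_site = OTPAdminSite(OTPAdminSite.name)
#   # register models on otp_admin_site just as you would on django.contrib.admin.site
#
#   urlpatterns = [
#       url(r'^admin/', include(otp_admin_site.urls)),
#   ]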
|
{
"content_hash": "e86582ca386379caefddfda6b8bb0697",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 95,
"avg_line_length": 37.9875,
"alnum_prop": 0.6916748930569266,
"repo_name": "robintema/django-otp",
"id": "d92bbb0fb50b2a4e87183a2ed54a5c669f811408",
"size": "3039",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_otp/admin.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "16149"
},
{
"name": "Makefile",
"bytes": "319"
},
{
"name": "Python",
"bytes": "95513"
}
],
"symlink_target": ""
}
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'PublicAccount'
db.create_table(u'weixin_publicaccount', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('model_utils.fields.AutoCreatedField')(default=datetime.datetime.now)),
('modified', self.gf('model_utils.fields.AutoLastModifiedField')(default=datetime.datetime.now)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('type', self.gf('django.db.models.fields.CharField')(default='subscribe', max_length=10)),
('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
('thumbnail_url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('callback_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('token', self.gf('django.db.models.fields.CharField')(max_length=255)),
('enabled', self.gf('django.db.models.fields.BooleanField')(default=False)),
('connect_status', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'weixin', ['PublicAccount'])
# Adding model 'App'
db.create_table(u'weixin_app', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('model_utils.fields.AutoCreatedField')(default=datetime.datetime.now)),
('modified', self.gf('model_utils.fields.AutoLastModifiedField')(default=datetime.datetime.now)),
('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
('thumbnail_url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal(u'weixin', ['App'])
# Adding model 'PublicAccountApp'
db.create_table(u'weixin_publicaccountapp', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('model_utils.fields.AutoCreatedField')(default=datetime.datetime.now)),
('modified', self.gf('model_utils.fields.AutoLastModifiedField')(default=datetime.datetime.now)),
('public_account', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['weixin.PublicAccount'])),
('app', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['weixin.App'])),
))
db.send_create_signal(u'weixin', ['PublicAccountApp'])
# Adding model 'Message'
db.create_table(u'weixin_message', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('model_utils.fields.AutoCreatedField')(default=datetime.datetime.now)),
('modified', self.gf('model_utils.fields.AutoLastModifiedField')(default=datetime.datetime.now)),
('public_account', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['weixin.PublicAccount'])),
('to_user_name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('from_user_name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('create_time', self.gf('django.db.models.fields.BigIntegerField')()),
('msg_type', self.gf('django.db.models.fields.CharField')(max_length=255)),
('msg_id', self.gf('django.db.models.fields.BigIntegerField')()),
('xml_content', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal(u'weixin', ['Message'])
# Adding model 'Event'
db.create_table(u'weixin_event', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('model_utils.fields.AutoCreatedField')(default=datetime.datetime.now)),
('modified', self.gf('model_utils.fields.AutoLastModifiedField')(default=datetime.datetime.now)),
('public_account', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['weixin.PublicAccount'])),
('to_user_name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('from_user_name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('create_time', self.gf('django.db.models.fields.BigIntegerField')()),
('event_type', self.gf('django.db.models.fields.CharField')(max_length=255)),
('xml_content', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal(u'weixin', ['Event'])
def backwards(self, orm):
# Deleting model 'PublicAccount'
db.delete_table(u'weixin_publicaccount')
# Deleting model 'App'
db.delete_table(u'weixin_app')
# Deleting model 'PublicAccountApp'
db.delete_table(u'weixin_publicaccountapp')
# Deleting model 'Message'
db.delete_table(u'weixin_message')
# Deleting model 'Event'
db.delete_table(u'weixin_event')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'weixin.app': {
'Meta': {'object_name': 'App'},
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'thumbnail_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'weixin.event': {
'Meta': {'object_name': 'Event'},
'create_time': ('django.db.models.fields.BigIntegerField', [], {}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'event_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'from_user_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'public_account': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['weixin.PublicAccount']"}),
'to_user_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'xml_content': ('django.db.models.fields.TextField', [], {})
},
u'weixin.message': {
'Meta': {'object_name': 'Message'},
'create_time': ('django.db.models.fields.BigIntegerField', [], {}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'from_user_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'msg_id': ('django.db.models.fields.BigIntegerField', [], {}),
'msg_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'public_account': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['weixin.PublicAccount']"}),
'to_user_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'xml_content': ('django.db.models.fields.TextField', [], {})
},
u'weixin.publicaccount': {
'Meta': {'object_name': 'PublicAccount'},
'callback_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'connect_status': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'thumbnail_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'subscribe'", 'max_length': '10'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'weixin.publicaccountapp': {
'Meta': {'object_name': 'PublicAccountApp'},
'app': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['weixin.App']"}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'public_account': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['weixin.PublicAccount']"})
}
}
complete_apps = ['weixin']
|
{
"content_hash": "db9b3c2c9aed4884bfe296623c8e5330",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 195,
"avg_line_length": 69.21693121693121,
"alnum_prop": 0.5863017887173215,
"repo_name": "lettoosoft/lettoo-weixin-platform-back",
"id": "51410f8ce85322763776297ff9bc20d00c690109",
"size": "13106",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/weixin/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "39"
},
{
"name": "JavaScript",
"bytes": "1451"
},
{
"name": "Python",
"bytes": "204592"
},
{
"name": "Shell",
"bytes": "6587"
}
],
"symlink_target": ""
}
|
"""
A script that takes a .svn/pristine/ hierarchy, with its existing
.svn/wc.db database, and populates the database's PRISTINE table
accordingly. (Use 'svn cleanup' to remove unreferenced pristines.)
Usage:
%s /path/to/wc [...]
"""
# TODO: resolve the NotImplemented() in __main__
# TODO: increment refcount upon collision
# TODO: add <given file>, not just argv[1]/.svn/pristine/??/*
import hashlib
import os
import re
import sqlite3
import sys
# ### This could require any other format that has the same PRISTINE schema
# ### and semantics.
FORMAT = 22
BUFFER_SIZE = 4 * 1024
class UnknownFormat(Exception):
def __init__(self, formatno):
self.formatno = formatno
def open_db(wc_path):
wc_db = os.path.join(wc_path, '.svn', 'wc.db')
conn = sqlite3.connect(wc_db)
curs = conn.cursor()
curs.execute('pragma user_version;')
formatno = int(curs.fetchone()[0])
if formatno > FORMAT:
raise UnknownFormat(formatno)
return conn
_sha1_re = re.compile(r'^[0-9a-f]{40}$')
def md5_of(path):
fd = os.open(path, os.O_RDONLY)
ctx = hashlib.md5()
while True:
s = os.read(fd, BUFFER_SIZE)
if len(s):
ctx.update(s)
else:
os.close(fd)
return ctx.hexdigest()
INSERT_QUERY = """
INSERT OR REPLACE
INTO pristine(checksum,compression,size,refcount,md5_checksum)
VALUES (?,?,?,?,?)
"""
def populate(wc_path):
conn = open_db(wc_path)
sys.stdout.write("Updating '%s': " % wc_path)
for dirname, dirs, files in os.walk(os.path.join(wc_path, '.svn/pristine/')):
# skip everything but .svn/pristine/xx/
if os.path.basename(os.path.dirname(dirname)) == 'pristine':
sys.stdout.write("'%s', " % os.path.basename(dirname))
for f in filter(lambda x: _sha1_re.match(x), files):
fullpath = os.path.join(dirname, f)
conn.execute(INSERT_QUERY,
('$sha1$'+f, None, os.stat(fullpath).st_size, 1,
'$md5 $'+md5_of(fullpath)))
# periodic transaction commits, for efficiency
conn.commit()
else:
sys.stdout.write(".\n")
if __name__ == '__main__':
raise NotImplementedError("""Subversion does not yet know to avoid fetching
a file when a file with matching sha1 appears in the PRISTINE table.""")
paths = sys.argv[1:]
if not paths:
paths = ['.']
for wc_path in paths:
try:
populate(wc_path)
except UnknownFormat, e:
sys.stderr.write("Don't know how to handle '%s' (format %d)'\n"
% (wc_path, e.formatno))
|
{
"content_hash": "e6527a7cc858bee967f10f54cfa453fd",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 79,
"avg_line_length": 28.06741573033708,
"alnum_prop": 0.6341072858286629,
"repo_name": "wbond/subversion",
"id": "8857371a43db38c26f2788d3038f6eb22faac933",
"size": "3307",
"binary": false,
"copies": "5",
"ref": "refs/heads/1.7.x",
"path": "tools/dev/wc-ng/populate-pristine.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "14370127"
},
{
"name": "C++",
"bytes": "601231"
},
{
"name": "CSS",
"bytes": "1216"
},
{
"name": "Emacs Lisp",
"bytes": "399044"
},
{
"name": "Java",
"bytes": "1212099"
},
{
"name": "Objective-C",
"bytes": "300774"
},
{
"name": "Perl",
"bytes": "226264"
},
{
"name": "Python",
"bytes": "4854316"
},
{
"name": "Ruby",
"bytes": "436740"
},
{
"name": "Shell",
"bytes": "177449"
},
{
"name": "VimL",
"bytes": "3010"
},
{
"name": "XSLT",
"bytes": "24950"
}
],
"symlink_target": ""
}
|
"""
Command-line sample that creates a one-time transfer from Amazon S3 to
Google Cloud Storage.
"""
import argparse
# [START storagetransfer_transfer_from_aws]
from datetime import datetime
from google.cloud import storage_transfer
def create_one_time_aws_transfer(
project_id: str, description: str,
source_bucket: str, aws_access_key_id: str,
aws_secret_access_key: str, sink_bucket: str):
"""Creates a one-time transfer job from Amazon S3 to Google Cloud
Storage."""
client = storage_transfer.StorageTransferServiceClient()
# The ID of the Google Cloud Platform Project that owns the job
# project_id = 'my-project-id'
# A useful description for your transfer job
# description = 'My transfer job'
# AWS S3 source bucket name
# source_bucket = 'my-s3-source-bucket'
# AWS Access Key ID
# aws_access_key_id = 'AKIA...'
# AWS Secret Access Key
# aws_secret_access_key = 'HEAoMK2.../...ku8'
# Google Cloud Storage destination bucket name
# sink_bucket = 'my-gcs-destination-bucket'
now = datetime.utcnow()
# Setting the start date and the end date as
# the same time creates a one-time transfer
one_time_schedule = {
'day': now.day,
'month': now.month,
'year': now.year
}
transfer_job_request = storage_transfer.CreateTransferJobRequest({
'transfer_job': {
'project_id': project_id,
'description': description,
'status': storage_transfer.TransferJob.Status.ENABLED,
'schedule': {
'schedule_start_date': one_time_schedule,
'schedule_end_date': one_time_schedule
},
'transfer_spec': {
'aws_s3_data_source': {
'bucket_name': source_bucket,
'aws_access_key': {
'access_key_id': aws_access_key_id,
'secret_access_key': aws_secret_access_key,
}
},
'gcs_data_sink': {
'bucket_name': sink_bucket,
}
}
}
})
result = client.create_transfer_job(transfer_job_request)
print(f'Created transferJob: {result.name}')
# [END storagetransfer_transfer_from_aws]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--project-id',
help='The ID of the Google Cloud Platform Project that owns the job',
required=True)
parser.add_argument(
'--description',
help='A useful description for your transfer job',
default='My transfer job')
parser.add_argument(
'--source-bucket',
help='AWS S3 source bucket name',
required=True)
parser.add_argument(
'--aws-access-key-id',
help='AWS access key ID',
required=True)
parser.add_argument(
'--aws-secret-access-key',
help='AWS secret access key',
required=True)
parser.add_argument(
'--sink-bucket',
help='Google Cloud Storage destination bucket name',
required=True)
args = parser.parse_args()
create_one_time_aws_transfer(**vars(args))
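# Example invocation (values are placeholders; each flag maps to an argument defined
# above):
#   python aws_request.py --project-id=my-project-id \
#       --description="One-time S3 import" \
#       --source-bucket=my-s3-source-bucket \
#       --sink-bucket=my-gcs-destination-bucket \
#       --aws-access-key-id=AKIA... \
#       --aws-secret-access-key=...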
|
{
"content_hash": "1601ca0128a2e81735025dc316ef67d8",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 77,
"avg_line_length": 29.772727272727273,
"alnum_prop": 0.5868702290076336,
"repo_name": "GoogleCloudPlatform/python-docs-samples",
"id": "cde1828461e8bf408c9ccc00039b258ef46ba7a6",
"size": "3874",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "storagetransfer/aws_request.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8008"
},
{
"name": "Dockerfile",
"bytes": "62031"
},
{
"name": "HTML",
"bytes": "69878"
},
{
"name": "JavaScript",
"bytes": "26494"
},
{
"name": "Jinja",
"bytes": "1892"
},
{
"name": "Jupyter Notebook",
"bytes": "47951698"
},
{
"name": "Makefile",
"bytes": "932"
},
{
"name": "Procfile",
"bytes": "138"
},
{
"name": "PureBasic",
"bytes": "11115"
},
{
"name": "Python",
"bytes": "5323502"
},
{
"name": "Shell",
"bytes": "78261"
}
],
"symlink_target": ""
}
|
import os
import time
import subprocess
from hashlib import sha1
VERTICAL, HORIZONTAL = 'v', 'h'
def execute(cmd):
popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = popen.communicate("")
if stderr:
raise Exception("error: %s" % stderr)
return stdout
class SpriteImage(object):
def __init__(self, path, name=None):
self.path = path
self.name = name or os.path.splitext(os.path.basename(path))[0]
@property
def size(self):
if not hasattr(self, '_size'):
from PIL import Image
img = Image.open(self.path)
self._size = img.size
return self._size
class Sprite(object):
def __init__(self, sources=(), offsets=(0, 0), align=VERTICAL, name=None):
self.images = [SpriteImage(source) for source in sources]
self.offsets = offsets
self.align = align
self.name = name
@property
def relpath(self):
return "icons/%s.png" % self.name
def generate_scss(self, f, name=None, add_hash=True):
name = name or self.name
url = self.relpath
if add_hash:
url += '?%s' % sha1(self.relpath + str(time.time())).hexdigest()
f.write(".%s{background-image: url(%s);}\n\n" % (name, url))
y_offset = self.offsets[1]
shortcut_mixin = []
for image in self.images:
x, y = -self.offsets[0], -y_offset
f.write("@mixin %s-%s($dx : 0px, $dy : 0px){@extend .%s;background-position: %spx + $dx %spx + $dy;}\n" % (name, image.name, name, x, y))
shortcut_mixin.append("&.%s{@include %s-%s($dx, $dy);}" % (image.name, name, image.name))
y_offset += image.size[1] + 2 * self.offsets[1]
f.write("\n@mixin %s($dx : 0px, $dy : 0px){\n %s\n}\n" % (name, "\n ".join(shortcut_mixin)))
def generate_image(self, f):
w, h = 0, 0
for image in self.images:
w = max(image.size[0], w)
h = max(image.size[1], h)
cmd = ['montage',
'-background', 'transparent',
'-tile', '1x%s' % len(self.images),
'-gravity', 'NorthWest',
'-geometry', '%sx%s+%s+%s' % (w, h, self.offsets[0], self.offsets[1]),
]
cmd += [image.path for image in self.images]
cmd.append('-')
f.write(execute(cmd))
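# A minimal usage sketch (commented out; the icon paths are placeholders). Requires
# Pillow for SpriteImage.size and ImageMagick's `montage` binary for generate_image():
#
#   sprite = Sprite(sources=['icons/a.png', 'icons/b.png'], offsets=(2, 2), name='toolbar')
#   with open('toolbar.scss', 'w') as scss:
#       sprite.generate_scss(scss)           # emits .toolbar plus @mixin toolbar-a/-b rules
#   with open(sprite.relpath, 'wb') as png:  # icons/toolbar.png, matching the SCSS url
#       sprite.generate_image(png)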
|
{
"content_hash": "be27f321e6d2734695b1070175e1808f",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 149,
"avg_line_length": 35.10144927536232,
"alnum_prop": 0.5412881915772089,
"repo_name": "emulbreh/shrubbery",
"id": "3ca459865c57800735488f8b48cd9da118b302ae",
"size": "2422",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shrubbery/chrome/sprites.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "186"
},
{
"name": "Python",
"bytes": "170934"
}
],
"symlink_target": ""
}
|
import os
import mock
from oslo.config import cfg
import neutron.common.test_lib as test_lib
from neutron.plugins.bigswitch import config
from neutron.tests.unit.bigswitch import fake_server
RESTPROXY_PKG_PATH = 'neutron.plugins.bigswitch.plugin'
L3_RESTPROXY_PKG_PATH = 'neutron.plugins.bigswitch.l3_router_plugin'
NOTIFIER = 'neutron.plugins.bigswitch.plugin.AgentNotifierApi'
CERTFETCH = 'neutron.plugins.bigswitch.servermanager.ServerPool._fetch_cert'
SERVER_MANAGER = 'neutron.plugins.bigswitch.servermanager'
HTTPCON = 'neutron.plugins.bigswitch.servermanager.httplib.HTTPConnection'
SPAWN = 'neutron.plugins.bigswitch.plugin.eventlet.GreenPool.spawn_n'
CWATCH = SERVER_MANAGER + '.ServerPool._consistency_watchdog'
class BigSwitchTestBase(object):
_plugin_name = ('%s.NeutronRestProxyV2' % RESTPROXY_PKG_PATH)
_l3_plugin_name = ('%s.L3RestProxy' % L3_RESTPROXY_PKG_PATH)
def setup_config_files(self):
etc_path = os.path.join(os.path.dirname(__file__), 'etc')
test_lib.test_config['config_files'] = [os.path.join(etc_path,
'restproxy.ini.test')]
self.addCleanup(cfg.CONF.reset)
config.register_config()
# Only try SSL on SSL tests
cfg.CONF.set_override('server_ssl', False, 'RESTPROXY')
cfg.CONF.set_override('ssl_cert_directory',
os.path.join(etc_path, 'ssl'), 'RESTPROXY')
# The mock interferes with HTTP(S) connection caching
cfg.CONF.set_override('cache_connections', False, 'RESTPROXY')
cfg.CONF.set_override('service_plugins', ['bigswitch_l3'])
cfg.CONF.set_override('add_meta_server_route', False, 'RESTPROXY')
def setup_patches(self):
self.plugin_notifier_p = mock.patch(NOTIFIER)
# prevent any greenthreads from spawning
self.spawn_p = mock.patch(SPAWN, new=lambda *args, **kwargs: None)
# prevent the consistency watchdog from starting
self.watch_p = mock.patch(CWATCH, new=lambda *args, **kwargs: None)
self.plugin_notifier_p.start()
self.spawn_p.start()
self.watch_p.start()
def startHttpPatch(self):
self.httpPatch = mock.patch(HTTPCON,
new=fake_server.HTTPConnectionMock)
self.httpPatch.start()
|
{
"content_hash": "72794d10dd5934dd1b1832ffb5d9202e",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 76,
"avg_line_length": 43.092592592592595,
"alnum_prop": 0.6716802750322304,
"repo_name": "uni2u/neutron",
"id": "422c5b95d46432e5642865cad12c4c8ff11f0bf4",
"size": "2941",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/bigswitch/test_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import sys
if sys.version_info < (2, 7):
raise Exception('The dark matter code needs Python 2.7 or later.')
# Note that the version string must have the following format, otherwise it
# will not be found by the version() function in ../setup.py
#
# Remember to update ../CHANGELOG.md describing what's new in each version.
__version__ = '4.0.6'
|
{
"content_hash": "62c9acf0c85fa441f9cb04ae5588da3d",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 75,
"avg_line_length": 35.1,
"alnum_prop": 0.7122507122507122,
"repo_name": "terrycojones/dark-matter",
"id": "b1783ea61cf37b93491511730791700634993555",
"size": "351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dark/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3380"
},
{
"name": "Python",
"bytes": "2143833"
},
{
"name": "Shell",
"bytes": "10548"
}
],
"symlink_target": ""
}
|
"""Merge all translation sources into a single JSON file."""
import glob
import json
import os
import re
from typing import Union, List, Dict
FILENAME_FORMAT = re.compile(r"strings\.(?P<suffix>\w+)\.json")
def load_json(filename: str) -> Union[List, Dict]:
"""Load JSON data from a file and return as dict or list.
"""
with open(filename, encoding="utf-8") as fdesc:
return json.loads(fdesc.read())
def save_json(filename: str, data: Union[List, Dict]):
"""Save JSON data to a file.
Returns True on success.
"""
data = json.dumps(data, sort_keys=True, indent=4)
with open(filename, "w", encoding="utf-8") as fdesc:
fdesc.write(data)
return True
def get_language(path):
"""Get the language code for the given file path."""
return os.path.splitext(os.path.basename(path))[0]
def get_component_path(lang, component):
"""Get the component translation path."""
if os.path.isdir(os.path.join("homeassistant", "components", component)):
return os.path.join(
"homeassistant",
"components",
component,
".translations",
"{}.json".format(lang),
)
else:
return os.path.join(
"homeassistant",
"components",
".translations",
"{}.{}.json".format(component, lang),
)
def get_platform_path(lang, component, platform):
"""Get the platform translation path."""
if os.path.isdir(os.path.join("homeassistant", "components", component, platform)):
return os.path.join(
"homeassistant",
"components",
component,
platform,
".translations",
"{}.json".format(lang),
)
else:
return os.path.join(
"homeassistant",
"components",
component,
".translations",
"{}.{}.json".format(platform, lang),
)
def get_component_translations(translations):
"""Get the component level translations."""
translations = translations.copy()
translations.pop("platform", None)
return translations
def save_language_translations(lang, translations):
"""Distribute the translations for this language."""
components = translations.get("component", {})
for component, component_translations in components.items():
base_translations = get_component_translations(component_translations)
if base_translations:
path = get_component_path(lang, component)
os.makedirs(os.path.dirname(path), exist_ok=True)
save_json(path, base_translations)
for platform, platform_translations in component_translations.get(
"platform", {}
).items():
path = get_platform_path(lang, component, platform)
os.makedirs(os.path.dirname(path), exist_ok=True)
save_json(path, platform_translations)
def main():
"""Run the script."""
if not os.path.isfile("requirements_all.txt"):
print("Run this from HA root dir")
return
paths = glob.iglob("build/translations-download/*.json")
for path in paths:
lang = get_language(path)
translations = load_json(path)
save_language_translations(lang, translations)
if __name__ == "__main__":
main()
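# A toy call (commented out) showing the nesting that save_language_translations()
# expects; the component and platform names here are placeholders:
#
#   save_language_translations("en", {
#       "component": {
#           "demo": {
#               "title": "Demo",                        # component-level strings
#               "platform": {"sensor": {"foo": "bar"}}, # platform-level strings
#           }
#       }
#   })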
|
{
"content_hash": "0d7ff1b424f724d86862949cb7b8e10f",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 87,
"avg_line_length": 29.338983050847457,
"alnum_prop": 0.6048526863084922,
"repo_name": "fbradyirl/home-assistant",
"id": "7f653e3651e36ab0aea1f5869f42da339e8e632c",
"size": "3485",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "script/translations_download_split.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1829"
},
{
"name": "Python",
"bytes": "16494727"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17784"
}
],
"symlink_target": ""
}
|
"""A library that provides a Python client to Slicing Dice API"""
import ujson
from . import exceptions
from .api import SlicingDiceAPI
from .url_resources import URLResources
from .utils import validators
class SlicingDice(SlicingDiceAPI):
"""A python interface to Slicing Dice API
Example usage:
To create an object of the SlicingDice:
from pyslicer.api import SlicingDice
sd = SlicingDice('my-token')
To create a column:
column_json = {
'name': 'Pyslicer String Column',
'description': 'Pyslicer example description',
'type': 'string',
'cardinality': 'low'}
print sd.create_column(column_json)
To make a query:
query_json = {
'type': 'count',
'select': [
{
"pyslicer-string-column":
{
"equal": "test_value_1"
}
},
"or",
{
"pyslicer-string-column":
{
"equal": "test_value_2"
}
},
]
}
print sd.query(query_json)
To insert data:
inserting_json = {
'foo@bar.com': {
'pyslicer-string-column': 'test_value_1',
'pyslicer-integer-column': 42,
},
'baz@bar.com': {
'pyslicer-string-column': 'test_value_2',
'pyslicer-integer-column': 42,
},
}
print sd.insert(inserting_json)
"""
def __init__(
self, write_key=None, read_key=None, master_key=None,
custom_key=None, use_ssl=True, timeout=60):
"""Instantiate a new SlicingDice object.
Keyword arguments:
key(string or SlicerKey obj) -- Key to access API
use_ssl(bool) -- Define whether HTTPS requests verify SSL
certificates. Defaults to True. (Optional)
timeout(int) -- Request timeout in seconds, defaults to 60.
"""
super(SlicingDice, self).__init__(
master_key, write_key, read_key, custom_key, use_ssl, timeout)
def _count_query_wrapper(self, url, query):
"""Validate count query and make request.
Keyword arguments:
url(string) -- Url to make request
query(dict) -- A count query
"""
sd_count_query = validators.QueryCountValidator(query)
if sd_count_query.validator():
return self._make_request(
url=url,
json_data=ujson.dumps(query),
req_type="post",
key_level=0)
def _data_extraction_wrapper(self, url, query):
"""Validate data extraction query and make request.
Keyword arguments:
url(string) -- Url to make request
query(dict) -- A data extraction query
"""
sd_extraction_result = validators.QueryDataExtractionValidator(query)
if sd_extraction_result.validator():
return self._make_request(
url=url,
json_data=ujson.dumps(query),
req_type="post",
key_level=0)
def _saved_query_wrapper(self, url, query, update=False):
"""Validate saved query and make request.
Keyword arguments:
url(string) -- Url to make request
query(dict) -- A saved query
update(bool) -- Indicates whether the operation updates an
existing saved query or not. (default False)
"""
req_type = "post"
if update:
req_type = "put"
return self._make_request(
url=url,
json_data=ujson.dumps(query),
req_type=req_type,
key_level=2)
def get_database(self):
"""Get a database associated with this client (related to keys passed
on construction)"""
url = SlicingDice.BASE_URL + URLResources.DATABASE
return self._make_request(
url=url,
req_type="get",
key_level=2
)
def create_column(self, data):
"""Create column in Slicing Dice
Keyword arguments:
data -- A dictionary or list in the Slicing Dice column
format.
"""
sd_data = validators.ColumnValidator(data)
if sd_data.validator():
url = SlicingDice.BASE_URL + URLResources.COLUMN
return self._make_request(
url=url,
req_type="post",
json_data=ujson.dumps(data),
key_level=1)
def get_columns(self):
"""Get a list of columns"""
url = SlicingDice.BASE_URL + URLResources.COLUMN
return self._make_request(
url=url,
req_type="get",
key_level=2)
def insert(self, data):
"""Insert data into Slicing Dice API
Keyword arguments:
data -- A dictionary in the Slicing Dice data format.
"""
sd_data = validators.InsertValidator(data)
if sd_data.validator():
url = SlicingDice.BASE_URL + URLResources.INSERT
return self._make_request(
url=url,
json_data=ujson.dumps(data),
req_type="post",
key_level=1)
def count_entity(self, query):
"""Make a count entity query
Keyword arguments:
query -- A dictionary in the Slicing Dice query
"""
url = SlicingDice.BASE_URL + URLResources.QUERY_COUNT_ENTITY
return self._count_query_wrapper(url, query)
def count_entity_total(self, dimensions=None):
"""Make a count entity total query
Keyword arguments:
dimensions -- A dictionary containing the dimensions in which
the total query will be performed
"""
query = {}
if dimensions is not None:
query['dimensions'] = dimensions
url = SlicingDice.BASE_URL + URLResources.QUERY_COUNT_ENTITY_TOTAL
return self._make_request(
url=url,
req_type="post",
json_data=ujson.dumps(query),
key_level=0)
def count_event(self, query):
"""Make a count event query
Keyword arguments:
query -- A dictionary query
"""
url = SlicingDice.BASE_URL + URLResources.QUERY_COUNT_EVENT
return self._count_query_wrapper(url, query)
def aggregation(self, query):
"""Make a aggregation query
Keyword arguments:
query -- An aggregation query
"""
url = SlicingDice.BASE_URL + URLResources.QUERY_AGGREGATION
if "query" not in query:
raise exceptions.InvalidQueryException(
"The aggregation query must have up the key 'query'.")
columns = query["query"]
if len(columns) > 5:
raise exceptions.MaxLimitException(
"The aggregation query must have up to 5 columns per request.")
return self._make_request(
url=url,
json_data=ujson.dumps(query),
req_type="post",
key_level=0)
def top_values(self, query):
"""Make a top values query
Keyword arguments:
query -- A dictionary query
"""
url = SlicingDice.BASE_URL + URLResources.QUERY_TOP_VALUES
sd_query_top_values = validators.QueryValidator(query)
if sd_query_top_values.validator():
return self._make_request(
url=url,
json_data=ujson.dumps(query),
req_type="post",
key_level=0)
def exists_entity(self, ids, dimension=None):
"""Make a exists entity query
Keyword arguments:
ids -- A list of entities to check for existence
dimension -- The dimension in which the entities should be checked
"""
url = SlicingDice.BASE_URL + URLResources.QUERY_EXISTS_ENTITY
if len(ids) > 100:
raise exceptions.MaxLimitException(
"The query exists entity must have up to 100 ids.")
query = {
'ids': ids
}
if dimension:
query['dimension'] = dimension
return self._make_request(
url=url,
json_data=ujson.dumps(query),
req_type="post",
key_level=0)
def get_saved_query(self, query_name):
"""Get a saved query
Keyword arguments:
query_name(string) -- The name of the saved query
"""
url = SlicingDice.BASE_URL + URLResources.QUERY_SAVED + query_name
return self._make_request(
url=url,
req_type="get",
key_level=0)
def get_saved_queries(self):
"""Get all saved queries
"""
url = SlicingDice.BASE_URL + URLResources.QUERY_SAVED
return self._make_request(
url=url,
req_type="get",
key_level=2)
def delete_saved_query(self, query_name):
"""Delete a saved query
Keyword arguments:
query_name(string) -- The name of the saved query
"""
url = SlicingDice.BASE_URL + URLResources.QUERY_SAVED + query_name
return self._make_request(
url=url,
req_type="delete",
key_level=2
)
def create_saved_query(self, query):
"""Get a list of queries saved
Keyword arguments:
query -- A dictionary query
"""
url = SlicingDice.BASE_URL + URLResources.QUERY_SAVED
return self._saved_query_wrapper(url, query)
def update_saved_query(self, name, query):
"""Get a list of queries saved
Keyword arguments:
name -- The name of the saved query to update
query -- A dictionary query
"""
url = SlicingDice.BASE_URL + URLResources.QUERY_SAVED + name
return self._saved_query_wrapper(url, query, True)
def result(self, query):
"""Get a data extraction result
Keyword arguments:
query -- A dictionary query
"""
url = SlicingDice.BASE_URL + URLResources.QUERY_DATA_EXTRACTION_RESULT
return self._data_extraction_wrapper(url, query)
def score(self, query):
"""Get a data extraction score
Keyword arguments:
query -- A dictionary query
"""
url = SlicingDice.BASE_URL + URLResources.QUERY_DATA_EXTRACTION_SCORE
return self._data_extraction_wrapper(url, query)
def sql(self, query):
""" Make a sql query to SlicingDice
:param query: the query written in SQL format
:return: The response from the SlicingDice
"""
url = SlicingDice.BASE_URL + URLResources.QUERY_SQL
return self._make_request(
url=url,
string_data=query,
req_type="post",
key_level=0,
content_type='application/sql')
def delete(self, query):
"""Make a delete request
Keyword arguments:
query -- The query that represents the data to be deleted
"""
url = SlicingDice.BASE_URL + URLResources.DELETE
return self._make_request(
url=url,
json_data=ujson.dumps(query),
req_type="post",
key_level=2)
def update(self, query):
"""Make a update request
Keyword arguments:
query -- The query that represents the data to be updated
"""
url = SlicingDice.BASE_URL + URLResources.UPDATE
return self._make_request(
url=url,
json_data=ujson.dumps(query),
req_type="post",
key_level=2)
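# Minimal usage sketch (illustrative only, not part of the original module):
# `client` is assumed to be a SlicingDice instance constructed elsewhere with
# the appropriate API keys, and the payloads below are hypothetical examples.
#
#   total = client.count_entity_total(dimensions=["users"])
#   result = client.sql("SELECT COUNT(*) FROM users WHERE age = 22")
#   client.delete({"query": [{"age": {"equals": 22}}], "dimension": "users"})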
|
{
"content_hash": "9375b87d141d2833752eab341c08a9f5",
"timestamp": "",
"source": "github",
"line_count": 384,
"max_line_length": 79,
"avg_line_length": 31.973958333333332,
"alnum_prop": 0.533881739697019,
"repo_name": "SlicingDice/slicingdice-python",
"id": "b5792b3b8d4cac276554886647e1bfaf205ac844",
"size": "12918",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyslicer/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52035"
}
],
"symlink_target": ""
}
|
import os
import random
import shutil
import stat
from contextlib import contextmanager
from tempfile import mkdtemp
@contextmanager
def temp_dir():
name = make_temp_dir()
yield name
shutil.rmtree(name)
def make_temp_dir():
if os.path.exists('/dev/shm/'):
if os.stat('/dev/shm').st_mode & stat.S_IWGRP:
dirname = 'django-cms-tests-%s' % random.randint(1,1000000)
path = os.path.join('/dev/shm', dirname)
while os.path.exists(path):
dirname = 'django-cms-tests-%s' % random.randint(1,1000000)
path = os.path.join('/dev/shm', dirname)
os.mkdir(path)
return path
return mkdtemp()
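# Minimal usage sketch (not part of the original module): the context manager
# yields a fresh scratch directory and removes it again on exit.
if __name__ == '__main__':
    with temp_dir() as path:
        print(os.path.isdir(path))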
|
{
"content_hash": "52ac7f1cb3f63594ebe86fb54805667c",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 75,
"avg_line_length": 28.32,
"alnum_prop": 0.5988700564971752,
"repo_name": "rsalmaso/django-cms",
"id": "19e679ab4fcdc9391e3e5e3f1d736fda2888d207",
"size": "708",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "cms/test_utils/tmpdir.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "204223"
},
{
"name": "JavaScript",
"bytes": "1250281"
},
{
"name": "Python",
"bytes": "2386268"
},
{
"name": "SCSS",
"bytes": "137693"
},
{
"name": "Shell",
"bytes": "22511"
}
],
"symlink_target": ""
}
|
"browser coordinates object"
from pycbio.sys.immutable import Immutable
# FIXME: support MAF db.chrom syntax, single base syntax, etc.
class CoordsError(Exception):
"Coordinate error"
pass
class Coords(Immutable):
"""Browser coordinates
Fields:
    chrom, start, end - start/end may be None to indicate a full chromosome
db - optional genome assembly database
chromSize - optional size of chromosome
"""
__slots__ = ("chrom", "start", "end", "db", "chromSize")
def __parseCombined__(self, coordsStr):
"parse chrom:start-end "
try:
self.chrom, rng = str.split(coordsStr, ":")
self.start, self.end = str.split(rng, "-")
self.start = int(self.start)
self.end = int(self.end)
except Exception as ex:
raise CoordsError("invalid coordinates: \"" + str(coordsStr) + "\": " + str(ex))
def __parseThree__(self, chrom, start, end):
"parse chrom, start, end. start/end maybe strings, int or None "
try:
self.chrom = chrom
self.start = int(start) if start is not None else None
self.end = int(end) if end is not None else None
except Exception as ex:
raise CoordsError("invalid coordinates: \"" + str(coordsStr) + "\": " + str(ex))
def __init__(self, *args, **kwargs):
"""args are either one argument in the form chr:start-end, or
chr, start, end. options are db= and chromSize="""
Immutable.__init__(self)
if len(args) == 1:
self.__parseCombined__(args[0])
elif len(args) == 3:
self.__parseThree__(args[0], args[1], args[2])
else:
raise CoordsError("Coords() excepts either one or three arguments")
self.db = kwargs.get("db")
self.chromSize = kwargs.get("chromSize")
self.mkImmutable()
def __str__(self):
return self.chrom + ":" + str(self.start) + "-" + str(self.end)
def size(self):
return self.end-self.start
def overlaps(self, other):
return ((self.chrom == other.chrom) and (self.start < other.end) and (self.end > other.start))
def pad(self, frac=0.05, minBases=5):
"""return Coords, padded with a fraction of the range."""
amt = int(frac*(self.end - self.start))
if amt < minBases:
amt = minBases
st = self.start - amt
if st < 0:
st = 0
return Coords(self.chrom, st, self.end+amt)
def __cmp__(self, other):
if other is None:
return -1
d = cmp(self.db, other.db)
if d == 0:
d = cmp(self.chrom, other.chrom)
if d == 0:
d = cmp(self.start, other.start)
if d == 0:
d = cmp(self.end, other.end)
return d
def __hash__(self):
return hash(self.db) + hash(self.chrom) + hash(self.start)
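# Minimal usage sketch (not part of the original module; values are
# illustrative): both construction forms yield equivalent coordinates.
def _coords_example():
    a = Coords("chr1:1000-2000", db="hg38")
    b = Coords("chr1", 1000, 2000, db="hg38")
    return a.overlaps(b), a.size()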
|
{
"content_hash": "59e9dbdd5bc44094a6fab06f926e2657",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 102,
"avg_line_length": 34.845238095238095,
"alnum_prop": 0.5596173556542535,
"repo_name": "ifiddes/pycbio",
"id": "e941cd20971d47d6e39decf5f851eaefcbd66e00",
"size": "2963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pycbio/hgbrowser/coords.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Gnuplot",
"bytes": "112679"
},
{
"name": "Makefile",
"bytes": "7284"
},
{
"name": "Python",
"bytes": "714944"
},
{
"name": "Shell",
"bytes": "186"
}
],
"symlink_target": ""
}
|
import base64
LRS_STATEMENT_ENDPOINT = 'http://localhost:8000/xapi/statements'
ENDPOINT_AUTH_USERNAME = 'tom'
ENDPOINT_AUTH_PASSWORD = '1234'
AUTHORIZATION = "Basic %s" % base64.b64encode("%s:%s" % (ENDPOINT_AUTH_USERNAME, ENDPOINT_AUTH_PASSWORD))
HEADERS = {
'Authorization': AUTHORIZATION,
'content-type': 'application/json',
'X-Experience-API-Version': '1.0.0'
}
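# Minimal usage sketch (not part of the original module; assumes the
# `requests` package is available, and the statement payload is a
# hypothetical example):
def _example_post_statement():
    import json
    import requests
    statement = {
        "actor": {"mbox": "mailto:tom@example.com"},
        "verb": {"id": "http://adlnet.gov/expapi/verbs/experienced"},
        "object": {"id": "http://example.com/activities/competency-example"},
    }
    return requests.post(LRS_STATEMENT_ENDPOINT, data=json.dumps(statement),
                         headers=HEADERS)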
|
{
"content_hash": "d89abce351a04321e656752c89e1d5d7",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 105,
"avg_line_length": 29.466666666666665,
"alnum_prop": 0.6085972850678733,
"repo_name": "creighton/competency-example",
"id": "fa8cce8adddbb7d6bad1248a58c40c8e16811554",
"size": "442",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14594"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import logging
import os
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.db.models import Max
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from djblets.db.fields import RelationCounterField
from reviewboard.admin.server import build_server_url
from reviewboard.attachments.managers import FileAttachmentManager
from reviewboard.attachments.mimetypes import MimetypeHandler
from reviewboard.diffviewer.models import FileDiff
from reviewboard.scmtools.models import Repository
from reviewboard.site.models import LocalSite
class FileAttachmentHistory(models.Model):
"""Revision history for a single file attachment.
This tracks multiple revisions of the same file attachment (for instance,
when someone replaces a screenshot with an updated version).
"""
display_position = models.IntegerField()
latest_revision = RelationCounterField('file_attachments')
def get_revision_to_id_map(self):
"""Return a map from revision number to FileAttachment ID."""
results = {}
for attachment in self.file_attachments.all():
results[attachment.attachment_revision] = attachment.id
return results
@staticmethod
def compute_next_display_position(review_request):
"""Compute the display position for a new FileAttachmentHistory."""
# Right now, display_position is monotonically increasing for each
# review request. In the future this might be extended to allow the
# user to change the order of attachments on the page.
max_position = (
FileAttachmentHistory.objects
.filter(review_request=review_request)
.aggregate(Max('display_position'))
.get('display_position__max')) or 0
return max_position + 1
class Meta:
db_table = 'attachments_fileattachmenthistory'
verbose_name = _('File Attachment History')
verbose_name_plural = _('File Attachment Histories')
@python_2_unicode_compatible
class FileAttachment(models.Model):
"""A file associated with a review request.
Like diffs, a file can have comments associated with it.
These comments are of type
:py:class:`reviewboard.reviews.models.FileAttachmentComment`.
"""
caption = models.CharField(_('caption'), max_length=256, blank=True)
draft_caption = models.CharField(_('draft caption'),
max_length=256, blank=True)
orig_filename = models.CharField(_('original filename'),
max_length=256, blank=True, null=True)
user = models.ForeignKey(User,
blank=True,
null=True,
related_name='file_attachments')
local_site = models.ForeignKey(LocalSite,
blank=True,
null=True,
related_name='file_attachments')
uuid = models.CharField(_('uuid'), max_length=255, blank=True)
file = models.FileField(_('file'),
max_length=512,
blank=True,
null=True,
upload_to=os.path.join('uploaded', 'files',
'%Y', '%m', '%d'))
mimetype = models.CharField(_('mimetype'), max_length=256, blank=True)
# repo_path, repo_revision, and repository are used to identify
# FileAttachments associated with committed binary files in a source tree.
# They are not used for new files that don't yet have a revision.
#
# For new files, the added_in_filediff association is used.
repo_path = models.CharField(_('repository file path'),
max_length=1024,
blank=True,
null=True)
repo_revision = models.CharField(_('repository file revision'),
max_length=64,
blank=True,
null=True,
db_index=True)
repository = models.ForeignKey(Repository,
blank=True,
null=True,
related_name='file_attachments')
added_in_filediff = models.ForeignKey(FileDiff,
blank=True,
null=True,
related_name='added_attachments')
attachment_history = models.ForeignKey(FileAttachmentHistory,
blank=True,
null=True,
related_name='file_attachments')
attachment_revision = models.IntegerField(default=0)
objects = FileAttachmentManager()
@property
def mimetype_handler(self):
"""Return the mimetype handler for this file."""
if not hasattr(self, '_thumbnail'):
self._thumbnail = MimetypeHandler.for_type(self)
return self._thumbnail
@property
def review_ui(self):
"""Return the review UI for this file."""
if not hasattr(self, '_review_ui'):
from reviewboard.reviews.ui.base import FileAttachmentReviewUI
self._review_ui = FileAttachmentReviewUI.for_type(self)
return self._review_ui
def _get_thumbnail(self):
"""Return the thumbnail for display."""
if not self.mimetype_handler:
return None
try:
return self.mimetype_handler.get_thumbnail()
except Exception as e:
logging.error('Error when calling get_thumbnail for '
'MimetypeHandler %r: %s',
self.mimetype_handler, e, exc_info=1)
return None
def _set_thumbnail(self, data):
"""Set the thumbnail."""
if not self.mimetype_handler:
return None
try:
self.mimetype_handler.set_thumbnail(data)
except Exception as e:
            logging.error('Error when calling set_thumbnail for '
'MimetypeHandler %r: %s',
self.mimetype_handler, e, exc_info=1)
return None
thumbnail = property(_get_thumbnail, _set_thumbnail)
@property
def filename(self):
"""Return the filename for display purposes."""
# Older versions of Review Board didn't store the original filename,
# instead just using the FileField's name. Newer versions have
# a dedicated filename field.
if self.file:
alt = os.path.basename(self.file.name)
else:
alt = None
return self.orig_filename or alt
@property
def display_name(self):
"""Return a display name for the file."""
if self.caption:
return self.caption
else:
return self.filename
@property
def icon_url(self):
"""Return the icon URL for this file."""
if not self.mimetype_handler:
return None
try:
return self.mimetype_handler.get_icon_url()
except Exception as e:
            logging.error('Error when calling get_icon_url for '
'MimetypeHandler %r: %s',
self.mimetype_handler, e, exc_info=1)
return None
@property
def is_from_diff(self):
"""Return if this file attachment is associated with a diff."""
return (self.repository_id is not None or
self.added_in_filediff_id is not None)
@property
def num_revisions(self):
"""Return the number of revisions of this attachment."""
return FileAttachment.objects.filter(
attachment_history=self.attachment_history_id).count() - 1
def __str__(self):
"""Return a string representation of this file for the admin list."""
return self.caption
def get_review_request(self):
"""Return the ReviewRequest that this file is attached to."""
if hasattr(self, '_review_request'):
return self._review_request
try:
return self.review_request.all()[0]
except IndexError:
try:
return self.inactive_review_request.all()[0]
except IndexError:
# Maybe it's on a draft.
try:
draft = self.drafts.get()
except ObjectDoesNotExist:
draft = self.inactive_drafts.get()
return draft.review_request
def get_comments(self):
"""Return all the comments made on this file attachment."""
if not hasattr(self, '_comments'):
self._comments = list(self.comments.all())
return self._comments
def get_absolute_url(self):
"""Return the absolute URL to download this file."""
if not self.file:
return None
url = self.file.url
if url.startswith('http:') or url.startswith('https:'):
return url
return build_server_url(url)
def is_accessible_by(self, user):
"""Returns whether or not the user has access to this FileAttachment.
This checks that the user has access to the LocalSite if the attachment
is associated with a local site. This is only applicable for user owned
file attachments.
"""
return (self.user and user.is_authenticated() and
(user.is_superuser or self.user == user) and
(not self.local_site or
self.local_site.is_accessible_by(user)))
def is_mutable_by(self, user):
"""Returns whether or not a user can modify this FileAttachment.
This checks that the user is either a superuser or the owner of the
file attachment. This is only applicable for user owned file
attachments.
"""
return self.user and (user.is_superuser or self.user == user)
class Meta:
db_table = 'attachments_fileattachment'
get_latest_by = 'attachment_revision'
verbose_name = _('File Attachment')
verbose_name_plural = _('File Attachments')
def get_latest_file_attachments(file_attachments):
"""Filter the list of file attachments to only return the latest revisions.
Args:
file_attachments (list of
reviewboard.attachments.models.FileAttachment):
The file attachments to filter.
Returns:
list of reviewboard.attachments.models.FileAttachment:
The list of file attachments that are the latest revisions in their
respective histories.
"""
file_attachment_histories = FileAttachmentHistory.objects.filter(
file_attachments__in=file_attachments)
latest = {
data['id']: data['latest_revision']
for data in file_attachment_histories.values('id', 'latest_revision')
}
return [
f
for f in file_attachments
if (not f.is_from_diff and
f.attachment_revision == latest[f.attachment_history_id])
]
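# Minimal usage sketch (illustrative only; `review_request` and its
# `file_attachments` accessor are hypothetical here):
#
#   attachments = list(review_request.file_attachments.all())
#   latest = get_latest_file_attachments(attachments)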
|
{
"content_hash": "30723f8bbe0c3b56af03ee04533fe02b",
"timestamp": "",
"source": "github",
"line_count": 313,
"max_line_length": 79,
"avg_line_length": 36.66134185303515,
"alnum_prop": 0.5860566448801743,
"repo_name": "sgallagher/reviewboard",
"id": "3670b64f4ce26673c6dc5d1406fab130b7c3aece",
"size": "11475",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "reviewboard/attachments/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "225650"
},
{
"name": "HTML",
"bytes": "185770"
},
{
"name": "JavaScript",
"bytes": "2121168"
},
{
"name": "Python",
"bytes": "4153859"
},
{
"name": "Shell",
"bytes": "20225"
}
],
"symlink_target": ""
}
|
"""Implements iptables rules using linux utilities."""
import inspect
import os
from neutron.agent.linux import utils as linux_utils
from neutron.common import utils
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# NOTE(vish): Iptables supports chain names of up to 28 characters, and we
# add up to 12 characters to binary_name which is used as a prefix,
# so we limit it to 16 characters.
# (max_chain_name_length - len('-POSTROUTING') == 16)
def get_binary_name():
"""Grab the name of the binary we're running in."""
return os.path.basename(inspect.stack()[-1][1])[:16]
binary_name = get_binary_name()
# The length of a wrapped chain name must be less than or equal to 11 characters.
# <max length of iptables chain name> - (<binary_name> + '-') = 28-(16+1) = 11
MAX_CHAIN_LEN_WRAP = 11
MAX_CHAIN_LEN_NOWRAP = 28
def get_chain_name(chain_name, wrap=True):
if wrap:
return chain_name[:MAX_CHAIN_LEN_WRAP]
else:
return chain_name[:MAX_CHAIN_LEN_NOWRAP]
class IptablesRule(object):
"""An iptables rule.
You shouldn't need to use this class directly, it's only used by
IptablesManager.
"""
def __init__(self, chain, rule, wrap=True, top=False,
binary_name=binary_name):
self.chain = get_chain_name(chain, wrap)
self.rule = rule
self.wrap = wrap
self.top = top
self.wrap_name = binary_name[:16]
def __eq__(self, other):
return ((self.chain == other.chain) and
(self.rule == other.rule) and
(self.top == other.top) and
(self.wrap == other.wrap))
def __ne__(self, other):
return not self == other
def __str__(self):
if self.wrap:
chain = '%s-%s' % (self.wrap_name, self.chain)
else:
chain = self.chain
return '-A %s %s' % (chain, self.rule)
class IptablesTable(object):
"""An iptables table."""
def __init__(self, binary_name=binary_name):
self.rules = []
self.remove_rules = []
self.chains = set()
self.unwrapped_chains = set()
self.remove_chains = set()
self.wrap_name = binary_name[:16]
def add_chain(self, name, wrap=True):
"""Adds a named chain to the table.
The chain name is wrapped to be unique for the component creating
it, so different components of Nova can safely create identically
named chains without interfering with one another.
At the moment, its wrapped name is <binary name>-<chain name>,
so if nova-compute creates a chain named 'OUTPUT', it'll actually
end up named 'nova-compute-OUTPUT'.
"""
name = get_chain_name(name, wrap)
if wrap:
self.chains.add(name)
else:
self.unwrapped_chains.add(name)
def _select_chain_set(self, wrap):
if wrap:
return self.chains
else:
return self.unwrapped_chains
def ensure_remove_chain(self, name, wrap=True):
"""Ensure the chain is removed.
This removal "cascades". All rule in the chain are removed, as are
all rules in other chains that jump to it.
"""
name = get_chain_name(name, wrap)
chain_set = self._select_chain_set(wrap)
if name not in chain_set:
return
self.remove_chain(name, wrap)
def remove_chain(self, name, wrap=True):
"""Remove named chain.
This removal "cascades". All rule in the chain are removed, as are
all rules in other chains that jump to it.
If the chain is not found, this is merely logged.
"""
name = get_chain_name(name, wrap)
chain_set = self._select_chain_set(wrap)
if name not in chain_set:
LOG.warn(_('Attempted to remove chain %s which does not exist'),
name)
return
chain_set.remove(name)
if not wrap:
# non-wrapped chains and rules need to be dealt with specially,
# so we keep a list of them to be iterated over in apply()
self.remove_chains.add(name)
# first, add rules to remove that have a matching chain name
self.remove_rules += [r for r in self.rules if r.chain == name]
# next, remove rules from list that have a matching chain name
self.rules = [r for r in self.rules if r.chain != name]
if not wrap:
jump_snippet = '-j %s' % name
# next, add rules to remove that have a matching jump chain
self.remove_rules += [r for r in self.rules
if jump_snippet in r.rule]
else:
jump_snippet = '-j %s-%s' % (self.wrap_name, name)
# finally, remove rules from list that have a matching jump chain
self.rules = [r for r in self.rules
if jump_snippet not in r.rule]
def add_rule(self, chain, rule, wrap=True, top=False):
"""Add a rule to the table.
This is just like what you'd feed to iptables, just without
the '-A <chain name>' bit at the start.
However, if you need to jump to one of your wrapped chains,
prepend its name with a '$' which will ensure the wrapping
is applied correctly.
"""
chain = get_chain_name(chain, wrap)
if wrap and chain not in self.chains:
raise LookupError(_('Unknown chain: %r') % chain)
if '$' in rule:
rule = ' '.join(map(self._wrap_target_chain, rule.split(' ')))
self.rules.append(IptablesRule(chain, rule, wrap, top, self.wrap_name))
def _wrap_target_chain(self, s):
if s.startswith('$'):
return ('%s-%s' % (self.wrap_name, s[1:]))
return s
def remove_rule(self, chain, rule, wrap=True, top=False):
"""Remove a rule from a chain.
Note: The rule must be exactly identical to the one that was added.
You cannot switch arguments around like you can with the iptables
CLI tool.
"""
chain = get_chain_name(chain, wrap)
try:
self.rules.remove(IptablesRule(chain, rule, wrap, top,
self.wrap_name))
if not wrap:
self.remove_rules.append(IptablesRule(chain, rule, wrap, top,
self.wrap_name))
except ValueError:
LOG.warn(_('Tried to remove rule that was not there:'
' %(chain)r %(rule)r %(wrap)r %(top)r'),
{'chain': chain, 'rule': rule,
'top': top, 'wrap': wrap})
def empty_chain(self, chain, wrap=True):
"""Remove all rules from a chain."""
chain = get_chain_name(chain, wrap)
chained_rules = [rule for rule in self.rules
if rule.chain == chain and rule.wrap == wrap]
for rule in chained_rules:
self.rules.remove(rule)
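# Minimal usage sketch (not part of the original module; names are
# illustrative): chains added with wrap=True are rendered with the
# "<binary_name>-" prefix, and '$chain' references inside rules are expanded
# to the wrapped chain name.
def _iptables_table_example():
    table = IptablesTable(binary_name='neutron-agent')
    table.add_chain('float-snat')
    table.add_chain('snat')
    table.add_rule('snat', '-j $float-snat')
    # -> ['-A neutron-agent-snat -j neutron-agent-float-snat']
    return [str(rule) for rule in table.rules]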
class IptablesManager(object):
"""Wrapper for iptables.
See IptablesTable for some usage docs
A number of chains are set up to begin with.
First, neutron-filter-top. It's added at the top of FORWARD and OUTPUT. Its
name is not wrapped, so it's shared between the various nova workers. It's
intended for rules that need to live at the top of the FORWARD and OUTPUT
chains. It's in both the ipv4 and ipv6 set of tables.
For ipv4 and ipv6, the built-in INPUT, OUTPUT, and FORWARD filter chains
are wrapped, meaning that the "real" INPUT chain has a rule that jumps to
the wrapped INPUT chain, etc. Additionally, there's a wrapped chain named
"local" which is jumped to from neutron-filter-top.
For ipv4, the built-in PREROUTING, OUTPUT, and POSTROUTING nat chains are
    wrapped in the same way as the built-in filter chains. Additionally,
there's a snat chain that is applied after the POSTROUTING chain.
"""
def __init__(self, _execute=None, state_less=False,
root_helper=None, use_ipv6=False, namespace=None,
binary_name=binary_name):
if _execute:
self.execute = _execute
else:
self.execute = linux_utils.execute
self.use_ipv6 = use_ipv6
self.root_helper = root_helper
self.namespace = namespace
self.iptables_apply_deferred = False
self.wrap_name = binary_name[:16]
self.ipv4 = {'filter': IptablesTable(binary_name=self.wrap_name)}
self.ipv6 = {'filter': IptablesTable(binary_name=self.wrap_name)}
# Add a neutron-filter-top chain. It's intended to be shared
# among the various nova components. It sits at the very top
# of FORWARD and OUTPUT.
for tables in [self.ipv4, self.ipv6]:
tables['filter'].add_chain('neutron-filter-top', wrap=False)
tables['filter'].add_rule('FORWARD', '-j neutron-filter-top',
wrap=False, top=True)
tables['filter'].add_rule('OUTPUT', '-j neutron-filter-top',
wrap=False, top=True)
tables['filter'].add_chain('local')
tables['filter'].add_rule('neutron-filter-top', '-j $local',
wrap=False)
# Wrap the built-in chains
builtin_chains = {4: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']},
6: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}}
if not state_less:
self.ipv4.update(
{'nat': IptablesTable(binary_name=self.wrap_name)})
builtin_chains[4].update({'nat': ['PREROUTING',
'OUTPUT', 'POSTROUTING']})
for ip_version in builtin_chains:
if ip_version == 4:
tables = self.ipv4
elif ip_version == 6:
tables = self.ipv6
for table, chains in builtin_chains[ip_version].iteritems():
for chain in chains:
tables[table].add_chain(chain)
tables[table].add_rule(chain, '-j $%s' %
(chain), wrap=False)
if not state_less:
# Add a neutron-postrouting-bottom chain. It's intended to be
# shared among the various nova components. We set it as the last
            # chain of the POSTROUTING chain.
self.ipv4['nat'].add_chain('neutron-postrouting-bottom',
wrap=False)
self.ipv4['nat'].add_rule('POSTROUTING',
'-j neutron-postrouting-bottom',
wrap=False)
# We add a snat chain to the shared neutron-postrouting-bottom
# chain so that it's applied last.
self.ipv4['nat'].add_chain('snat')
self.ipv4['nat'].add_rule('neutron-postrouting-bottom',
'-j $snat', wrap=False)
            # And then we add a float-snat chain and jump to it as the first
            # thing in the snat chain.
self.ipv4['nat'].add_chain('float-snat')
self.ipv4['nat'].add_rule('snat', '-j $float-snat')
def defer_apply_on(self):
self.iptables_apply_deferred = True
def defer_apply_off(self):
self.iptables_apply_deferred = False
self._apply()
def apply(self):
if self.iptables_apply_deferred:
return
self._apply()
@utils.synchronized('iptables', external=True)
def _apply(self):
"""Apply the current in-memory set of iptables rules.
This will blow away any rules left over from previous runs of the
same component of Nova, and replace them with our current set of
rules. This happens atomically, thanks to iptables-restore.
"""
s = [('iptables', self.ipv4)]
if self.use_ipv6:
s += [('ip6tables', self.ipv6)]
for cmd, tables in s:
args = ['%s-save' % (cmd,), '-c']
if self.namespace:
args = ['ip', 'netns', 'exec', self.namespace] + args
all_tables = self.execute(args, root_helper=self.root_helper)
all_lines = all_tables.split('\n')
for table_name, table in tables.iteritems():
start, end = self._find_table(all_lines, table_name)
all_lines[start:end] = self._modify_rules(
all_lines[start:end], table, table_name)
args = ['%s-restore' % (cmd,), '-c']
if self.namespace:
args = ['ip', 'netns', 'exec', self.namespace] + args
self.execute(args, process_input='\n'.join(all_lines),
root_helper=self.root_helper)
LOG.debug(_("IPTablesManager.apply completed with success"))
def _find_table(self, lines, table_name):
if len(lines) < 3:
                # fewer than 3 lines means there is no real table content
                # (e.g. fake iptables output)
return (0, 0)
try:
start = lines.index('*%s' % table_name) - 1
except ValueError:
# Couldn't find table_name
LOG.debug(_('Unable to find table %s'), table_name)
return (0, 0)
end = lines[start:].index('COMMIT') + start + 2
return (start, end)
def _find_rules_index(self, lines):
seen_chains = False
rules_index = 0
for rules_index, rule in enumerate(lines):
if not seen_chains:
if rule.startswith(':'):
seen_chains = True
else:
if not rule.startswith(':'):
break
if not seen_chains:
rules_index = 2
return rules_index
def _modify_rules(self, current_lines, table, table_name):
unwrapped_chains = table.unwrapped_chains
chains = table.chains
remove_chains = table.remove_chains
rules = table.rules
remove_rules = table.remove_rules
if not current_lines:
fake_table = ['# Generated by iptables_manager',
'*' + table_name, 'COMMIT',
'# Completed by iptables_manager']
current_lines = fake_table
# Fill old_filter with any chains or rules we might have added,
# they could have a [packet:byte] count we want to preserve.
# Fill new_filter with any chains or rules without our name in them.
old_filter, new_filter = [], []
for line in current_lines:
(old_filter if self.wrap_name in line else
new_filter).append(line.strip())
rules_index = self._find_rules_index(new_filter)
all_chains = [':%s' % name for name in unwrapped_chains]
all_chains += [':%s-%s' % (self.wrap_name, name) for name in chains]
# Iterate through all the chains, trying to find an existing
# match.
our_chains = []
for chain in all_chains:
chain_str = str(chain).strip()
orig_filter = [s for s in old_filter if chain_str in s.strip()]
dup_filter = [s for s in new_filter if chain_str in s.strip()]
new_filter = [s for s in new_filter if chain_str not in s.strip()]
# if no old or duplicates, use original chain
if orig_filter:
# grab the last entry, if there is one
old = orig_filter[-1]
chain_str = str(old).strip()
elif dup_filter:
# grab the last entry, if there is one
dup = dup_filter[-1]
chain_str = str(dup).strip()
else:
# add-on the [packet:bytes]
chain_str += ' - [0:0]'
our_chains += [chain_str]
# Iterate through all the rules, trying to find an existing
# match.
our_rules = []
bot_rules = []
for rule in rules:
rule_str = str(rule).strip()
# Further down, we weed out duplicates from the bottom of the
# list, so here we remove the dupes ahead of time.
orig_filter = [s for s in old_filter if rule_str in s.strip()]
dup_filter = [s for s in new_filter if rule_str in s.strip()]
new_filter = [s for s in new_filter if rule_str not in s.strip()]
# if no old or duplicates, use original rule
if orig_filter:
# grab the last entry, if there is one
old = orig_filter[-1]
rule_str = str(old).strip()
elif dup_filter:
# grab the last entry, if there is one
dup = dup_filter[-1]
rule_str = str(dup).strip()
# backup one index so we write the array correctly
rules_index -= 1
else:
# add-on the [packet:bytes]
rule_str = '[0:0] ' + rule_str
if rule.top:
# rule.top == True means we want this rule to be at the top.
our_rules += [rule_str]
else:
bot_rules += [rule_str]
our_rules += bot_rules
new_filter[rules_index:rules_index] = our_rules
new_filter[rules_index:rules_index] = our_chains
def _strip_packets_bytes(line):
# strip any [packet:byte] counts at start or end of lines
if line.startswith(':'):
# it's a chain, for example, ":neutron-billing - [0:0]"
line = line.split(':')[1]
line = line.split(' - [', 1)[0]
elif line.startswith('['):
# it's a rule, for example, "[0:0] -A neutron-billing..."
line = line.split('] ', 1)[1]
line = line.strip()
return line
seen_chains = set()
def _weed_out_duplicate_chains(line):
# ignore [packet:byte] counts at end of lines
if line.startswith(':'):
line = _strip_packets_bytes(line)
if line in seen_chains:
return False
else:
seen_chains.add(line)
# Leave it alone
return True
seen_rules = set()
def _weed_out_duplicate_rules(line):
if line.startswith('['):
line = _strip_packets_bytes(line)
if line in seen_rules:
return False
else:
seen_rules.add(line)
# Leave it alone
return True
def _weed_out_removes(line):
# We need to find exact matches here
if line.startswith(':'):
line = _strip_packets_bytes(line)
for chain in remove_chains:
if chain == line:
remove_chains.remove(chain)
return False
elif line.startswith('['):
line = _strip_packets_bytes(line)
for rule in remove_rules:
rule_str = _strip_packets_bytes(str(rule))
if rule_str == line:
remove_rules.remove(rule)
return False
# Leave it alone
return True
        # We filter duplicates. Go through the chains and rules, letting
        # the *last* occurrence take precedence since it could have a
# non-zero [packet:byte] count we want to preserve. We also filter
# out anything in the "remove" list.
new_filter.reverse()
new_filter = [line for line in new_filter
if _weed_out_duplicate_chains(line) and
_weed_out_duplicate_rules(line) and
_weed_out_removes(line)]
new_filter.reverse()
# flush lists, just in case we didn't find something
remove_chains.clear()
        del remove_rules[:]
return new_filter
def _get_traffic_counters_cmd_tables(self, chain, wrap=True):
name = get_chain_name(chain, wrap)
cmd_tables = [('iptables', key) for key, table in self.ipv4.items()
if name in table._select_chain_set(wrap)]
cmd_tables += [('ip6tables', key) for key, table in self.ipv6.items()
if name in table._select_chain_set(wrap)]
return cmd_tables
def get_traffic_counters(self, chain, wrap=True, zero=False):
"""Return the sum of the traffic counters of all rules of a chain."""
cmd_tables = self._get_traffic_counters_cmd_tables(chain, wrap)
if not cmd_tables:
LOG.warn(_('Attempted to get traffic counters of chain %s which '
'does not exist'), chain)
return
name = get_chain_name(chain, wrap)
acc = {'pkts': 0, 'bytes': 0}
for cmd, table in cmd_tables:
args = [cmd, '-t', table, '-L', name, '-n', '-v', '-x']
if zero:
args.append('-Z')
if self.namespace:
args = ['ip', 'netns', 'exec', self.namespace] + args
current_table = (self.execute(args,
root_helper=self.root_helper))
current_lines = current_table.split('\n')
for line in current_lines[2:]:
if not line:
break
data = line.split()
if (len(data) < 2 or
not data[0].isdigit() or
not data[1].isdigit()):
break
acc['pkts'] += int(data[0])
acc['bytes'] += int(data[1])
return acc
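# Minimal usage sketch (illustrative only; apply() shells out to
# iptables-save/iptables-restore, so it needs the appropriate privileges):
#
#   manager = IptablesManager(use_ipv6=False)
#   manager.ipv4['filter'].add_chain('my-chain')
#   manager.ipv4['filter'].add_rule('my-chain', '-j ACCEPT')
#   manager.apply()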
|
{
"content_hash": "b8b4d234b27802dbd49cea9769af9b1e",
"timestamp": "",
"source": "github",
"line_count": 594,
"max_line_length": 79,
"avg_line_length": 36.93434343434343,
"alnum_prop": 0.5387209991339623,
"repo_name": "citrix-openstack-build/neutron",
"id": "acb32b821914cbaad6daef8aca3cd59ff49cffad",
"size": "22734",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/agent/linux/iptables_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Python",
"bytes": "6817315"
},
{
"name": "Shell",
"bytes": "8983"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
}
|
""" Spark Fun Camera
Facilitates communication with the Sparkfun Camera
For more details see:
http://wiki.cospandesign.com/index.php?title=Wb_sf_camera
"""
__author__ = 'Cospan Design: info@cospandesign.com'
import sys
import os
import time
import i2c
from array import array as Array
from nysa.host.nysa import Nysa
from nysa.host.nysa import NysaCommError
import driver
from driver import NysaDMAException
from driver import DMAReadController
SPARKFUN_640_480_CAMERA_MODULE = 1
#Register Constants
CONTROL = 0
STATUS = 1
PIXEL_COUNT = 2
ROW_COUNT = 3
MEM_0_BASE = 4
MEM_0_SIZE = 5
MEM_1_BASE = 6
MEM_1_SIZE = 7
#Control bit values
CONTROL_ENABLE = 0
CONTROL_INTERRUPT_ENABLE = 1
CONTROL_AUTO_FLASH_EN = 2
CONTROL_MANUAL_FLASH_ON = 3
CONTROL_CAMERA_RESET = 4
CONTROL_RESET_COUNTS = 5
CONTROL_IMAGE_DEBUG = 6
#Status bit values
STATUS_MEMORY_0_FINISHED = 0
STATUS_MEMORY_1_FINISHED = 1
STATUS_IMAGE_CAPTURED = 2
STATUS_BUSY = 3
STATUS_LOCKED = 4
STATUS_ENABLE = 5
STATUS_MEMORY_0_EMPTY = 6
STATUS_MEMORY_1_EMPTY = 7
#Camera I2C Address
CAMERA_I2C = 0x3C
#CAMERA_I2C = 0x7A
#I2C Addresses
I2C_CLK = 0x03
I2C_CONFIG_UTILS= 0x02
I2C_IMG_CONFIG = 0x03
I2C_IMG_COUNTS = 0x1A
class SFCameraError(Exception):
pass
class SFCamera(driver.Driver):
@staticmethod
def get_abi_class():
return 0
@staticmethod
def get_abi_major():
return driver.get_device_id_from_name("camera")
@staticmethod
def get_abi_minor():
return SPARKFUN_640_480_CAMERA_MODULE
def __init__(self, nysa, urn, debug = False):
super(SFCamera, self).__init__(nysa, urn, debug)
#print "Camera URN : %s" % urn
i2c_urn = self.get_integration_references(self.urn)[0]
#print "i2c: URN: %s" % i2c_urn
self.i2c = i2c.I2C(nysa, i2c_urn, debug = debug)
self.i2c.enable_i2c(True)
self.i2c.enable_interrupt(True)
self.i2c.get_status()
self.i2c.set_speed_to_100khz()
self.reset_camera()
self.set_rgb_mode()
self.reset_counts()
time.sleep(.4)
row_count = self.read_row_count()
pixel_count = self.read_pixel_count()
size = row_count * pixel_count
self.width = pixel_count / 2
self.height = row_count
self.status = 0
try:
self.dma_reader = DMAReadController(device = self,
mem_base0 = 0x00000000,
mem_base1 = 0x00100000,
size = size / 4,
reg_status = STATUS,
reg_base0 = MEM_0_BASE,
reg_size0 = MEM_0_SIZE,
reg_base1 = MEM_1_BASE,
reg_size1 = MEM_1_SIZE,
timeout = 3,
finished0 = STATUS_MEMORY_0_FINISHED,
finished1 = STATUS_MEMORY_1_FINISHED,
empty0 = STATUS_MEMORY_0_EMPTY,
empty1 = STATUS_MEMORY_1_EMPTY)
except NysaDMAException as ex:
raise SFCameraError("Error initializing the DMA Reader: %s" % str(ex))
def __del__(self):
self.i2c.enable_i2c(False)
def get_width(self):
return self.width
def get_height(self):
return self.height
def stop_async_reader(self):
self.dma_reader.disable_asynchronous_read()
def start_async_reader(self, callback):
self.dma_reader.enable_asynchronous_read(callback)
def get_control(self):
"""get_control
Read the control register
Args:
Nothing
Return:
32-bit control register value
Raises:
NysaCommError: Error in communication
"""
return self.read_register(CONTROL)
def set_control(self, control):
"""set_control
Write the control register
Args:
control: 32-bit control value
Returns:
Nothing
Raises:
NysaCommError: Error in communication
"""
self.write_register(CONTROL, control)
def get_status(self):
"""get_status
read the status register
Args:
Nothing
Return:
32-bit status register value
Raises:
NysaCommError: Error in communication
"""
return self.read_register(STATUS)
def enable_camera(self, enable):
"""
Enable the camera core
Args:
enable(boolean): True/False to enable the camera and core
Returns:
Nothing
Raises:
NysaCommError
"""
self.enable_register_bit(CONTROL, CONTROL_ENABLE, enable)
self.enable_register_bit(CONTROL, CONTROL_INTERRUPT_ENABLE, enable)
def read_row_count(self):
"""
Read the number of rows from the camera
Args:
Nothing
Returns (int)
Number of rows in the image
Raises:
NysaCommError
"""
return self.read_register(ROW_COUNT)
def read_pixel_count(self):
"""
Reads the number of pixels in a row
Args:
Nothing
Returns (32-bit Integer):
Number of pixels in a row
Raises:
NysaCommError
"""
return self.read_register(PIXEL_COUNT)
def reset_counts(self):
"""
Resets the row and pixel counts so the core will re-read the values
        from an image
Args:
Nothing
Returns:
Nothing
Raises:
NysaCommError
"""
        self.set_register_bit(CONTROL, CONTROL_RESET_COUNTS)
        self.clear_register_bit(CONTROL, CONTROL_RESET_COUNTS)
def auto_led_mode(self, enable):
"""
Allow core to enable and disable the 'Flash' LED
Args:
enable(boolean):
True: Allow core to control LED
False: Do not allow core to control LED
Returns:
Nothing
Raises:
NysaCommError
"""
self.enable_register_bit(CONTROL, CONTROL_AUTO_FLASH_EN, enable)
def manual_flash_on(self, enable):
"""
When Auto mode is not enabled this will turn on the 'Flash'
Args:
enable(boolean):
                True: Turn on the flash
False: Turn off the flash
Returns:
Nothing
Raises:
NysaCommError
"""
        self.enable_register_bit(CONTROL, CONTROL_MANUAL_FLASH_ON, enable)
def reset_camera(self):
"""
Resets the camera core (sends reset to the camera as well)
Args:
Nothing
Return:
Nothing
Raises:
NysaCommError
"""
self.i2c.reset_i2c_core()
self.clear_register_bit(CONTROL, CONTROL_CAMERA_RESET)
time.sleep(.2)
self.set_register_bit(CONTROL, CONTROL_CAMERA_RESET)
time.sleep(.5)
def enable_image_debug(self, enable):
"""
Enable a debug image to be sent to the host from core
        (Exercises the memory controller)
Args:
enable (boolean):
True: Send the debug image to the host
False: Set the camera to normal mode
Returns:
Nothing
Raises:
NysaCommError
"""
self.enable_register_bit(CONTROL, CONTROL_IMAGE_DEBUG, enable)
def get_config_data(self):
config_dict = {}
img_config = self.i2c.read_from_i2c(CAMERA_I2C, Array('B', [0x02]), 2)
return img_config
def set_rgb_mode(self, mode = 0x00):
"""
Set the camera to output RGB data instead of YCBCR data
Args:
mode (32-bit unsigned integer) default = 0x00 (RGB)
Returns:
Nothing
Raises:
NysaCommError
"""
mode = mode << 2
mode |= 0x02
self.i2c.write_to_i2c(CAMERA_I2C, Array('B', [0x02, 0x40]))
self.i2c.write_to_i2c(CAMERA_I2C, Array('B', [I2C_IMG_CONFIG, mode]))
self.i2c.is_interrupt_enabled()
def set_start_pixel_count(self, start_pixel):
"""
Adjust the start of the pixel count within the camera
Args:
start_pixel (16-bit unsigned integer): Start pixel of the image
Returns:
Nothing
Raises:
NysaCommError
"""
sp = Array('B')
sp.append(start_pixel & 0xFF)
sp.append(((start_pixel) >> 8) & 0xFF)
self.i2c.write_to_i2c(CAMERA_I2C, Array('B', [0x1E, sp[0]]))
self.i2c.write_to_i2c(CAMERA_I2C, Array('B', [0x1F, sp[1]]))
def software_reset(self):
#self.i2c.write_to_i2c(CAMERA_I2C, Array('B', [0x02, 0x40]))
time.sleep(.5)
#self.i2c.write_to_i2c(CAMERA_I2C, Array('B', [0x02, 0x00]))
def set_debug_mode(self):
#print "Setting idle mode"
self.i2c.write_to_i2c(CAMERA_I2C, Array('B', [0x02, 0x08]))
def read_raw_image(self):
"""
        Read an entire image from memory. The size of the image is set
        during initialization of the DMA Reader controller. The data is
        returned unformatted (raw bytes).
Args:
Nothing
Returns:
(Array of bytes): Size of the image (row * pixel count)
"""
return self.dma_reader.read(anticipate = True)
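# Minimal usage sketch (illustrative only; requires a Nysa platform with the
# camera core discovered at `urn`):
#
#   camera = SFCamera(nysa, urn)
#   camera.enable_camera(True)
#   raw = camera.read_raw_image()
#   camera.enable_camera(False)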
|
{
"content_hash": "ed11f565b162b59292d94d6ceabeaaf4",
"timestamp": "",
"source": "github",
"line_count": 392,
"max_line_length": 86,
"avg_line_length": 25.635204081632654,
"alnum_prop": 0.5301024977609713,
"repo_name": "CospanDesign/nysa",
"id": "e4e7d7144c5f592f1ce73996cc6febe63c7bc138",
"size": "11189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nysa/host/driver/sf_camera.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Coq",
"bytes": "6020"
},
{
"name": "Makefile",
"bytes": "1167"
},
{
"name": "Python",
"bytes": "1238364"
},
{
"name": "Shell",
"bytes": "186"
},
{
"name": "Verilog",
"bytes": "235254"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class GroupGetMemberGroupsParameters(Model):
"""Request parameters for GetMemberGroups API call.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are
     deserialized to this collection
:type additional_properties: dict[str, object]
:param security_enabled_only: Required. If true, only membership in
security-enabled groups should be checked. Otherwise, membership in all
groups should be checked.
:type security_enabled_only: bool
"""
_validation = {
'security_enabled_only': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'security_enabled_only': {'key': 'securityEnabledOnly', 'type': 'bool'},
}
def __init__(self, *, security_enabled_only: bool, additional_properties=None, **kwargs) -> None:
super(GroupGetMemberGroupsParameters, self).__init__(**kwargs)
self.additional_properties = additional_properties
self.security_enabled_only = security_enabled_only
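# Minimal usage sketch (not part of the original module): only membership in
# security-enabled groups will be checked when this payload is sent.
def _example_parameters():
    return GroupGetMemberGroupsParameters(security_enabled_only=True)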
|
{
"content_hash": "7c38b534a9611fffa0c7871b7c6e67b8",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 101,
"avg_line_length": 38.43333333333333,
"alnum_prop": 0.6903729401561145,
"repo_name": "Azure/azure-sdk-for-python",
"id": "2ff9062d6d94e77eb0bac68efb148025a9f0c618",
"size": "1627",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/graphrbac/azure-graphrbac/azure/graphrbac/models/group_get_member_groups_parameters_py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
import os
import sys
from distutils.util import strtobool
import tempfile
from subprocess import call
def editor(text):
EDITOR = os.environ.get('EDITOR','vi')
with tempfile.NamedTemporaryFile(suffix=".tmp", delete=False) as tmpf:
tmpf.write(text)
tmpf.flush()
call([EDITOR, tmpf.name])
return tmpf.name
def remove(filename):
os.remove(filename)
def yesno(question):
sys.stdout.write('%s [y/n]: ' % question)
while True:
try:
return strtobool(raw_input().lower())
except ValueError:
sys.stdout.write('Please respond with \'y\' or \'n\'.\n')
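# Minimal usage sketch (illustrative only; editor() blocks until $EDITOR
# exits and yesno() reads an answer from stdin):
#
#   fname = editor("initial text")
#   if yesno("Keep the edited file?"):
#       print(open(fname).read())
#   remove(fname)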
|
{
"content_hash": "cf37018a7dcd3c173ae98d3fd2f531be",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 74,
"avg_line_length": 26.458333333333332,
"alnum_prop": 0.6330708661417322,
"repo_name": "vvgelder/turret",
"id": "9c3a9998c9432db700c27c0fc8a1e2ef43991b4d",
"size": "635",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/turret/console.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "37463"
}
],
"symlink_target": ""
}
|
"""
:class:`.Baidu` is the Baidu Maps geocoder.
"""
from geopy.compat import urlencode
from geopy.geocoders.base import Geocoder, DEFAULT_TIMEOUT
from geopy.exc import (
GeocoderQueryError,
GeocoderQuotaExceeded,
GeocoderAuthenticationFailure,
)
from geopy.location import Location
from geopy.util import logger
__all__ = ("Baidu", )
class Baidu(Geocoder):
"""
Geocoder using the Baidu Maps v2 API. Documentation at:
http://developer.baidu.com/map/webservice-geocoding.htm
"""
def __init__(
self,
api_key,
scheme='http',
timeout=DEFAULT_TIMEOUT,
proxies=None,
user_agent=None
):
"""
Initialize a customized Baidu geocoder using the v2 API.
.. versionadded:: 1.0.0
:param string api_key: The API key required by Baidu Map to perform
geocoding requests. API keys are managed through the Baidu APIs
console (http://lbsyun.baidu.com/apiconsole/key).
:param string scheme: Use 'https' or 'http' as the API URL's scheme.
            Default is http; only http is supported.
:param dict proxies: If specified, routes this geocoder's requests
through the specified proxy. E.g., {"https": "192.0.2.0"}. For
more information, see documentation on
:class:`urllib2.ProxyHandler`.
"""
super(Baidu, self).__init__(
scheme=scheme, timeout=timeout, proxies=proxies, user_agent=user_agent
)
self.api_key = api_key
self.scheme = scheme
self.doc = {}
self.api = 'http://api.map.baidu.com/geocoder/v2/'
@staticmethod
def _format_components_param(components):
"""
Format the components dict to something Baidu understands.
"""
return "|".join(
(":".join(item)
for item in components.items()
)
)
def geocode(
self,
query,
exactly_one=True,
timeout=None
):
"""
Geocode a location query.
:param string query: The address or query you wish to geocode.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
"""
params = {
'ak': self.api_key,
'output': 'json',
'address': self.format_string % query,
}
url = "?".join((self.api, urlencode(params)))
logger.debug("%s.geocode: %s", self.__class__.__name__, url)
return self._parse_json(
self._call_geocoder(url, timeout=timeout), exactly_one=exactly_one
)
def reverse(self, query, timeout=None): # pylint: disable=W0221
"""
Given a point, find an address.
:param query: The coordinates for which you wish to obtain the
closest human-readable addresses.
:type query: :class:`geopy.point.Point`, list or tuple of (latitude,
longitude), or string as "%(latitude)s, %(longitude)s"
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
"""
params = {
'ak': self.api_key,
'output': 'json',
'location': self._coerce_point_to_string(query),
}
url = "?".join((self.api, urlencode(params)))
logger.debug("%s.reverse: %s", self.__class__.__name__, url)
return self._parse_reverse_json(
self._call_geocoder(url, timeout=timeout)
)
@staticmethod
def _parse_reverse_json(page):
"""
Parses a location from a single-result reverse API call.
"""
place = page.get('result')
location = place.get('formatted_address').encode('utf-8')
latitude = place['location']['lat']
longitude = place['location']['lng']
return Location(location, (latitude, longitude), place)
def _parse_json(self, page, exactly_one=True):
"""
Returns location, (latitude, longitude) from JSON feed.
"""
place = page.get('result', None)
if not place:
self._check_status(page.get('status'))
return None
def parse_place(place):
"""
Get the location, lat, lng from a single JSON place.
"""
location = place.get('level')
latitude = place['location']['lat']
longitude = place['location']['lng']
return Location(location, (latitude, longitude), place)
if exactly_one:
return parse_place(place)
else:
return [parse_place(item) for item in place]
@staticmethod
def _check_status(status):
"""
Validates error statuses.
"""
if status == '0':
# When there are no results, just return.
return
if status == '1':
raise GeocoderQueryError(
'Internal server error.'
)
elif status == '2':
raise GeocoderQueryError(
'Invalid request.'
)
elif status == '3':
raise GeocoderAuthenticationFailure(
'Authentication failure.'
)
elif status == '4':
raise GeocoderQuotaExceeded(
                'Quota validation failure.'
)
elif status == '5':
raise GeocoderQueryError(
'AK Illegal or Not Exist.'
)
elif status == '101':
raise GeocoderQueryError(
'Your request was denied.'
)
elif status == '102':
raise GeocoderQueryError(
'IP/SN/SCODE/REFERER Illegal:'
)
elif status == '2xx':
raise GeocoderQueryError(
                'Has No Privileges.'
)
elif status == '3xx':
raise GeocoderQuotaExceeded(
'Quota Error.'
)
else:
raise GeocoderQueryError('Unknown error')
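# Minimal usage sketch (illustrative only; requires a valid Baidu Maps API
# key and network access):
#
#   geocoder = Baidu(api_key='your-ak')
#   location = geocoder.geocode(u'Baidu headquarters, Beijing')
#   print(location.latitude, location.longitude)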
|
{
"content_hash": "f20bbb1d2ca28d3e3ad26f9304a195a7",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 82,
"avg_line_length": 30.772093023255813,
"alnum_prop": 0.5480652962515115,
"repo_name": "emc-tridax/appengine-python-flask-master",
"id": "0fd1cc6de0feb0ba356abb4a44428d39c6905cc0",
"size": "6616",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "lib/geopy/geocoders/baidu.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6075"
},
{
"name": "JavaScript",
"bytes": "6345"
},
{
"name": "Python",
"bytes": "1605560"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.onyx import onyx_config
from units.modules.utils import set_module_args
from .onyx_module import TestOnyxModule, load_fixture
class TestOnyxConfigModule(TestOnyxModule):
module = onyx_config
def setUp(self):
super(TestOnyxConfigModule, self).setUp()
self.mock_get_config = patch('ansible.modules.network.onyx.onyx_config.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible.modules.network.onyx.onyx_config.load_config')
self.load_config = self.mock_load_config.start()
self.mock_run_commands = patch('ansible.modules.network.onyx.onyx_config.run_commands')
self.run_commands = self.mock_run_commands.start()
def tearDown(self):
super(TestOnyxConfigModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
self.mock_run_commands.stop()
def load_fixtures(self, commands=None, transport='cli'):
config_file = 'onyx_config_config.cfg'
self.get_config.return_value = load_fixture(config_file)
self.load_config.return_value = None
def test_onyx_config_unchanged(self):
src = load_fixture('onyx_config_config.cfg')
set_module_args(dict(src=src))
self.execute_module()
def test_onyx_config_src(self):
src = load_fixture('onyx_config_src.cfg')
set_module_args(dict(src=src))
commands = [
'interface mlag-port-channel 2']
self.execute_module(changed=True, commands=commands, is_updates=True)
def test_onyx_config_backup(self):
set_module_args(dict(backup=True))
result = self.execute_module()
self.assertIn('__backup__', result)
def test_onyx_config_save(self):
set_module_args(dict(lines=['hostname foo'], save='yes'))
self.execute_module(changed=True)
self.assertEqual(self.run_commands.call_count, 0)
self.assertEqual(self.get_config.call_count, 1)
self.assertEqual(self.load_config.call_count, 1)
args = self.load_config.call_args[0][1]
self.assertIn('configuration write', args)
def test_onyx_config_lines_wo_parents(self):
set_module_args(dict(lines=['hostname foo']))
commands = ['hostname foo']
self.execute_module(changed=True, commands=commands, is_updates=True)
def test_onyx_config_before(self):
set_module_args(dict(lines=['hostname foo'], before=['test1', 'test2']))
commands = ['test1', 'test2', 'hostname foo']
self.execute_module(changed=True, commands=commands, sort=False, is_updates=True)
def test_onyx_config_after(self):
set_module_args(dict(lines=['hostname foo'], after=['test1', 'test2']))
commands = ['hostname foo', 'test1', 'test2']
self.execute_module(changed=True, commands=commands, sort=False, is_updates=True)
def test_onyx_config_before_after(self):
set_module_args(dict(lines=['hostname foo'],
before=['test1', 'test2'],
after=['test3', 'test4']))
commands = ['test1', 'test2', 'hostname foo', 'test3', 'test4']
self.execute_module(changed=True, commands=commands, sort=False, is_updates=True)
def test_onyx_config_config(self):
config = 'hostname localhost'
set_module_args(dict(lines=['hostname router'], config=config))
commands = ['hostname router']
self.execute_module(changed=True, commands=commands, is_updates=True)
def test_onyx_config_match_none(self):
lines = ['hostname router']
set_module_args(dict(lines=lines, match='none'))
self.execute_module(changed=True, commands=lines, is_updates=True)
|
{
"content_hash": "7faad70c6620110ecb28e67151e37c48",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 95,
"avg_line_length": 41.52127659574468,
"alnum_prop": 0.6528311555213938,
"repo_name": "thaim/ansible",
"id": "f645034e5cf9c6596057dafe5addc9f4f40f1f29",
"size": "4615",
"binary": false,
"copies": "50",
"ref": "refs/heads/fix-broken-link",
"path": "test/units/modules/network/onyx/test_onyx_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) 2015-2020 Raj Patel(raj454raj@gmail.com), StopStalk
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
utable = db.unsubscriber
unsubscribers = db(utable).select()
for user in unsubscribers:
new_value = user.friend_requests or \
user.acceptance_rejectance or \
user.unfriend
user.update_record(friend_unfriend=new_value)
|
{
"content_hash": "d5b0bdb56aab5cfd2bc112c4ca73cbb7",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 81,
"avg_line_length": 47.43333333333333,
"alnum_prop": 0.7385804638088546,
"repo_name": "stopstalk/stopstalk-deployment",
"id": "bb7dcef2fb64d4a2b993f3164903fb5cf0da2888",
"size": "1423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "private/scripts/extras/unsubscribe-shift.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "40171"
},
{
"name": "CSS",
"bytes": "83271"
},
{
"name": "Cython",
"bytes": "123663"
},
{
"name": "HTML",
"bytes": "190175"
},
{
"name": "JavaScript",
"bytes": "681456"
},
{
"name": "Less",
"bytes": "78481"
},
{
"name": "Makefile",
"bytes": "98"
},
{
"name": "Python",
"bytes": "7648306"
},
{
"name": "SCSS",
"bytes": "79489"
},
{
"name": "Shell",
"bytes": "6187"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
}
|
import logging
import threading
import time
from pykit import utfjson
from kazoo.client import KazooClient
from kazoo.exceptions import LockTimeout
from kazoo.exceptions import NodeExistsError
from kazoo.exceptions import NoNodeError
from .exceptions import ZKUtilError
from . import zkutil
from .zkconf import ZKConf
from .zkconf import KazooClientExt
logger = logging.getLogger(__name__)
class ZKTXError(Exception):
pass
class ZKTXConnectionLost(ZKTXError):
pass
# TODO add method: ZKLock.get_owner() to get current lock owner
class ZKLock(object):
def __init__(self, lock_name,
zkconf=None,
zkclient=None,
on_lost=None,
identifier=None,
ephemeral=True,
timeout=10):
if zkconf is None:
zkconf = ZKConf()
if isinstance(zkconf, dict):
zkconf = ZKConf(**zkconf)
self.zkconf = zkconf
if zkclient is None:
            # If the user does not pass a zkclient instance,
            # we need to create one for the lock to use.
            # This zkclient will be closed after the lock is released.
if on_lost is None:
raise ValueError('on_lost must be specified to watch zk connection issue if no zkclient specified')
zkclient = make_owning_zkclient(zkconf.hosts(), zkconf.auth())
self.owning_client = True
else:
self.owning_client = False
if identifier is None:
identifier = zkutil.lock_id(zkconf.node_id())
if not isinstance(identifier, dict):
identifier = make_identifier(identifier, None)
assert sorted(['id', 'val']) == sorted(list(identifier.keys()))
# a copy of hosts for debugging and tracking
self._hosts = ','.join(['{0}:{1}'.format(*x)
for x in zkclient.hosts])
self.zkclient = zkclient
if isinstance(self.zkclient, KazooClientExt):
self.zkclient = self.zkclient._zk
self.on_lost = on_lost
self.lock_name = lock_name
self.lock_path = zkconf.lock(self.lock_name)
self.identifier = identifier
self.ephemeral = ephemeral
self.timeout = timeout
self.mutex = threading.RLock()
self.maybe_available = threading.Event()
self.maybe_available.set()
self.lock_holder = None
logger.info('adding event listener: {s}'.format(s=self))
self.zkclient.add_listener(self.on_connection_change)
def on_node_change(self, watchevent):
        # The mutex must be acquired first. Otherwise there is a chance that
        # on_node_change is triggered before self.maybe_available.clear().
with self.mutex:
self.maybe_available.set()
            # If locked, the node change is treated as losing the lock
if self.is_locked():
if self.on_lost is not None:
self.on_lost()
logger.info('node state changed:{ev}, lock might be released: {s}'.format(ev=watchevent, s=str(self)))
def on_connection_change(self, state):
        # Notify zklock to re-do the acquiring procedure, in order to trigger a connection error
with self.mutex:
self.maybe_available.set()
if self.on_lost is not None:
self.on_lost()
def acquire_loop(self, timeout=None):
if timeout is None:
timeout = self.timeout
expire_at = time.time() + timeout
while True:
            # Even if timeout is smaller than 0, the retry loop continues
            # until maybe_available is no longer set.
#
# There is a chance that:
# - Failed to create lock node(lock is occupied by other)
# - Failed to get lock node(just deleted)
# - Failed to create lock node(lock is occupied by other)
# - Failed to get lock node(just deleted)
# - ...
if not self.maybe_available.wait(timeout=expire_at - time.time()):
logger.debug('lock is still held by others: ' + str(self))
if time.time() > expire_at:
raise LockTimeout('lock: ' + str(self.lock_path))
            # Always proceed to the "get" phase, in order to add a watch
            # handler that watches for node change events.
self._create()
self._acquire_by_get()
if self.is_locked():
return
            # If it is possible to acquire the lock in the next retry, do not yield.
if self.maybe_available.is_set():
continue
else:
yield self.lock_holder[0], self.lock_holder[1]
def acquire(self, timeout=None):
for holder, ver in self.acquire_loop(timeout=timeout):
continue
def try_acquire(self):
try:
self.acquire(timeout=-1)
except LockTimeout:
pass
# if_locked, lock holder identifier, holder version
return self.is_locked(), self.lock_holder[0], self.lock_holder[1]
def try_release(self):
logger.debug('try to release if I am locker holder: {s}'.format(s=str(self)))
try:
holder, zstat = self.zkclient.get(self.lock_path)
holder = utfjson.load(holder)
self.lock_holder = (holder, zstat.version)
logger.debug('got lock holder: {s}'.format(s=str(self)))
if self.cmp_identifier(holder, self.identifier):
self.zkclient.remove_listener(self.on_connection_change)
try:
self.zkclient.delete(self.lock_path, version=zstat.version)
except NoNodeError as e:
logger.info(repr(e) + ' while delete lock: ' + str(self))
self.lock_holder = None
return True, holder, zstat.version
else:
return False, holder, zstat.version
except NoNodeError as e:
logger.info(repr(e) + ' while try_release: {s}'.format(s=str(self)))
return True, self.identifier, -1
def release(self):
with self.mutex:
if self.is_locked():
# remove listener to avoid useless event triggering
self.zkclient.remove_listener(self.on_connection_change)
try:
self.zkclient.delete(self.lock_path)
except NoNodeError as e:
logger.info(repr(e) + ' while delete lock: ' + str(self))
self.lock_holder = None
logger.info('RELEASED: {s}'.format(s=str(self)))
else:
logger.info('not acquired, do not need to release')
self.close()
def close(self):
self.zkclient.remove_listener(self.on_connection_change)
if self.owning_client:
logger.info('zk client is made by me, close it')
zkutil.close_zk(self.zkclient)
def is_locked(self):
l = self.lock_holder
if l is None:
return False
return self.cmp_identifier(l[0], self.identifier)
def _create(self):
        logger.debug('to create: {s}'.format(s=str(self)))
try:
self.zkclient.create(self.lock_path,
utfjson.dump(self.identifier),
ephemeral=self.ephemeral,
acl=self.zkconf.kazoo_digest_acl())
except NodeExistsError as e:
            # NOTE A create that succeeds on the server side might still be
            # reported as a failure on the client side due to a network issue.
            # 'get' after 'create' to check whether the existing node belongs
            # to this client.
logger.debug(repr(e) + ' while create lock: {s}'.format(s=str(self)))
self.lock_holder = None
return
logger.info('CREATE OK: {s}'.format(s=str(self)))
def set_lock_val(self, val, version=-1):
locked, holder, ver = self.try_acquire()
if not locked:
raise ZKUtilError("set non-locked: {k}".format(k=self.lock_name))
self.identifier['val'] = val
value = utfjson.dump(self.identifier)
st = self.zkclient.set(self.lock_path, value, version=version)
return st.version
def get_lock_val(self):
holder, zstat = self.zkclient.get(self.lock_path)
holder = utfjson.load(holder)
return holder['val'], zstat.version
def cmp_identifier(self, ia, ib):
return ia['id'] == ib['id']
def _acquire_by_get(self):
logger.debug('to get: {s}'.format(s=str(self)))
try:
with self.mutex:
holder, zstat = self.zkclient.get(self.lock_path, watch=self.on_node_change)
holder = utfjson.load(holder)
self.lock_holder = (holder, zstat.version)
logger.debug('got lock holder: {s}'.format(s=str(self)))
if self.cmp_identifier(holder, self.identifier):
logger.info('ACQUIRED: {s}'.format(s=str(self)))
return
logger.debug('other holds: {s}'.format(s=str(self)))
self.maybe_available.clear()
except NoNodeError as e:
            # The create failed (node existed), but by the time we tried to get it, it had been deleted.
logger.info(repr(e) + ' while get lock: {s}'.format(s=str(self)))
with self.mutex:
self.lock_holder = None
self.maybe_available.set()
def __str__(self):
return '<id={id} {l}:[{holder}] on {h}>'.format(
id=self.identifier['id'],
l=self.lock_path,
holder=(self.lock_holder or ''),
h=str(self._hosts),
)
def __enter__(self):
self.acquire()
def __exit__(self, tp, value, tb):
self.release()
def make_owning_zkclient(hosts, auth):
zkclient = KazooClient(hosts=hosts)
zkclient.start()
if auth is not None:
scheme, name, passw = auth
zkclient.add_auth(scheme, name + ':' + passw)
return zkclient
def make_identifier(_id, val):
return {'id': _id, 'val': val}
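# A minimal usage sketch (not part of the library): it assumes a ZooKeeper
# ensemble reachable through the default ZKConf and only illustrates the
# context-manager flow above; the lock name and callback are placeholders.
if __name__ == '__main__':
    def _on_lost():
        # React to a lost session, e.g. abort the protected work.
        logger.warning('zookeeper session lost while holding the lock')
    _lock = ZKLock('example-lock', on_lost=_on_lost)
    with _lock:
        # Critical section; __exit__ releases the lock and closes the
        # owning zkclient created in __init__.
        pass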
|
{
"content_hash": "b73e914a095e902256ceac51d5eb7c79",
"timestamp": "",
"source": "github",
"line_count": 337,
"max_line_length": 115,
"avg_line_length": 30.335311572700295,
"alnum_prop": 0.5620659297662134,
"repo_name": "baishancloud/pykit",
"id": "e8921f5a4a940b848a24f2d1b4613ef13aaf90be",
"size": "10262",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "zkutil/zklock.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "666985"
},
{
"name": "Shell",
"bytes": "32314"
}
],
"symlink_target": ""
}
|
"""
Example that shows how to list available chromecasts.
"""
import argparse
import logging
import time
import pychromecast
import zeroconf
parser = argparse.ArgumentParser(description="Example on how to receive updates on discovered chromecasts.")
parser.add_argument("--show-debug", help="Enable debug log", action="store_true")
parser.add_argument("--show-zeroconf-debug", help="Enable zeroconf debug log", action="store_true")
args = parser.parse_args()
if args.show_debug:
logging.basicConfig(level=logging.DEBUG)
if args.show_zeroconf_debug:
print("Zeroconf version: " + zeroconf.__version__)
logging.getLogger("zeroconf").setLevel(logging.DEBUG)
devices, browser = pychromecast.discovery.discover_chromecasts()
# Shut down discovery
pychromecast.stop_discovery(browser)
print(f"Discovered {len(devices)} device(s):")
for device in devices:
print(f" {device}")
|
{
"content_hash": "e38b6ed0c3347734a1328c11723753a6",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 108,
"avg_line_length": 30.862068965517242,
"alnum_prop": 0.7541899441340782,
"repo_name": "balloob/pychromecast",
"id": "795f0e9c2066313396c352016fc7fe23e0cc2039",
"size": "895",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/discovery_example2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "136235"
},
{
"name": "Shell",
"bytes": "137"
}
],
"symlink_target": ""
}
|
""" Reinforcement - Distributed Actor
- Class responsible for playing the 'actor' role in the cluster by generating games
"""
import logging
from tornado import gen
from diplomacy_research.models.training.reinforcement.common import create_advantage
from diplomacy_research.models.training.reinforcement.generation import start_training_process
from diplomacy_research.models.training.reinforcement.memory_buffer import build_memory_buffer
from diplomacy_research.models.training.reinforcement.serving import start_tf_serving_server, check_opening_orders, \
update_serving, wait_for_version, get_training_config
# Constants
LOGGER = logging.getLogger(__name__)
@gen.coroutine
def start_distributed_actor(trainer):
""" Starts an actor
:param trainer: A reinforcement learning trainer instance.
:type trainer: diplomacy_research.models.training.reinforcement.trainer.ReinforcementTrainer
"""
# Creating advantage function
create_advantage(trainer)
# Builds the memory buffer and wait for the cluster to be ready
build_memory_buffer(trainer)
trainer.memory_buffer.wait_until_ready()
# Creating a serving - Training (endpoint only)
start_tf_serving_server(trainer,
force_cpu=True,
serving_id=0,
config=None,
endpoint_only=True)
# Wait for model to be loaded
update_serving(trainer, serving_id=0, config=get_training_config(trainer))
wait_for_version(trainer, serving_id=0, model_name='player')
# Querying the model to make sure to check if it has been trained
check_opening_orders(trainer, serving_id=0)
    # Launches the training process to generate training games forever
start_training_process(trainer, block=True)
|
{
"content_hash": "6ca447655c6551682ee94b20fa757d3e",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 117,
"avg_line_length": 41.31818181818182,
"alnum_prop": 0.7183718371837183,
"repo_name": "diplomacy/research",
"id": "7c4570afbb884b0920d1eff5182a913a267a217f",
"size": "2618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "diplomacy_research/models/training/reinforcement/distributed_actor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "592"
},
{
"name": "C++",
"bytes": "5188"
},
{
"name": "Dockerfile",
"bytes": "31749"
},
{
"name": "Groovy",
"bytes": "15568"
},
{
"name": "Python",
"bytes": "2557493"
},
{
"name": "Shell",
"bytes": "26305"
}
],
"symlink_target": ""
}
|
"""
The base for using sqlalchemy as a store with TiddlyWeb.
"""
from __future__ import absolute_import
import logging
from pyparsing import ParseException
from base64 import b64encode, b64decode
from sqlalchemy import event
from sqlalchemy.engine import create_engine, Engine
from sqlalchemy.exc import ProgrammingError
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.sql.expression import and_
from tiddlyweb.filters import FilterIndexRefused
from tiddlyweb.model.bag import Bag
from tiddlyweb.model.policy import Policy
from tiddlyweb.model.recipe import Recipe
from tiddlyweb.model.tiddler import Tiddler
from tiddlyweb.model.user import User
from tiddlyweb.store import (NoBagError, NoRecipeError, NoTiddlerError,
NoUserError, StoreError)
from tiddlyweb.stores import StorageInterface
from tiddlyweb.util import binary_tiddler
from .model import (Base, Session, sBag, sPolicy, sRecipe, sTiddler, sRevision,
sText, sTag, sField, sCurrentRevision, sFirstRevision, sUser, sRole)
from .parser import DEFAULT_PARSER
from .producer import Producer
__version__ = '3.1.1'
#logging.basicConfig()
#logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
#logging.getLogger('sqlalchemy.pool').setLevel(logging.DEBUG)
class Store(StorageInterface):
"""
A SqlAlchemy based storage interface for TiddlyWeb.
"""
mapped = False
def __init__(self, store_config=None, environ=None):
super(Store, self).__init__(store_config, environ)
self.store_type = self._db_config().split(':', 1)[0]
self.parser = DEFAULT_PARSER
self.producer = Producer()
self.has_geo = False
self._init_store()
def _init_store(self):
"""
Establish the database engine and session,
creating tables if needed.
"""
engine = create_engine(self._db_config())
Base.metadata.bind = engine
Session.configure(bind=engine)
self.session = Session()
# turn on foreign keys for sqlite (the default engine)
if 'sqlite' in self.store_type:
@event.listens_for(Engine, 'connect')
def set_sqlite_pragma(dbapi_connection, connection_record):
cursor = dbapi_connection.cursor()
cursor.execute('PRAGMA foreign_keys=ON')
cursor.close()
if not Store.mapped:
Base.metadata.create_all(engine)
Store.mapped = True
def _db_config(self):
return self.store_config['db_config']
def list_recipes(self):
try:
recipes = self.session.query(sRecipe).all()
for srecipe in recipes:
recipe = Recipe(srecipe.name)
recipe = self._load_recipe(recipe, srecipe)
yield recipe
self.session.close()
except:
self.session.rollback()
raise
def list_bags(self):
try:
bags = self.session.query(sBag).all()
for sbag in bags:
bag = Bag(sbag.name)
bag = self._load_bag(bag, sbag)
yield bag
self.session.close()
except:
self.session.rollback()
raise
def list_users(self):
try:
users = self.session.query(sUser.usersign).all()
self.session.close()
except:
self.session.rollback()
raise
for user in users:
yield User(user[0])
def list_bag_tiddlers(self, bag):
try:
try:
self.session.query(sBag.id).filter(
sBag.name == bag.name).one()
tiddlers = self.session.query(sTiddler).filter(
sTiddler.bag == bag.name).all()
except NoResultFound, exc:
raise NoBagError('no results for bag %s, %s' % (bag.name, exc))
self.session.close()
except:
self.session.rollback()
raise
return (Tiddler(stiddler.title, bag.name) for stiddler in tiddlers)
def list_tiddler_revisions(self, tiddler):
try:
try:
revisions = self.session.query(sTiddler).filter(and_(
sTiddler.title == tiddler.title,
sTiddler.bag == tiddler.bag)).one().revisions
except NoResultFound, exc:
raise NoTiddlerError('tiddler %s not found: %s' % (
tiddler.title, exc))
return [revision.number for revision in revisions]
except:
self.session.rollback()
raise
finally:
self.session.close()
def recipe_delete(self, recipe):
try:
try:
rows = self.session.query(sRecipe).filter(sRecipe.name
== recipe.name).delete()
if rows == 0:
raise NoResultFound
self.session.commit()
except NoResultFound, exc:
raise NoRecipeError('no results for recipe %s, %s' %
(recipe.name, exc))
except:
self.session.rollback()
raise
def recipe_get(self, recipe):
try:
try:
srecipe = self.session.query(sRecipe).filter(sRecipe.name
== recipe.name).one()
recipe = self._load_recipe(recipe, srecipe)
self.session.close()
return recipe
except NoResultFound, exc:
raise NoRecipeError('no results for recipe %s, %s' %
(recipe.name, exc))
except:
self.session.rollback()
raise
def recipe_put(self, recipe):
try:
self._store_recipe(recipe)
self.session.commit()
except:
self.session.rollback()
raise
def bag_delete(self, bag):
try:
try:
rows = self.session.query(sBag).filter(sBag.name
== bag.name).delete()
if rows == 0:
raise NoResultFound
self.session.commit()
except NoResultFound, exc:
raise NoBagError('Bag %s not found: %s' % (bag.name, exc))
except:
self.session.rollback()
raise
def bag_get(self, bag):
try:
try:
sbag = self.session.query(sBag).filter(sBag.name
== bag.name).one()
bag = self._load_bag(bag, sbag)
self.session.close()
return bag
except NoResultFound, exc:
raise NoBagError('Bag %s not found: %s' % (bag.name, exc))
except:
self.session.rollback()
raise
def bag_put(self, bag):
try:
self._store_bag(bag)
self.session.commit()
except:
self.session.rollback()
raise
def tiddler_delete(self, tiddler):
try:
try:
rows = (self.session.query(sTiddler).
filter(sTiddler.title == tiddler.title).
filter(sTiddler.bag == tiddler.bag).delete())
if rows == 0:
raise NoResultFound
self.session.commit()
except NoResultFound, exc:
raise NoTiddlerError('no tiddler %s to delete, %s' %
(tiddler.title, exc))
except:
self.session.rollback()
raise
def tiddler_get(self, tiddler):
try:
try:
if tiddler.revision:
try:
revision_value = int(tiddler.revision)
except ValueError, exc:
raise NoTiddlerError('%s is not a valid revision id'
% tiddler.revision)
revision = self.session.query(sRevision).filter(
sRevision.number == revision_value).one()
stiddler = self.session.query(sTiddler).filter(and_(
sTiddler.id == revision.tiddler_id,
sTiddler.title == tiddler.title,
sTiddler.bag == tiddler.bag)).one()
current_revision = revision
else:
stiddler = self.session.query(sTiddler).filter(and_(
sTiddler.title == tiddler.title,
sTiddler.bag == tiddler.bag)).one()
current_revision = stiddler.current
base_revision = stiddler.first
tiddler = self._load_tiddler(tiddler, current_revision,
base_revision)
self.session.close()
return tiddler
except NoResultFound, exc:
raise NoTiddlerError('Tiddler %s:%s:%s not found: %s' %
(tiddler.bag, tiddler.title, tiddler.revision, exc))
except:
self.session.rollback()
raise
def tiddler_put(self, tiddler):
tiddler.revision = None
try:
if not tiddler.bag:
raise NoBagError('bag required to save')
try:
self.session.query(sBag.id).filter(sBag.name
== tiddler.bag).one()
except NoResultFound, exc:
raise NoBagError('bag %s must exist for tiddler save: %s'
% (tiddler.bag, exc))
current_revision_number = self._store_tiddler(tiddler)
tiddler.revision = current_revision_number
self.session.commit()
except:
self.session.rollback()
raise
def user_delete(self, user):
try:
try:
rows = self.session.query(sUser).filter(sUser.usersign
== user.usersign).delete()
if rows == 0:
raise NoResultFound
self.session.commit()
except NoResultFound, exc:
raise NoUserError('user %s not found, %s' %
(user.usersign, exc))
except:
self.session.rollback()
raise
def user_get(self, user):
try:
try:
suser = self.session.query(sUser).filter(sUser.usersign
== user.usersign).one()
user = self._load_user(user, suser)
self.session.close()
return user
except NoResultFound, exc:
raise NoUserError('user %s not found, %s' %
(user.usersign, exc))
except:
self.session.rollback()
raise
def user_put(self, user):
try:
suser = self._store_user(user)
self.session.merge(suser)
self._store_roles(user)
self.session.commit()
except:
self.session.rollback()
raise
def search(self, search_query=''):
"""
        Do a search of the database, using the 'q' query,
parsed by the parser and turned into a producer.
"""
query = self.session.query(sTiddler).join('current')
config = self.environ.get('tiddlyweb.config', {})
if '_limit:' not in search_query:
default_limit = config.get('mysql.search_limit',
config.get('sqlalchemy3.search_limit', '20'))
search_query += ' _limit:%s' % default_limit
try:
try:
ast = self.parser(search_query)[0]
fulltext = config.get('mysql.fulltext', False)
query = self.producer.produce(ast, query, fulltext=fulltext,
geo=self.has_geo)
except ParseException, exc:
raise StoreError('failed to parse search query: %s' % exc)
try:
for stiddler in query.all():
try:
yield Tiddler(unicode(stiddler.title),
unicode(stiddler.bag))
except AttributeError:
stiddler = stiddler[0]
yield Tiddler(unicode(stiddler.title),
unicode(stiddler.bag))
self.session.close()
except ProgrammingError, exc:
raise StoreError('generated search SQL incorrect: %s' % exc)
except:
self.session.rollback()
raise
def _load_bag(self, bag, sbag):
bag.desc = sbag.desc
bag.policy = self._load_policy(sbag.policy)
bag.store = True
return bag
def _load_policy(self, spolicy):
policy = Policy()
for pol in spolicy:
principal_name = pol.principal_name
if pol.principal_type == 'R':
principal_name = 'R:%s' % principal_name
if pol.constraint == 'owner':
policy.owner = principal_name
else:
principals = getattr(policy, pol.constraint, [])
principals.append(principal_name)
setattr(policy, pol.constraint, principals)
return policy
def _load_tiddler(self, tiddler, current_revision, base_revision):
tiddler.modifier = current_revision.modifier
tiddler.modified = current_revision.modified
tiddler.revision = current_revision.number
tiddler.type = current_revision.type
try:
if binary_tiddler(tiddler):
tiddler.text = b64decode(
current_revision.text.text.lstrip().rstrip())
else:
tiddler.text = current_revision.text.text
except AttributeError:
tiddler.text = ''
tiddler.tags = [tag.tag for tag in current_revision.tags]
for sfield in current_revision.fields:
tiddler.fields[sfield.name] = sfield.value
tiddler.created = base_revision.modified
tiddler.creator = base_revision.modifier
return tiddler
def _load_recipe(self, recipe, srecipe):
recipe.desc = srecipe.desc
recipe.policy = self._load_policy(srecipe.policy)
recipe.set_recipe(self._load_recipe_string(srecipe.recipe_string))
recipe.store = True
return recipe
def _load_recipe_string(self, recipe_string):
recipe = []
if recipe_string:
for line in recipe_string.split('\n'):
bag, filter_string = line.rsplit('?', 1)
recipe.append((bag, filter_string))
return recipe
def _load_user(self, user, suser):
user.usersign = suser.usersign
user._password = suser.password
user.note = suser.note
for role in suser.roles:
user.add_role(role.name)
return user
def _store_bag(self, bag):
try:
sbag = self.session.query(sBag).filter(
sBag.name == bag.name).one()
except NoResultFound:
sbag = sBag(bag.name)
self.session.add(sbag)
sbag.desc = bag.desc
self._store_policy(sbag, bag.policy)
return sbag
def _store_policy(self, container, policy):
policies = []
for attribute in policy.attributes:
if attribute == 'owner':
value = policy.owner is None and [] or [policy.owner]
else:
value = getattr(policy, attribute, [])
spolicies = self._handle_policy_attribute(attribute, value)
policies.extend(spolicies)
container.policy = policies
def _handle_policy_attribute(self, attribute, value):
spolicies = []
for principal_name in value:
if principal_name is not None:
if principal_name.startswith('R:'):
pname = principal_name[2:]
ptype = u'R'
else:
pname = principal_name
ptype = u'U'
try:
spolicy = self.session.query(sPolicy).filter(and_(
sPolicy.constraint == attribute,
sPolicy.principal_name == pname,
sPolicy.principal_type == ptype)).one()
except NoResultFound:
spolicy = sPolicy()
spolicy.constraint = attribute
spolicy.principal_name = pname
spolicy.principal_type = ptype
self.session.add(spolicy)
spolicies.append(spolicy)
return spolicies
def _store_recipe(self, recipe):
try:
srecipe = self.session.query(sRecipe).filter(
sRecipe.name == recipe.name).one()
except NoResultFound:
srecipe = sRecipe(recipe.name)
self.session.add(srecipe)
srecipe.recipe_string = self._store_recipe_string(recipe.get_recipe())
srecipe.desc = recipe.desc
self._store_policy(srecipe, recipe.policy)
return srecipe
def _store_recipe_string(self, recipe_list):
string = u''
string += u'\n'.join([u'%s?%s' % (unicode(bag),
unicode(filter_string)) for bag, filter_string in recipe_list])
return string
def _store_roles(self, user):
usersign = user.usersign
for role in user.roles:
srole = sRole()
srole.user = usersign
srole.name = role
self.session.merge(srole)
def _store_tiddler(self, tiddler):
if binary_tiddler(tiddler):
tiddler.text = unicode(b64encode(tiddler.text))
try:
stiddler = self.session.query(sTiddler.id).filter(
and_(sTiddler.title == tiddler.title,
sTiddler.bag == tiddler.bag)).one()
new_tiddler = False
except NoResultFound:
stiddler = sTiddler(tiddler.title, tiddler.bag)
self.session.add(stiddler)
new_tiddler = True
self.session.flush()
srevision = sRevision()
srevision.type = tiddler.type
srevision.modified = tiddler.modified
srevision.modifier = tiddler.modifier
srevision.tiddler_id = stiddler.id
self.session.add(srevision)
self.session.flush()
text_insert = (sText.__table__.insert()
.values(text=tiddler.text, revision_number=srevision.number))
self.session.execute(text_insert)
for tag in set(tiddler.tags):
stag = sTag(tag)
stag.revision_number = srevision.number
self.session.add(stag)
for field in tiddler.fields:
if not field.startswith('server.'):
sfield = sField(field, tiddler.fields[field])
sfield.revision_number = srevision.number
self.session.add(sfield)
self.session.flush()
current_revision = sCurrentRevision()
current_revision.tiddler_id = stiddler.id
current_revision.current_id = srevision.number
self.session.merge(current_revision)
if new_tiddler:
first_revision = sFirstRevision()
first_revision.tiddler_id = stiddler.id
first_revision.first_id = srevision.number
self.session.merge(first_revision)
return srevision.number
def _store_user(self, user):
suser = sUser()
suser.usersign = user.usersign
suser.password = user._password
suser.note = user.note
return suser
def index_query(environ, **kwargs):
"""
Attempt to optimize filter processing by using the search index
to provide results that can be matched.
In practice, this proves not to be that helpful when memcached
is being used, but it is in other situations.
"""
store = environ['tiddlyweb.store']
queries = []
for key, value in kwargs.items():
if '"' in value:
            # XXX The parser is currently unable to deal with nested
            # quotes. Rather than running the risk of tweaking the parser
            # with unclear results, we instead just refuse for now. Later
            # this can be fixed for real.
raise FilterIndexRefused('unable to process values with quotes')
queries.append('%s:"%s"' % (key, value))
query = ' '.join(queries)
storage = store.storage
try:
tiddlers = storage.search(search_query=query)
return (store.get(tiddler) for tiddler in tiddlers)
except StoreError, exc:
raise FilterIndexRefused('error in the store: %s' % exc)
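# A minimal usage sketch (not part of the module): it assumes an sqlite
# backend is acceptable and that the model layer in .model can create its
# tables; the config keys mirror what _db_config() expects above.
if __name__ == '__main__':
    _store = Store(store_config={'db_config': 'sqlite:///:memory:'},
                   environ={'tiddlyweb.config': {}})
    _store.bag_put(Bag('demo'))
    print _store.bag_get(Bag('demo')).name  # -> demo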
|
{
"content_hash": "f11d20dd346921a2c495d7238832334d",
"timestamp": "",
"source": "github",
"line_count": 597,
"max_line_length": 79,
"avg_line_length": 35.04857621440536,
"alnum_prop": 0.5402408717262474,
"repo_name": "tiddlyweb/tiddlywebplugins.sqlalchemy",
"id": "96dfb7a4bcbfe72ed2a012c9cfe9df991ebe7c10",
"size": "20924",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tiddlywebplugins/sqlalchemy3/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "69288"
},
{
"name": "Shell",
"bytes": "326"
}
],
"symlink_target": ""
}
|
"""
test score_mgr
"""
import datetime
from django.test import TransactionTestCase
from django.contrib.auth.models import User
from apps.managers.score_mgr import score_mgr
from apps.managers.team_mgr.models import Group, Team
from apps.managers.score_mgr.models import ScoreboardEntry, PointsTransaction
from apps.utils import test_utils
class ScoreboardEntryUnitTests(TransactionTestCase):
"""scoreboard test"""
def setUp(self):
"""Generate test. Set the competition settings to the current date for testing."""
self.user = User(username="test_user", password="changeme")
self.user.save()
self.current_round = "Round 1"
test_utils.set_competition_round()
self.user.profile.add_points(10, datetime.datetime.today(), "test")
def testUserOverallRoundRankWithPoints(self):
"""Tests that the overall rank calculation for a user in a round is
correct based on points."""
top_entry = ScoreboardEntry.objects.filter(
round_name=self.current_round).order_by("-points")[0]
entry, _ = ScoreboardEntry.objects.get_or_create(
profile=self.user.profile,
round_name=self.current_round,
)
entry.points = top_entry.points + 1
entry.last_awarded_submission = datetime.datetime.today()
entry.save()
self.assertEqual(score_mgr.player_rank(self.user.profile,
self.current_round),
1,
"Check user is ranked #1 for the current round.")
user2 = User(username="test_user2", password="changeme")
user2.save()
profile2 = user2.profile
entry2, _ = ScoreboardEntry.objects.get_or_create(
profile=profile2,
round_name=self.current_round,
)
entry2.points = entry.points + 1
entry2.last_awarded_submission = entry.last_awarded_submission
entry2.save()
self.assertEqual(score_mgr.player_rank(self.user.profile,
self.current_round),
2,
"Check user is now second.")
def testUserOverallRoundRankWithSubmissionDate(self):
"""Tests that the overall rank calculation for a user in a round is
correct based on submission date."""
top_entry = ScoreboardEntry.objects.filter(
round_name=self.current_round).order_by("-points")[0]
entry, _ = ScoreboardEntry.objects.get_or_create(
profile=self.user.profile,
round_name=self.current_round,
)
entry.points = top_entry.points + 1
entry.last_awarded_submission = datetime.datetime.today() - datetime.timedelta(days=3)
entry.save()
self.assertEqual(score_mgr.player_rank(self.user.profile,
self.current_round),
1,
"Check user is ranked #1 for the current round.")
user2 = User(username="test_user2", password="changeme")
user2.save()
profile2 = user2.profile
entry2, _ = ScoreboardEntry.objects.get_or_create(
profile=profile2,
round_name=self.current_round,
)
entry2.points = entry.points
entry2.last_awarded_submission = datetime.datetime.today()
entry2.save()
self.assertEqual(score_mgr.player_rank(self.user.profile,
self.current_round),
2,
"Check user is now second.")
def testUserTeamRoundRankWithPoints(self):
"""Tests that the team rank calculation for a round is correct based
on points."""
# Setup dorm
group = Group(name="Test group")
group.save()
team = Team(name="A", group=group)
team.save()
profile = self.user.profile
profile.team = team
profile.save()
# Set up entry
top_entry = ScoreboardEntry.objects.filter(
round_name=self.current_round).order_by("-points")[0]
entry, _ = ScoreboardEntry.objects.get_or_create(
profile=self.user.profile,
round_name=self.current_round,
)
entry.points = top_entry.points + 1
entry.last_awarded_submission = datetime.datetime.today()
entry.save()
self.assertEqual(score_mgr.player_rank_in_team(self.user.profile,
self.current_round),
1,
"Check user is ranked #1 for the current round.")
user2 = User(username="test_user2", password="changeme")
user2.save()
profile2 = user2.profile
profile2.team = team
profile2.save()
entry2, _ = ScoreboardEntry.objects.get_or_create(
profile=profile2,
round_name=self.current_round,
)
entry2.points = entry.points + 1
entry2.last_awarded_submission = entry.last_awarded_submission
entry2.save()
self.assertEqual(score_mgr.player_rank_in_team(self.user.profile,
self.current_round),
2,
"Check user is now second.")
def testUserTeamRoundRankWithSubmissionDate(self):
"""Tests that the team rank calculation for a round is correct based
on points."""
# Set up dorm
group = Group(name="Test group")
group.save()
team = Team(name="A", group=group)
team.save()
# Create the entry for the test user
profile = self.user.profile
profile.team = team
profile.save()
top_entry = ScoreboardEntry.objects.filter(
round_name=self.current_round).order_by("-points")[0]
entry, _ = ScoreboardEntry.objects.get_or_create(
profile=self.user.profile,
round_name=self.current_round,
)
entry.points = top_entry.points + 1
entry.last_awarded_submission = datetime.datetime.today() - \
datetime.timedelta(days=3)
entry.save()
# Create another test user
user2 = User(username="test_user2", password="changeme")
user2.save()
profile2 = user2.profile
profile2.team = team
profile2.save()
entry2, _ = ScoreboardEntry.objects.get_or_create(
profile=profile2,
round_name=self.current_round,
)
entry2.points = entry.points
entry2.last_awarded_submission = datetime.datetime.today()
entry2.save()
self.assertEqual(score_mgr.player_rank_in_team(self.user.profile,
self.current_round),
2,
"Check user is now second.")
def testRoundRankWithoutEntry(self):
"""Tests that the overall rank calculation is correct even if a user
has not done anything yet."""
group = Group(name="Test group")
group.save()
team = Team(name="A", group=group)
team.save()
# Rank will be the number of users who have points plus one.
overall_rank = 1
team_rank = 1
self.assertEqual(score_mgr.player_rank(self.user.profile,
self.current_round),
overall_rank,
"Check user is last overall for the current round.")
self.assertEqual(score_mgr.player_rank_in_team(self.user.profile,
self.current_round),
team_rank,
"Check user is last in their team for the current "
"round.")
user2 = User(username="test_user2", password="changeme")
user2.save()
profile2 = user2.profile
profile2.add_points(10, datetime.datetime.today(), "test")
self.assertEqual(score_mgr.player_rank(self.user.profile,
self.current_round),
overall_rank + 1,
"Check that the user's overall rank has moved down.")
self.assertEqual(score_mgr.player_rank_in_team(self.user.profile,
self.current_round),
team_rank + 1,
"Check that the user's team rank has moved down.")
class PointsLogTest(TransactionTestCase):
"""test points log"""
def setUp(self):
self.user = User.objects.create_user("test", "test@test.com")
test_utils.set_competition_round()
def testAddPoints(self):
"""
Test that adding points creates a new entry in the points log.
"""
log_count = PointsTransaction.objects.count()
profile = self.user.profile
profile.add_points(10, datetime.datetime.today(), "Hello world", None)
profile.save()
self.assertEqual(PointsTransaction.objects.count(), log_count + 1,
"A new log should have been created.")
log = profile.user.pointstransaction_set.all()[0]
self.assertEqual(log.points, 10, "Points should have been awarded.")
self.assertEqual(log.message, "Hello world",
"Message should have been added.")
|
{
"content_hash": "2d853f2929b961995610779ce51e98f1",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 94,
"avg_line_length": 39.064516129032256,
"alnum_prop": 0.5541907514450867,
"repo_name": "yongwen/makahiki",
"id": "94edb02c7ad65ded2d5d8d14ffaca3dd5968f78c",
"size": "9688",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "makahiki/apps/managers/score_mgr/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "107603"
},
{
"name": "HTML",
"bytes": "568630"
},
{
"name": "JavaScript",
"bytes": "244377"
},
{
"name": "Python",
"bytes": "1489909"
},
{
"name": "Shell",
"bytes": "20118"
}
],
"symlink_target": ""
}
|
"""
Problem Definition :
The following iterative sequence is defined for the set of positive integers:
n = n/2 (n is even)
n = 3n + 1 (n is odd)
Using the rule above and starting with 13, we generate the following sequence:
13 - 40 - 20 - 10 - 5 - 16 - 8 - 4 - 2 - 1
It can be seen that this sequence (starting at 13 and finishing at 1) contains 10 terms. Although it has not been proved yet (Collatz Problem), it is thought that all starting numbers finish at 1.
Which starting number, under one million, produces the longest chain?
NOTE: Once the chain starts the terms are allowed to go above one million.
"""
def main():
numbers = [0]*10000000
numbers[1] = 1
length = len(numbers)
for i in xrange(1, 1000001):
if numbers[i] == 0: #Only check for unseen numbers (list value = 0)
final = i
num_dict = dict()
while final > 1:
                if final < length: #To check if the value of final is less than the length of the numbers list, to avoid an index out of range error. If so, treat it as a never-seen number in the else logic.
                    if numbers[final] == 0: #To confirm that the current final value was never seen before during any calculation (0 is the default value assigned to every list member)
num_dict[final] = 1 #Add the new number to the temp dictionary and assign value = 1 ( i.e. default length = 1 for any single number seen during calculation)
if final % 2 == 0:
final /= 2
else :
final = 3*final + 1
if final < length: #To check if the new value of final is less than the length of list - numbers - to avoid array index out of range error.
if numbers[final] == 0: #Never seen logic : if the final value is new, increase values of each current member of dictionary by 1. and this number will be added to the dictionary in the next iteration of while loop.
for key in num_dict:
num_dict[key] = int(num_dict[key]) + 1
else: #Old value logic : if the final value is available in the list, add its corresponding value to each current member of dictionary.
for key in num_dict:
num_dict[key] = int(num_dict[key]) + int(numbers[final])
else:
for key in num_dict:
num_dict[key] = int(num_dict[key]) + 1
else:
final = 1
else:
num_dict[final] = 1
if final % 2 == 0:
final /= 2
else:
final = 3*final + 1
for key in num_dict: #Never seen logic.
num_dict[key] = int(num_dict[key]) + 1
for key in num_dict:
if key < length:
numbers[key] = num_dict[key]
print numbers.index(max(numbers))
if __name__ == '__main__':
main()
|
{
"content_hash": "6a8615a17c27010fd626a137bf45906b",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 249,
"avg_line_length": 45.52054794520548,
"alnum_prop": 0.5188083057478182,
"repo_name": "vivekpabani/projecteuler",
"id": "fa26ed89d01b9ad4f63cb1fd1226ccdccdcb907f",
"size": "3347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/014/problem_014.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "123733"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, print_function
import re
from .util import fix_fileid, DIRNAME, NORMCASE
from .info import ParentInfo
FILE_ID_RE = re.compile(
r"""
^
(?:
( .* [.] (?: py | txt ) \b ) # .txt for doctest files
( [^.] .* )?
)
$
""",
re.VERBOSE,
)
def fix_nodeid(
nodeid,
kind,
rootdir=None,
# *,
_fix_fileid=fix_fileid,
):
if not nodeid:
raise ValueError("missing nodeid")
if nodeid == ".":
return nodeid
fileid = nodeid
remainder = ""
if kind not in ("folder", "file"):
m = FILE_ID_RE.match(nodeid)
if m:
fileid, remainder = m.groups()
elif len(nodeid) > 1:
fileid = nodeid[:2]
remainder = nodeid[2:]
fileid = _fix_fileid(fileid, rootdir)
return fileid + (remainder or "")
class DiscoveredTests(object):
"""A container for the discovered tests and their parents."""
def __init__(self):
self.reset()
def __len__(self):
return len(self._tests)
def __getitem__(self, index):
return self._tests[index]
@property
def parents(self):
return sorted(
self._parents.values(),
# Sort by (name, id).
key=lambda p: (NORMCASE(p.root or p.name), p.id),
)
def reset(self):
"""Clear out any previously discovered tests."""
self._parents = {}
self._tests = []
def add_test(self, test, parents):
"""Add the given test and its parents."""
parentid = self._ensure_parent(test.path, parents)
        # Updating the parent ID and the test ID isn't necessary if the
# provided test and parents (from the test collector) are
# properly generated. However, we play it safe here.
test = test._replace(
# Clean up the ID.
id=fix_nodeid(test.id, "test", test.path.root),
parentid=parentid,
)
self._tests.append(test)
def _ensure_parent(
self,
path,
parents,
# *,
_dirname=DIRNAME,
):
rootdir = path.root
relpath = path.relfile
_parents = iter(parents)
nodeid, name, kind = next(_parents)
# As in add_test(), the node ID *should* already be correct.
nodeid = fix_nodeid(nodeid, kind, rootdir)
_parentid = nodeid
for parentid, parentname, parentkind in _parents:
# As in add_test(), the parent ID *should* already be correct.
parentid = fix_nodeid(parentid, kind, rootdir)
if kind in ("folder", "file"):
info = ParentInfo(nodeid, kind, name, rootdir, relpath, parentid)
relpath = _dirname(relpath)
else:
info = ParentInfo(nodeid, kind, name, rootdir, None, parentid)
self._parents[(rootdir, nodeid)] = info
nodeid, name, kind = parentid, parentname, parentkind
assert nodeid == "."
info = ParentInfo(nodeid, kind, name=rootdir)
self._parents[(rootdir, nodeid)] = info
return _parentid
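if __name__ == "__main__":
    # Quick illustrative check of the node-ID split used by fix_nodeid()
    # (a pytest-style ID is assumed; fix_fileid/rootdir normalization is
    # deliberately not exercised here).
    _m = FILE_ID_RE.match("tests/test_spam.py::TestSpam::test_one")
    print(_m.groups())  # -> ('tests/test_spam.py', '::TestSpam::test_one')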
|
{
"content_hash": "d5f11aefd69e9c4cf1943a922dc8bcd8",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 81,
"avg_line_length": 27.87719298245614,
"alnum_prop": 0.5434235368156073,
"repo_name": "DonJayamanne/pythonVSCode",
"id": "798aea1e93f1fd160e9ad99d737bbb9545d2b41e",
"size": "3273",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "pythonFiles/testing_tools/adapter/discovery.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "569"
},
{
"name": "JavaScript",
"bytes": "28707"
},
{
"name": "Jupyter Notebook",
"bytes": "10520"
},
{
"name": "Python",
"bytes": "2602995"
},
{
"name": "Roff",
"bytes": "108"
},
{
"name": "Shell",
"bytes": "76"
},
{
"name": "TypeScript",
"bytes": "5178987"
}
],
"symlink_target": ""
}
|
"""Imports offline conversion values for specific clicks into your account.
To get the Google Click ID for a click, run a CLICK_PERFORMANCE_REPORT.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
from googleads import adwords
CONVERSION_NAME = 'INSERT_CONVERSION_NAME_HERE'
# Your click ID must be less than 30 days old.
CLICK_ID = 'INSERT_GOOGLE_CLICK_ID_HERE'
# The conversion time must be more recent than the time of the click.
CONVERSION_TIME = 'INSERT_CONVERSION_TIME_HERE'
CONVERSION_VALUE = 'INSERT_CONVERSION_VALUE_HERE'
def main(client, conversion_name, click_id, conversion_time, conversion_value):
# Initialize appropriate services.
conversion_tracker_service = client.GetService('ConversionTrackerService',
version='v201506')
offline_conversion_feed_service = client.GetService(
'OfflineConversionFeedService', version='v201506')
# Once created, this entry will be visible under
# Tools and Analysis->Conversion and will have "Source = Import".
upload_conversion = {
'xsi_type': 'UploadConversion',
'category': 'PAGE_VIEW',
'name': conversion_name,
'viewthroughLookbackWindow': '30',
'ctcLookbackWindow': '90'
}
upload_conversion_operation = {
'operator': 'ADD',
'operand': upload_conversion
}
response = conversion_tracker_service.mutate(
[upload_conversion_operation])
new_upload_conversion = response['value']
print ('New upload conversion type with name \'%s\' and ID \'%s\' was '
'created.' % (new_upload_conversion['name'],
new_upload_conversion['id']))
# Associate offline conversions with the upload conversion we created.
feed = {
'conversionName': conversion_name,
'conversionTime': conversion_time,
'conversionValue': conversion_value,
'googleClickId': click_id
}
offline_conversion_operation = {
'operator': 'ADD',
'operand': feed
}
offline_conversion_response = offline_conversion_feed_service.mutate(
[offline_conversion_operation])
new_feed = offline_conversion_response['value']
print ('Uploaded offline conversion value of \'%s\' for Google Click ID '
'\'%s\' to \'%s\'.' % (new_feed['conversionValue'],
new_feed['googleClickId'],
new_feed['conversionName']))
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, CONVERSION_NAME, CLICK_ID, CONVERSION_TIME,
CONVERSION_VALUE)
|
{
"content_hash": "40fe71e1a3867b2427deb0ce2533c511",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 79,
"avg_line_length": 33.8433734939759,
"alnum_prop": 0.6753292986828052,
"repo_name": "richardfergie/googleads-python-lib",
"id": "01d725cb5ac6339127eb689e4857d2317e9b336e",
"size": "3427",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/adwords/v201506/advanced_operations/upload_offline_conversions.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "168602"
}
],
"symlink_target": ""
}
|
import copy
from nailgun.db.sqlalchemy.models import NeutronConfig
from nailgun.db.sqlalchemy.models import NovaNetworkConfig
from nailgun.objects import ClusterCollection
from nailgun.objects import MasterNodeSettings
from nailgun.objects import NodeCollection
from nailgun.settings import settings
from nailgun.statistics.utils import get_attr_value
from nailgun.statistics.utils import WhiteListRule
from nailgun import utils
class InstallationInfo(object):
"""Collects info about Fuel installation
    Master nodes, clusters, networks, etc.
    Used for collecting data for Fuel statistics.
"""
attributes_white_list = (
# ((path, to, property), 'map_to_name', transform_function)
WhiteListRule(('common', 'libvirt_type', 'value'),
'libvirt_type', None),
WhiteListRule(('common', 'debug', 'value'), 'debug_mode', None),
WhiteListRule(('common', 'use_cow_images', 'value'),
'use_cow_images', None),
WhiteListRule(('common', 'auto_assign_floating_ip', 'value'),
'auto_assign_floating_ip', None),
WhiteListRule(('common', 'nova_quota', 'value'), 'nova_quota', None),
WhiteListRule(('common', 'puppet_debug', 'value'),
'puppet_debug', None),
WhiteListRule(('common', 'resume_guests_state_on_host_boot', 'value'),
'resume_guests_state_on_host_boot', None),
WhiteListRule(('corosync', 'verified', 'value'),
'corosync_verified', None),
WhiteListRule(('public_network_assignment', 'assign_to_all_nodes',
'value'), 'assign_public_to_all_nodes', None),
WhiteListRule(('syslog', 'syslog_transport', 'value'),
'syslog_transport', None),
WhiteListRule(('provision', 'method', 'value'),
'provision_method', None),
WhiteListRule(('kernel_params', 'kernel', 'value'),
'kernel_params', None),
WhiteListRule(('external_mongo', 'mongo_replset', 'value'),
'external_mongo_replset', bool),
WhiteListRule(('external_ntp', 'ntp_list', 'value'),
'external_ntp_list', bool),
WhiteListRule(('repo_setup', 'repos', 'value'), 'repos', bool),
WhiteListRule(('storage', 'volumes_lvm', 'value'),
'volumes_lvm', None),
WhiteListRule(('storage', 'iser', 'value'), 'iser', None),
WhiteListRule(('storage', 'volumes_ceph', 'value'),
'volumes_ceph', None),
WhiteListRule(('storage', 'images_ceph', 'value'),
'images_ceph', None),
WhiteListRule(('storage', 'images_vcenter', 'value'),
'images_vcenter', None),
WhiteListRule(('storage', 'ephemeral_ceph', 'value'),
'ephemeral_ceph', None),
WhiteListRule(('storage', 'objects_ceph', 'value'),
'objects_ceph', None),
WhiteListRule(('storage', 'osd_pool_size', 'value'),
'osd_pool_size', None),
WhiteListRule(('neutron_mellanox', 'plugin', 'value'),
'mellanox', None),
WhiteListRule(('neutron_mellanox', 'vf_num', 'value'),
'mellanox_vf_num', None),
WhiteListRule(('additional_components', 'sahara', 'value'),
'sahara', None),
WhiteListRule(('additional_components', 'murano', 'value'),
'murano', None),
WhiteListRule(('additional_components', 'heat', 'value'),
'heat', None),
WhiteListRule(('additional_components', 'ceilometer', 'value'),
'ceilometer', None),
WhiteListRule(('additional_components', 'mongo', 'value'),
'mongo', None),
WhiteListRule(('workloads_collector', 'enabled', 'value'),
'workloads_collector_enabled', None),
WhiteListRule(('public_ssl', 'horizon', 'value'),
'public_ssl_horizon', None),
WhiteListRule(('public_ssl', 'services', 'value'),
'public_ssl_services', None),
WhiteListRule(('public_ssl', 'cert_source', 'value'),
'public_ssl_cert_source', None),
)
vmware_attributes_white_list = (
# ((path, to, property), 'map_to_name', transform_function)
WhiteListRule(('value', 'availability_zones', 'cinder', 'enable'),
'vmware_az_cinder_enable', None),
        # We add 'vsphere_cluster' to the path in order to descend into the
        # nested list. The private value of 'vsphere_cluster' is not
        # collected; we only compute the length of the nested list.
WhiteListRule(('value', 'availability_zones', 'nova_computes',
'vsphere_cluster'), 'vmware_az_nova_computes_num', len),
)
def fuel_release_info(self):
versions = utils.get_fuel_release_versions(settings.FUEL_VERSION_FILE)
if settings.FUEL_VERSION_KEY not in versions:
versions[settings.FUEL_VERSION_KEY] = settings.VERSION
return versions[settings.FUEL_VERSION_KEY]
def get_network_configuration_info(self, cluster):
network_config = cluster.network_config
result = {}
if isinstance(network_config, NovaNetworkConfig):
result['net_manager'] = network_config.net_manager
result['fixed_networks_vlan_start'] = \
network_config.fixed_networks_vlan_start
result['fixed_network_size'] = network_config.fixed_network_size
result['fixed_networks_amount'] = \
network_config.fixed_networks_amount
elif isinstance(network_config, NeutronConfig):
result['segmentation_type'] = network_config.segmentation_type
result['net_l23_provider'] = network_config.net_l23_provider
return result
def get_clusters_info(self):
clusters = ClusterCollection.all()
clusters_info = []
for cluster in clusters:
release = cluster.release
nodes_num = NodeCollection.filter_by(
None, cluster_id=cluster.id).count()
vmware_attributes_editable = None
if cluster.vmware_attributes:
vmware_attributes_editable = cluster.vmware_attributes.editable
cluster_info = {
'id': cluster.id,
'nodes_num': nodes_num,
'release': {
'os': release.operating_system,
'name': release.name,
'version': release.version
},
'mode': cluster.mode,
'nodes': self.get_nodes_info(cluster.nodes),
'node_groups': self.get_node_groups_info(cluster.node_groups),
'status': cluster.status,
'attributes': self.get_attributes(cluster.attributes.editable,
self.attributes_white_list),
'vmware_attributes': self.get_attributes(
vmware_attributes_editable,
self.vmware_attributes_white_list
),
'net_provider': cluster.net_provider,
'fuel_version': cluster.fuel_version,
'is_customized': cluster.is_customized,
'network_configuration': self.get_network_configuration_info(
cluster),
'installed_plugins': self.get_cluster_plugins_info(cluster)
}
clusters_info.append(cluster_info)
return clusters_info
def get_cluster_plugins_info(self, cluster):
plugins_info = []
for plugin_inst in cluster.plugins:
plugin_info = {
"id": plugin_inst.id,
"name": plugin_inst.name,
"version": plugin_inst.version,
"releases": plugin_inst.releases,
"fuel_version": plugin_inst.fuel_version,
"package_version": plugin_inst.package_version,
}
plugins_info.append(plugin_info)
return plugins_info
def get_attributes(self, attributes, white_list):
result_attrs = {}
for path, map_to_name, func in white_list:
try:
result_attrs[map_to_name] = get_attr_value(
path, func, attributes)
except (KeyError, TypeError):
pass
return result_attrs
def get_node_meta(self, node):
meta = copy.deepcopy(node.meta)
result = {}
if not meta:
return result
to_copy = ['cpu', 'memory', 'disks']
for param in to_copy:
result[param] = meta.get(param)
system = meta.get('system', {})
system.pop('fqdn', None)
system.pop('serial', None)
result['system'] = system
interfaces = meta.get('interfaces', [])
result['interfaces'] = []
for interface in interfaces:
interface.pop('mac')
result['interfaces'].append(interface)
return result
def get_nodes_info(self, nodes):
nodes_info = []
for node in nodes:
node_info = {
'id': node.id,
'group_id': node.group_id,
'roles': node.roles,
'os': node.os_platform,
'status': node.status,
'error_type': node.error_type,
'online': node.online,
'manufacturer': node.manufacturer,
'platform_name': node.platform_name,
'meta': self.get_node_meta(node),
'pending_addition': node.pending_addition,
'pending_deletion': node.pending_deletion,
'pending_roles': node.pending_roles,
'nic_interfaces':
self.get_node_intefaces_info(node.nic_interfaces, bond=False),
'bond_interfaces':
self.get_node_intefaces_info(node.bond_interfaces, bond=True),
}
nodes_info.append(node_info)
return nodes_info
def get_node_intefaces_info(self, interfaces, bond):
ifs_info = []
for interface in interfaces:
if_info = {
'id': interface.id
}
if bond:
if_info['slaves'] = [s.id for s in interface.slaves]
ifs_info.append(if_info)
return ifs_info
def get_node_groups_info(self, node_groups):
groups_info = []
for group in node_groups:
group_info = {
'id': group.id,
'nodes': [n.id for n in group.nodes]
}
groups_info.append(group_info)
return groups_info
def get_installation_info(self):
clusters_info = self.get_clusters_info()
allocated_nodes_num = sum([c['nodes_num'] for c in clusters_info])
unallocated_nodes_num = NodeCollection.filter_by(
None, cluster_id=None).count()
info = {
'user_information': self.get_user_info(),
'master_node_uid': self.get_master_node_uid(),
'fuel_release': self.fuel_release_info(),
'clusters': clusters_info,
'clusters_num': len(clusters_info),
'allocated_nodes_num': allocated_nodes_num,
'unallocated_nodes_num': unallocated_nodes_num
}
return info
def get_master_node_uid(self):
return getattr(MasterNodeSettings.get_one(), 'master_node_uid', None)
def get_user_info(self):
try:
stat_settings = MasterNodeSettings.get_one(). \
settings.get("statistics", {})
result = {
"contact_info_provided":
stat_settings.get("user_choice_saved", {}).get("value", False)
and stat_settings.get("send_user_info", {}).get("value", False)
}
if result["contact_info_provided"]:
result["name"] = stat_settings.get("name", {}).get("value")
result["email"] = stat_settings.get("email", {}).get("value")
result["company"] = stat_settings.get("company", {}).\
get("value")
return result
except AttributeError:
return {"contact_info_provided": False}
|
{
"content_hash": "9ac885f00de937c41c3725412ad5a80c",
"timestamp": "",
"source": "github",
"line_count": 303,
"max_line_length": 79,
"avg_line_length": 41.29042904290429,
"alnum_prop": 0.5454400127887459,
"repo_name": "prmtl/fuel-web",
"id": "b1d4f75637bf1d245209df0c889952bb0015e85c",
"size": "13121",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nailgun/nailgun/statistics/fuel_statistics/installation_info.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "67993"
},
{
"name": "HTML",
"bytes": "7268"
},
{
"name": "JavaScript",
"bytes": "774488"
},
{
"name": "Mako",
"bytes": "1449"
},
{
"name": "Puppet",
"bytes": "282"
},
{
"name": "Python",
"bytes": "4031810"
},
{
"name": "Ruby",
"bytes": "36362"
},
{
"name": "Shell",
"bytes": "30651"
}
],
"symlink_target": ""
}
|
from pychron.lasers.stage_managers.remote_stage_manger import RemoteStageManager
class ChromiumStageManager(RemoteStageManager):
pass
# ============= EOF =============================================
|
{
"content_hash": "1eddba2defccf7f73bbd4389dae1f756",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 80,
"avg_line_length": 26,
"alnum_prop": 0.5817307692307693,
"repo_name": "NMGRL/pychron",
"id": "ee1265695a4cc20d115ba0d867d794d8c577f835",
"size": "945",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "pychron/lasers/stage_managers/chromium_stage_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "128"
},
{
"name": "C++",
"bytes": "3706"
},
{
"name": "CSS",
"bytes": "263"
},
{
"name": "Cython",
"bytes": "1692"
},
{
"name": "Fortran",
"bytes": "455875"
},
{
"name": "HTML",
"bytes": "46796"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Processing",
"bytes": "11421"
},
{
"name": "Python",
"bytes": "10773692"
},
{
"name": "Shell",
"bytes": "1003"
}
],
"symlink_target": ""
}
|
"""
Created on May 7, 2011
@author: jake
"""
from decimal import Decimal
from django import VERSION
from django.db import models
import moneyed
from djmoney.models.fields import MoneyField
from djmoney.models.managers import money_manager, understands_money
from .._compat import register
class ModelWithVanillaMoneyField(models.Model):
money = MoneyField(max_digits=10, decimal_places=2)
second_money = MoneyField(max_digits=10, decimal_places=2, default_currency='EUR')
integer = models.IntegerField(default=0)
class ModelWithDefaultAsInt(models.Model):
money = MoneyField(default=123, max_digits=10, decimal_places=2, default_currency='GHS')
class ModelWithUniqueIdAndCurrency(models.Model):
money = MoneyField(default=123, max_digits=10, decimal_places=2, default_currency='GHS')
class Meta:
unique_together = ('id', 'money')
class ModelWithDefaultAsStringWithCurrency(models.Model):
money = MoneyField(default='123 USD', max_digits=10, decimal_places=2)
class Meta:
verbose_name = 'model_default_string_currency'
class ModelWithDefaultAsString(models.Model):
money = MoneyField(default='123', max_digits=10, decimal_places=2, default_currency='PLN')
class ModelWithDefaultAsFloat(models.Model):
money = MoneyField(default=12.05, max_digits=10, decimal_places=2, default_currency='PLN')
class ModelWithDefaultAsDecimal(models.Model):
money = MoneyField(default=Decimal('0.01'), max_digits=10, decimal_places=2, default_currency='CHF')
class ModelWithDefaultAsMoney(models.Model):
money = MoneyField(default=moneyed.Money('0.01', 'RUB'), max_digits=10, decimal_places=2)
class ModelWithTwoMoneyFields(models.Model):
amount1 = MoneyField(max_digits=10, decimal_places=2)
amount2 = MoneyField(max_digits=10, decimal_places=3)
class ModelRelatedToModelWithMoney(models.Model):
moneyModel = models.ForeignKey(ModelWithVanillaMoneyField, on_delete=models.CASCADE)
class ModelWithChoicesMoneyField(models.Model):
money = MoneyField(
max_digits=10,
decimal_places=2,
currency_choices=[
(moneyed.USD, 'US Dollars'),
            (moneyed.ZWN, 'Zimbabwean')
],
)
class ModelWithNonMoneyField(models.Model):
money = MoneyField(max_digits=10, decimal_places=2, default_currency='USD')
desc = models.CharField(max_length=10)
class AbstractModel(models.Model):
money = MoneyField(max_digits=10, decimal_places=2, default_currency='USD')
m2m_field = models.ManyToManyField(ModelWithDefaultAsInt)
class Meta:
abstract = True
class InheritorModel(AbstractModel):
second_field = MoneyField(max_digits=10, decimal_places=2, default_currency='USD')
class RevisionedModel(models.Model):
amount = MoneyField(max_digits=10, decimal_places=2, default_currency='USD')
register(RevisionedModel)
class BaseModel(models.Model):
money = MoneyField(max_digits=10, decimal_places=2, default_currency='USD')
class InheritedModel(BaseModel):
second_field = MoneyField(max_digits=10, decimal_places=2, default_currency='USD')
class SimpleModel(models.Model):
money = MoneyField(max_digits=10, decimal_places=2, default_currency='USD')
class NullMoneyFieldModel(models.Model):
field = MoneyField(max_digits=10, decimal_places=2, null=True, default_currency='USD', blank=True)
class ProxyModel(SimpleModel):
class Meta:
proxy = True
class MoneyManager(models.Manager):
@understands_money
def super_method(self, **kwargs):
return self.filter(**kwargs)
class ModelWithCustomManager(models.Model):
field = MoneyField(max_digits=10, decimal_places=2)
manager = money_manager(MoneyManager())
class DateTimeModel(models.Model):
field = MoneyField(max_digits=10, decimal_places=2)
created = models.DateTimeField(null=True, blank=True)
if VERSION < (1, 7, 0):
from djmoney.contrib.django_rest_framework import register_money_field
from djmoney.admin import setup_admin_integration
register_money_field()
setup_admin_integration()
class ModelIssue300(models.Model):
money = models.ForeignKey(DateTimeModel, on_delete=models.CASCADE)
price = MoneyField(max_digits=10, decimal_places=2, default_currency='EUR', default=Decimal('0.0'))
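# Illustrative usage sketch (not part of the original test app; assumes the
# models above are migrated and Django is configured). A MoneyField stores and
# returns Money instances, so a test can do, for example:
#
#   instance = ModelWithVanillaMoneyField.objects.create(
#       money=moneyed.Money('10.00', 'USD'))
#   assert instance.money == moneyed.Money('10.00', 'USD')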
|
{
"content_hash": "ce28b9eff8e02be8b62241ef8a592d59",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 104,
"avg_line_length": 28.30263157894737,
"alnum_prop": 0.7333798233379824,
"repo_name": "rescale/django-money",
"id": "8fc7f57f690147e4c913a2e2cedc95ea10f7e31a",
"size": "4326",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/testapp/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1245"
},
{
"name": "Python",
"bytes": "119784"
}
],
"symlink_target": ""
}
|
import sys
sys.path.insert(0, './externals')
sys.path.insert(0, './apps')
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
|
{
"content_hash": "4c1549cade4a1afc61d787777902383a",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 299,
"avg_line_length": 41.785714285714285,
"alnum_prop": 0.7196581196581197,
"repo_name": "justquick/django-native-tags",
"id": "f3e87d9248143ec8c8076aea18465b753d543aa7",
"size": "607",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example_project/manage.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "90620"
},
{
"name": "Shell",
"bytes": "3090"
}
],
"symlink_target": ""
}
|
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from collections import OrderedDict
import contextlib
import functools
import gc
import itertools
import math
import os
import random
import re
import tempfile
import threading
import time
import unittest
from absl.testing import parameterized
import numpy as np
import six
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import tf2
from tensorflow.python.client import device_lib
from tensorflow.python.client import pywrap_tf_session
from tensorflow.python.client import session
from tensorflow.python.compat.compat import forward_compatibility_horizon
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import tape
from tensorflow.python.framework import config
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import gpu_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import tfrt_utils
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import control_flow_util_v2
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_ops # pylint: disable=unused-import
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.platform import _pywrap_stacktrace_handler
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import _pywrap_util_port
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.tf_export import tf_export
# If the below import is made available through the BUILD rule, then this
# function is overridden and will instead return True and cause Tensorflow
# graphs to be compiled with XLA.
def is_xla_enabled():
return False
try:
from tensorflow.python.framework.is_xla_test_true import is_xla_enabled # pylint: disable=g-import-not-at-top, unused-import
except Exception: # pylint: disable=broad-except
pass
# Uses the same mechanism as above to selectively enable/disable MLIR
# compilation.
def is_mlir_bridge_enabled():
return None
try:
from tensorflow.python.framework.is_mlir_bridge_test_false import is_mlir_bridge_enabled # pylint: disable=g-import-not-at-top, unused-import
except ImportError:
try:
from tensorflow.python.framework.is_mlir_bridge_test_true import is_mlir_bridge_enabled # pylint: disable=g-import-not-at-top, unused-import
except ImportError:
pass
def _get_object_count_by_type(exclude=()):
return (
collections.Counter([type(obj).__name__ for obj in gc.get_objects()]) -
collections.Counter([type(obj).__name__ for obj in exclude]))
@tf_export("test.gpu_device_name")
def gpu_device_name():
"""Returns the name of a GPU device if available or the empty string."""
for x in device_lib.list_local_devices():
if x.device_type == "GPU":
return compat.as_str(x.name)
return ""
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError("Expected op for node %s is different. %s vs %s" %
(node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError("Not all expected ops are present. Expected %s, found %s" %
(expected_ops.keys(), actual_ops.keys()))
return actual_ops
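# Example usage (illustrative sketch; the node name and op type below are
# hypothetical placeholders, not values guaranteed by this module):
#
#   # given a graph `g` known to contain a node "my_const" of op type "Const":
#   nodes = assert_ops_in_graph({"my_const": "Const"}, g)
#   # `nodes` maps "my_const" to its NodeDef; a missing node or a node with a
#   # different op type raises ValueError, as documented above.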
@tf_export("test.assert_equal_graph_def", v1=[])
def assert_equal_graph_def_v2(expected, actual):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent. This function
ignores randomized attribute values that may appear in V2 checkpoints.
Args:
expected: The `GraphDef` we expected.
actual: The `GraphDef` we have.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2=True,
hash_table_shared_name=True)
@tf_export(v1=["test.assert_equal_graph_def"])
def assert_equal_graph_def_v1(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
checkpoint_v2: boolean determining whether to ignore randomized attribute
values that appear in V2 checkpoints.
hash_table_shared_name: boolean determining whether to ignore randomized
shared_names that appear in HashTableV2 op defs.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2,
hash_table_shared_name)
def assert_equal_graph_def(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for actual, got %s" %
type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for expected, got %s" %
type(expected).__name__)
if checkpoint_v2:
_strip_checkpoint_v2_randomized(actual)
_strip_checkpoint_v2_randomized(expected)
if hash_table_shared_name:
_strip_hash_table_shared_name(actual)
_strip_hash_table_shared_name(expected)
diff = pywrap_tf_session.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
def assert_meta_graph_protos_equal(tester, a, b):
"""Compares MetaGraphDefs `a` and `b` in unit test class `tester`."""
# Carefully check the collection_defs
tester.assertEqual(set(a.collection_def), set(b.collection_def))
collection_keys = a.collection_def.keys()
for k in collection_keys:
a_value = a.collection_def[k]
b_value = b.collection_def[k]
proto_type = ops.get_collection_proto_type(k)
if proto_type:
a_proto = proto_type()
b_proto = proto_type()
# Number of entries in the collections is the same
tester.assertEqual(
len(a_value.bytes_list.value), len(b_value.bytes_list.value))
for (a_value_item, b_value_item) in zip(a_value.bytes_list.value,
b_value.bytes_list.value):
a_proto.ParseFromString(a_value_item)
b_proto.ParseFromString(b_value_item)
tester.assertProtoEquals(a_proto, b_proto)
else:
tester.assertEquals(a_value, b_value)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("collection_def")
b.ClearField("collection_def")
# Check the graph_defs.
assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
# Check graph_def versions (ignored by assert_equal_graph_def).
tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("graph_def")
b.ClearField("graph_def")
tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(compat.as_bytes(_SHARDED_SAVE_OP_PATTERN),
attr_tensor_string_value)):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
_TABLE_SHARED_NAME_PATTERN = r"hash_table_[0-9a-z\-]+"
def _strip_hash_table_shared_name(graph_def):
for node in graph_def.node:
delete_keys = []
if node.op == "HashTableV2" and "shared_name" in node.attr:
if re.match(compat.as_bytes(_TABLE_SHARED_NAME_PATTERN),
node.attr["shared_name"].s):
delete_keys.append("shared_name")
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
return _pywrap_util_port.IsGoogleCudaEnabled()
def IsBuiltWithROCm():
return _pywrap_util_port.IsBuiltWithROCm()
def IsBuiltWithXLA():
return _pywrap_util_port.IsBuiltWithXLA()
def IsBuiltWithNvcc():
return _pywrap_util_port.IsBuiltWithNvcc()
def GpuSupportsHalfMatMulAndConv():
return _pywrap_util_port.GpuSupportsHalfMatMulAndConv()
def IsMklEnabled():
return _pywrap_util_port.IsMklEnabled()
def InstallStackTraceHandler():
_pywrap_stacktrace_handler.InstallStacktraceHandler()
def NHWCToNCHW(input_tensor):
"""Converts the input from the NHWC format to NCHW.
Args:
input_tensor: a 3-, 4-, or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {3: [0, 2, 1], 4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
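# Worked example (sketch; the concrete shape values are illustrative only):
# applied to a plain shape list, the helper just permutes the entries, e.g.
# NHWCToNCHW([8, 224, 224, 3]) returns [8, 3, 224, 224]; applied to a Tensor it
# performs the equivalent array_ops.transpose with the same axis order.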
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
"""Transforms the input from the NHWC layout to NCHW_VECT_C layout.
Note: Does not include quantization or type conversion steps, which should
be applied afterwards.
Args:
input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NCHW_VECT_C
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not evenly
divisible by 4.
"""
permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
temp_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if temp_shape[-1] % 4 != 0:
raise ValueError(
"Last dimension of input must be evenly divisible by 4 to convert to "
"NCHW_VECT_C.")
temp_shape[-1] //= 4
temp_shape.append(4)
permutation = permutations[len(temp_shape)]
if is_tensor:
t = array_ops.reshape(input_shape_or_tensor, temp_shape)
return array_ops.transpose(t, permutation)
else:
return [temp_shape[a] for a in permutation]
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
"""Transforms the input from the NCHW_VECT_C layout to NHWC layout.
Note: Does not include de-quantization or type conversion steps, which should
be applied beforehand.
Args:
input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NHWC
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not 4.
"""
permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
input_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if input_shape[-1] != 4:
raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
permutation = permutations[len(input_shape)]
nhwc_shape = [input_shape[a] for a in permutation[:-1]]
nhwc_shape[-1] *= input_shape[-1]
if is_tensor:
t = array_ops.transpose(input_shape_or_tensor, permutation)
return array_ops.reshape(t, nhwc_shape)
else:
return nhwc_shape
def NCHWToNHWC(input_tensor):
"""Converts the input from the NCHW format to NHWC.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
def skip_if(condition):
"""Skips the decorated function if condition is or evaluates to True.
Args:
    condition: Either an expression that can be used in an "if not condition"
statement, or a callable whose result should be a boolean.
Returns:
The wrapped function
"""
def real_skip_if(fn):
def wrapper(*args, **kwargs):
if callable(condition):
skip = condition()
else:
skip = condition
if not skip:
return fn(*args, **kwargs)
return wrapper
return real_skip_if
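# Example usage (sketch; the test class and condition are hypothetical):
#
#   class MyOpTest(googletest.TestCase):
#
#     @skip_if(lambda: not is_gpu_available())
#     def testGpuPath(self):
#       ...
#
# Note that when the condition holds, the wrapped body is simply not executed
# (the test reports as passed rather than skipped).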
@contextlib.contextmanager
def skip_if_error(test_obj, error_type, messages=None):
"""Context manager to skip cases not considered failures by the tests.
Note that this does not work if used in setUpClass/tearDownClass.
Usage in setUp/tearDown works fine just like regular test methods.
Args:
test_obj: A test object provided as `self` in the test methods; this object
is usually an instance of `unittest.TestCase`'s subclass and should have
`skipTest` method.
error_type: The error type to skip. Note that if `messages` are given, both
`error_type` and `messages` need to match for the test to be skipped.
messages: Optional, a string or list of strings. If `None`, the test will be
skipped if `error_type` matches what is raised; otherwise, the test is
skipped if any of the `messages` is contained in the message of the error
raised, and `error_type` matches the error raised.
Yields:
Nothing.
"""
if messages:
messages = nest.flatten(messages)
try:
yield
except error_type as e:
if not messages or any(message in str(e) for message in messages):
test_obj.skipTest("Skipping error: {}: {}".format(type(e), str(e)))
else:
raise
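# Example usage (sketch; the error message and the flaky helper are
# hypothetical):
#
#   def testRemoteCall(self):
#     with skip_if_error(self, errors_impl.UnavailableError, "socket closed"):
#       self._run_remote_op()   # hypothetical helper that may be flaky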
def enable_c_shapes(fn):
"""No-op. TODO(b/74620627): Remove this."""
return fn
def with_c_shapes(cls):
"""No-op. TODO(b/74620627): Remove this."""
return cls
def enable_control_flow_v2(fn):
"""Decorator for enabling CondV2 and WhileV2 on a test.
Note this enables using CondV2 and WhileV2 after running the test class's
setup/teardown methods.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
enable_control_flow_v2_old = control_flow_util.ENABLE_CONTROL_FLOW_V2
control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
try:
return fn(*args, **kwargs)
finally:
control_flow_util.ENABLE_CONTROL_FLOW_V2 = enable_control_flow_v2_old
return wrapper
def with_control_flow_v2(cls):
"""Adds methods that call original methods with WhileV2 and CondV2 enabled.
Note this enables CondV2 and WhileV2 in new methods after running the test
class's setup method.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
If a test function has _disable_control_flow_v2 attr set to True (using the
@disable_control_flow_v2 decorator), the v2 function is not generated for it.
Example:
@test_util.with_control_flow_v2
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
@test_util.disable_control_flow_v2("b/xyzabc")
def testDisabledForV2(self):
...
Generated class:
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
def testEnabledForV2WithControlFlowV2(self):
// Enable V2 flags.
testEnabledForV2(self)
// Restore V2 flags.
def testDisabledForV2(self):
...
Args:
cls: class to decorate
Returns:
cls with new test methods added
"""
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
return cls
for name, value in cls.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix) and
not getattr(value, "_disable_control_flow_v2", False)):
setattr(cls, name + "WithControlFlowV2", enable_control_flow_v2(value))
return cls
def disable_control_flow_v2(unused_msg):
"""Decorator for a function in a with_control_flow_v2 enabled test class.
Blocks the function from being run with v2 control flow ops.
Args:
unused_msg: Reason for disabling.
Returns:
The wrapped function with _disable_control_flow_v2 attr set to True.
"""
def wrapper(func):
func._disable_control_flow_v2 = True
return func
return wrapper
def enable_output_all_intermediates(fn):
"""Force-enable outputing all intermediates from functional control flow ops.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
output_all_intermediates_old = \
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = True
try:
return fn(*args, **kwargs)
finally:
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = \
output_all_intermediates_old
return wrapper
def assert_no_new_pyobjects_executing_eagerly(func=None, warmup_iters=2):
"""Decorator for asserting that no new Python objects persist after a test.
Runs the test multiple times executing eagerly, first as a warmup and then to
let objects accumulate. The warmup helps ignore caches which do not grow as
the test is run repeatedly.
  Useful for checking that there are no missing Py_DECREFs in the C code
  exercised by a bit of Python.
Args:
func: The function to test.
    warmup_iters: The number of warmup iterations, excluded from measurement.
Returns:
The wrapped function performing the test.
"""
def wrap_f(f):
def decorator(self, *args, **kwargs):
"""Warms up, gets object counts, runs the test, checks for new objects."""
with context.eager_mode():
gc.disable()
# Run the test 2 times as warmup, in an attempt to fill up caches, which
# should not grow as the test is run repeatedly below.
#
# TODO(b/117156879): Running warmup twice is black magic; we have seen
# tests that fail with 1 warmup run, and pass with 2, on various
# versions of python2.7.x.
for _ in range(warmup_iters):
f(self, *args, **kwargs)
        # Since we aren't in the normal test lifecycle, we need to manually run
# cleanups to clear out their object references.
self.doCleanups()
# Some objects are newly created by _get_object_count_by_type(). So
# create and save as a dummy variable to include it as a baseline.
obj_count_by_type = _get_object_count_by_type()
gc.collect()
# unittest.doCleanups adds to self._outcome with each unwound call.
# These objects are retained across gc collections so we exclude them
# from the object count calculation.
obj_count_by_type = _get_object_count_by_type(
exclude=gc.get_referents(self._outcome.errors,
self._outcome.skipped))
if ops.has_default_graph():
collection_sizes_before = {
collection: len(ops.get_collection(collection))
for collection in ops.get_default_graph().collections
}
for _ in range(3):
f(self, *args, **kwargs)
        # Since we aren't in the normal test lifecycle, we need to manually run
# cleanups to clear out their object references.
self.doCleanups()
# Note that gc.get_objects misses anything that isn't subject to garbage
# collection (C types). Collections are a common source of leaks, so we
# test for collection sizes explicitly.
if ops.has_default_graph():
for collection_key in ops.get_default_graph().collections:
collection = ops.get_collection(collection_key)
size_before = collection_sizes_before.get(collection_key, 0)
if len(collection) > size_before:
raise AssertionError(
("Collection %s increased in size from "
"%d to %d (current items %s).") %
(collection_key, size_before, len(collection), collection))
# Make sure our collection checks don't show up as leaked memory by
# removing references to temporary variables.
del collection
del collection_key
del size_before
del collection_sizes_before
gc.collect()
# There should be no new Python objects hanging around.
obj_count_by_type = (
_get_object_count_by_type(
exclude=gc.get_referents(self._outcome.errors,
self._outcome.skipped)) -
obj_count_by_type)
        # In some cases (specifically on MacOS), the object count after the
        # test run is somehow smaller than the baseline count.
# Using plain assert because not all classes using this decorator
# have assertLessEqual
assert not obj_count_by_type, (
"The following objects were newly created: %s" %
str(obj_count_by_type))
gc.enable()
return decorator
if func is None:
return wrap_f
else:
return wrap_f(func)
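# Example usage (sketch; the test class is hypothetical):
#
#   class LeakCheckTest(googletest.TestCase):
#
#     @assert_no_new_pyobjects_executing_eagerly(warmup_iters=3)
#     def testNoLeak(self):
#       ...  # any net growth in Python object counts fails the assertion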
def assert_no_new_tensors(f):
"""Decorator for asserting that no new Tensors persist after a test.
Mainly useful for checking that code using the Python C API has correctly
manipulated reference counts.
Clears the caches that it knows about, runs the garbage collector, then checks
that there are no Tensor or Tensor-like objects still around. This includes
Tensors to which something still has a reference (e.g. from missing
Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
of the objects has __del__ defined).
Args:
f: The test case to run.
Returns:
The decorated test case.
"""
def decorator(self, **kwargs):
"""Finds existing Tensors, runs the test, checks for new Tensors."""
def _is_tensorflow_object(obj):
try:
return isinstance(obj,
(ops.Tensor, variables.Variable,
tensor_shape.Dimension, tensor_shape.TensorShape))
except (ReferenceError, AttributeError):
# If the object no longer exists, we don't care about it.
return False
tensors_before = set(
id(obj) for obj in gc.get_objects() if _is_tensorflow_object(obj))
outside_executed_eagerly = context.executing_eagerly()
# Run the test in a new graph so that collections get cleared when it's
# done, but inherit the graph key so optimizers behave.
outside_graph_key = ops.get_default_graph()._graph_key
with ops.Graph().as_default():
ops.get_default_graph()._graph_key = outside_graph_key
if outside_executed_eagerly:
with context.eager_mode():
result = f(self, **kwargs)
else:
result = f(self, **kwargs)
# Make an effort to clear caches, which would otherwise look like leaked
# Tensors.
context.context()._clear_caches() # pylint: disable=protected-access
gc.collect()
tensors_after = [
obj for obj in gc.get_objects()
if _is_tensorflow_object(obj) and id(obj) not in tensors_before
]
if tensors_after:
raise AssertionError(("%d Tensors not deallocated after test: %s" % (
len(tensors_after),
str(tensors_after),
)))
return result
return decorator
def _find_reference_cycle(objects, idx):
def get_ignore_reason(obj, denylist):
"""Tests whether an object should be omitted from the dependency graph."""
if len(denylist) > 100:
return "<depth limit>"
if tf_inspect.isframe(obj):
if "test_util.py" in tf_inspect.getframeinfo(obj)[0]:
return "<test code>"
for b in denylist:
if b is obj:
return "<test code>"
if obj is denylist:
return "<test code>"
return None
# Note: this function is meant to help with diagnostics. Its output is purely
# a human-readable representation, so you may freely modify it to suit your
# needs.
def describe(obj, denylist, leaves_only=False):
"""Returns a custom human-readable summary of obj.
Args:
obj: the value to describe.
denylist: same as denylist in get_ignore_reason.
leaves_only: boolean flag used when calling describe recursively. Useful
for summarizing collections.
"""
if get_ignore_reason(obj, denylist):
return "{}{}".format(get_ignore_reason(obj, denylist), type(obj))
if tf_inspect.isframe(obj):
return "frame: {}".format(tf_inspect.getframeinfo(obj))
elif tf_inspect.ismodule(obj):
return "module: {}".format(obj.__name__)
else:
if leaves_only:
return "{}, {}".format(type(obj), id(obj))
elif isinstance(obj, list):
return "list({}): {}".format(
id(obj), [describe(e, denylist, leaves_only=True) for e in obj])
elif isinstance(obj, tuple):
return "tuple({}): {}".format(
id(obj), [describe(e, denylist, leaves_only=True) for e in obj])
elif isinstance(obj, dict):
return "dict({}): {} keys".format(id(obj), len(obj.keys()))
elif tf_inspect.isfunction(obj):
return "function({}) {}; globals ID: {}".format(
id(obj), obj.__name__, id(obj.__globals__))
else:
return "{}, {}".format(type(obj), id(obj))
def build_ref_graph(obj, graph, reprs, denylist):
"""Builds a reference graph as <referrer> -> <list of referents>.
Args:
obj: The object to start from. The graph will be built by recursively
adding its referrers.
graph: Dict holding the graph to be built. To avoid creating extra
references, the graph holds object IDs rather than actual objects.
reprs: Auxiliary structure that maps object IDs to their human-readable
description.
denylist: List of objects to ignore.
"""
referrers = gc.get_referrers(obj)
denylist = denylist + (referrers,)
obj_id = id(obj)
for r in referrers:
if get_ignore_reason(r, denylist) is None:
r_id = id(r)
if r_id not in graph:
graph[r_id] = []
if obj_id not in graph[r_id]:
graph[r_id].append(obj_id)
build_ref_graph(r, graph, reprs, denylist)
reprs[r_id] = describe(r, denylist)
def find_cycle(el, graph, reprs, path):
"""Finds and prints a single cycle in the dependency graph."""
if el not in graph:
return
for r in graph[el]:
if r in path:
logging.error("Reference cycle sample:")
for p in path + (r,):
logging.error(reprs.get(p, "unknown object " + str(p)))
return True
else:
if find_cycle(r, graph, reprs, path + (r,)):
return True
return False
obj = objects[idx]
graph = {} # referrer ID -> object ID
reprs = {} # object ID -> description
build_ref_graph(obj, graph, reprs, (objects, graph, reprs, get_ignore_reason,
describe, build_ref_graph, find_cycle))
for k in graph:
if find_cycle(k, graph, reprs, ()):
return True
return False
def assert_no_garbage_created(f):
"""Test method decorator to assert that no garbage has been created.
Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters
cannot be un-set (i.e. will disable garbage collection for any other unit
tests in the same file/shard).
Args:
f: The function to decorate.
Returns:
The decorated function.
"""
def decorator(self, **kwargs):
"""Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
# Force-load `distribution_strategy_context` to prevent GC at
# test time when using eager. Remove once b/117329403 is resolved.
tape.distribution_strategy_context.get_strategy()
gc.disable()
previous_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
gc.collect()
previous_garbage = len(gc.garbage)
result = f(self, **kwargs)
gc.collect()
new_garbage = len(gc.garbage)
if new_garbage > previous_garbage:
logging.error(
"The decorated test created work for Python's garbage collector, "
"likely due to a reference cycle. New objects in cycle(s):")
for i, obj in enumerate(gc.garbage[previous_garbage:]):
try:
logging.error("Object %d of %d", i,
len(gc.garbage) - previous_garbage)
def _safe_object_str(obj):
return "<%s %d>" % (obj.__class__.__name__, id(obj))
logging.error(" Object type: %s", _safe_object_str(obj))
logging.error(
" Referrer types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referrers(obj)]))
logging.error(
" Referent types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referents(obj)]))
logging.error(" Object attribute names: %s", dir(obj))
logging.error(" Object __str__:")
logging.error(obj)
logging.error(" Object __repr__:")
logging.error(repr(obj))
except Exception: # pylint: disable=broad-except
logging.error("(Exception while printing object)")
# When garbage is created, this call can help identify reference cycles,
# which are typically the cause of such garbage.
if new_garbage > previous_garbage:
for i in range(previous_garbage, new_garbage):
if _find_reference_cycle(gc.garbage, i):
break
# This will fail if any garbage has been created, typically because of a
# reference cycle.
self.assertEqual(previous_garbage, new_garbage)
# TODO(allenl): Figure out why this debug flag reset doesn't work. It would
# be nice to be able to decorate arbitrary tests in a large test suite and
# not hold on to every object in other tests.
gc.set_debug(previous_debug_flags)
gc.enable()
return result
return decorator
def _combine_named_parameters(**kwargs):
"""Generate combinations based on its keyword arguments.
Two sets of returned combinations can be concatenated using +. Their product
can be computed using `times()`.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
sort_by_key = lambda k: k[0]
combinations = []
for key, values in sorted(kwargs.items(), key=sort_by_key):
if not isinstance(values, list):
values = [values]
combinations.append([(key, value) for value in values])
return [OrderedDict(result) for result in itertools.product(*combinations)]
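# Worked example (sketch): _combine_named_parameters(mode=["graph", "eager"],
# use_gpu=True) returns
#   [OrderedDict([("mode", "graph"), ("use_gpu", True)]),
#    OrderedDict([("mode", "eager"), ("use_gpu", True)])]
# i.e. the Cartesian product over the per-option value lists, with keys sorted.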
def generate_combinations_with_testcase_name(**kwargs):
"""Generate combinations based on its keyword arguments using combine().
  This function calls _combine_named_parameters() and appends a testcase name
  to each of the dictionaries returned. The 'testcase_name' key is required for
  named parameterized tests.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
combinations = _combine_named_parameters(**kwargs)
named_combinations = []
for combination in combinations:
assert isinstance(combination, OrderedDict)
name = "".join([
"_{}_{}".format("".join(filter(str.isalnum, key)),
"".join(filter(str.isalnum, str(value))))
for key, value in combination.items()
])
named_combinations.append(
OrderedDict(
list(combination.items()) +
[("testcase_name", "_test{}".format(name))]))
return named_combinations
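# Worked example (sketch): generate_combinations_with_testcase_name(
# mode=["graph", "eager"]) produces entries such as
#   OrderedDict([("mode", "graph"), ("testcase_name", "_test_mode_graph")])
# which can be fed directly to absl's parameterized.named_parameters.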
def run_all_in_graph_and_eager_modes(cls):
"""Execute all test methods in the given class with and without eager."""
base_decorator = run_in_graph_and_eager_modes
for name in dir(cls):
if (not name.startswith(unittest.TestLoader.testMethodPrefix) or
name.startswith("testSkipEager") or
name.startswith("test_skip_eager") or
name == "test_session"):
continue
value = getattr(cls, name, None)
if callable(value):
setattr(cls, name, base_decorator(value))
return cls
def build_as_function_and_v1_graph(func=None):
"""Run a test case in v1 graph mode and inside tf.function in eager mode.
  WARNING: This decorator can only be used in test cases that statically check
  the generated graph. Attempting to evaluate graph or function results via
  session.run() or self.evaluate() will fail.
WARNING: This decorator can only be used for test cases that inherit from
absl.testing.parameterized.TestCase.
Args:
func: Test case function to be decorated.
Returns:
Decorated test case function.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_mode_and_function` only supports test methods.")
@parameterized.named_parameters(("_v1_graph", "v1_graph"),
("_function", "function"))
@functools.wraps(f)
def decorated(self, run_mode, *args, **kwargs):
if run_mode == "v1_graph":
with ops.Graph().as_default():
f(self, *args, **kwargs)
elif run_mode == "function":
@def_function.function
def function_in_eager():
f(self, *args, **kwargs)
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
function_in_eager()
ops.dismantle_graph(graph_for_eager_test)
else:
return ValueError("Unknown run mode %s" % run_mode)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_in_async_and_sync_mode(f):
"""Execute the test in async mode and sync mode."""
@parameterized.named_parameters([("Async", True), ("", False)])
@functools.wraps(f)
def decorator(self, async_mode, *args, **kwargs):
if async_mode:
with context.execution_mode(context.ASYNC):
f(self, *args, **kwargs)
else:
with context.execution_mode(context.SYNC):
f(self, *args, **kwargs)
return decorator
def eager_lazy_remote_copy_on_and_off(f):
"""Execute the test method w/o lazy tensor copy for function remote inputs."""
@parameterized.named_parameters([("WithLazyRemoteCopy", True), ("", False)])
@functools.wraps(f)
def decorator(self, lazily_remote_copy, *args, **kwargs):
if lazily_remote_copy:
context.context().lazy_remote_inputs_copy = True
else:
context.context().lazy_remote_inputs_copy = False
f(self, *args, **kwargs)
return decorator
def run_in_graph_and_eager_modes(func=None,
config=None,
use_gpu=True,
assert_no_eager_garbage=False):
"""Execute the decorated test with and without enabling eager execution.
This function returns a decorator intended to be applied to test methods in
a `tf.test.TestCase` class. Doing so will cause the contents of the test
method to be executed twice - once normally, and once with eager execution
enabled. This allows unittests to confirm the equivalence between eager
and graph execution (see `tf.compat.v1.enable_eager_execution`).
For example, consider the following unittest:
```python
class MyTests(tf.test.TestCase):
@run_in_graph_and_eager_modes
def test_foo(self):
x = tf.constant([1, 2])
y = tf.constant([3, 4])
z = tf.add(x, y)
self.assertAllEqual([4, 6], self.evaluate(z))
if __name__ == "__main__":
tf.test.main()
```
This test validates that `tf.add()` has the same behavior when computed with
eager execution enabled as it does when constructing a TensorFlow graph and
executing the `z` tensor in a session.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
config: An optional config_pb2.ConfigProto to use to configure the session
when executing graphs.
use_gpu: If True, attempt to run as many operations as possible on GPU.
assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
collector and asserts that no extra garbage has been created when running
the test with eager execution enabled. This will fail if there are
reference cycles (e.g. a = []; a.append(a)). Off by default because some
tests may create garbage for legitimate reasons (e.g. they define a class
which inherits from `object`), and because DEBUG_SAVEALL is sticky in some
Python interpreters (meaning that tests which rely on objects being
collected elsewhere in the unit test file will not work). Additionally,
checks that nothing still has a reference to Tensors that the test
allocated.
Returns:
Returns a decorator that will run the decorated test method twice:
once by constructing and executing a graph in a session and once with
eager execution enabled.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_and_eager_modes` only supports test methods. "
"Did you mean to use `run_all_in_graph_and_eager_modes`?")
def decorated(self, *args, **kwargs):
try:
with context.graph_mode():
with self.test_session(use_gpu=use_gpu, config=config):
f(self, *args, **kwargs)
except unittest.case.SkipTest:
pass
def run_eagerly(self, **kwargs):
if not use_gpu:
with ops.device("/device:CPU:0"):
f(self, *args, **kwargs)
else:
f(self, *args, **kwargs)
if assert_no_eager_garbage:
ops.reset_default_graph()
run_eagerly = assert_no_new_tensors(
assert_no_garbage_created(run_eagerly))
# This decorator runs the wrapped test twice.
# Reset the test environment between runs.
self.tearDown()
self._tempdir = None
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
self.setUp()
run_eagerly(self, **kwargs)
ops.dismantle_graph(graph_for_eager_test)
return tf_decorator.make_decorator(f, decorated)
if func is not None:
return decorator(func)
return decorator
def py_func_if_in_function(f):
def decorated(*args, **kwds):
if not ops.inside_function():
return f(*args, **kwds)
tensor_args = []
tensor_indices = []
for i, arg in enumerate(args):
if isinstance(arg, (ops.Tensor, variables.Variable)):
tensor_args.append(arg)
tensor_indices.append(i)
def inner_f(*inner_tensor_args):
my_args = list(args)
for i, n in zip(tensor_indices, inner_tensor_args):
my_args[i] = n
return f(*my_args, **kwds)
return script_ops.py_func(inner_f, tensor_args, [])
return tf_decorator.make_decorator(f, decorated)
def also_run_as_tf_function(f):
"""Runs the decorated test twice--once as is, once inside a tf.function.
This allows you to run a test both in eager execution and inside a
tf.function, exercising the two execution modes supported in tf 2.0. The test
assertions are automatically done inside tf.py_funcs, and tf.function ensures
that they run in the proper order and with the proper side effects.
Currently variable creation is not supported in tests annotated with this
decorator since it's tricky to ensure the variable doesn't get repeatedly
created when retracing the tf.function.
Args:
f: the test method to be decorated
Returns:
The decorated test method, which will run both in eager and inside a
tf.function.
"""
def decorated(*args, **kwds):
def bound_f():
f(*args, **kwds)
with context.eager_mode():
# Running in eager mode
bound_f()
# Running as TF function
# TODO(b/121143941): Remove the autograph override.
def_function.function(bound_f, autograph=False)()
return decorated
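# Example usage (sketch; the test class and body are hypothetical):
#
#   class MyBehaviorTest(googletest.TestCase):
#
#     @also_run_as_tf_function
#     def test_behavior(self):
#       ...  # assertions here run eagerly, then again inside a tf.function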
def deprecated_graph_mode_only(func=None):
"""Execute the decorated test in graph mode.
This function returns a decorator intended to be applied to tests that are not
compatible with eager mode. When this decorator is applied, the test body will
be run in an environment where API calls construct graphs instead of executing
eagerly.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will run the decorated test method in graph mode.
"""
def decorator(f):
if tf_inspect.isclass(f):
setup = f.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
for name, value in f.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix)):
setattr(f, name, decorator(value))
return f
def decorated(self, *args, **kwargs):
if context.executing_eagerly():
with context.graph_mode():
return f(self, *args, **kwargs)
else:
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
run_deprecated_v1 = deprecated_graph_mode_only
def run_all_in_deprecated_graph_mode_only(cls):
"""Execute all tests in a class in graph mode."""
base_decorator = deprecated_graph_mode_only
for name in dir(cls):
if (not name.startswith(unittest.TestLoader.testMethodPrefix) or
name == "test_session"):
continue
value = getattr(cls, name, None)
if callable(value):
setattr(cls, name, base_decorator(value))
return cls
def run_v1_only(reason, func=None):
"""Execute the decorated test only if running in v1 mode.
This function is intended to be applied to tests that exercise v1 only
functionality. If the test is run in v2 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
reason: string giving a reason for limiting the test to v1 only.
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
if not isinstance(reason, str):
raise ValueError("'reason' should be string, got {}".format(type(reason)))
def decorator(f):
if tf_inspect.isclass(f):
# To skip an entire test suite class, we only decorate the setUp method
# to skip all tests. There are cases when setUp is not defined (not
# overridden in subclasses of TestCase, so not available in f.__dict__
# below). For those cases, we walk the method resolution order list and
# pick the first setUp method we find (usually this should be the one in
# the parent class since that's the TestCase class).
for cls in type.mro(f):
setup = cls.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
break
return f
else:
# If f is just a function, just create a decorator for it and return it
def decorated(self, *args, **kwargs):
if tf2.enabled():
self.skipTest(reason)
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_v2_only(func=None):
"""Execute the decorated test only if running in v2 mode.
This function is intended to be applied to tests that exercise v2 only
functionality. If the test is run in v1 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_v2_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not tf2.enabled():
self.skipTest("Test is only compatible with v2")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_gpu_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a GPU. If a GPU is absent, it will simply be skipped.
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_gpu_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available():
self.skipTest("Test requires GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_cuda_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a CUDA GPU. If a CUDA GPU is absent, it will simply be skipped.
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_cuda_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available(cuda_only=True):
self.skipTest("Test requires CUDA GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def with_forward_compatibility_horizons(*horizons):
"""Executes the decorated test with the specified forward-compat horizons.
Args:
*horizons: A list of (year, month, day) tuples. If the list includes
`None`, then the test will also be run with no forward-compatibility
horizon set.
Returns:
A decorator that will execute the test with the specified horizons.
"""
if not horizons:
raise ValueError("Expected at least one horizon.")
for horizon in horizons:
if not ((horizon is None) or
(len(horizon) == 3 and all(isinstance(x, int) for x in horizon))):
raise ValueError("Bad horizon value: %r" % horizon)
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`with_forward_compatibility_horizons` only "
"supports test methods.")
def decorated(self, *args, **kwargs):
for horizon in horizons:
if horizon is None:
f(self, *args, **kwargs)
else:
(year, month, day) = horizon
with forward_compatibility_horizon(year, month, day):
f(self, *args, **kwargs)
return decorated
return decorator
@deprecation.deprecated(None,
"Use `tf.config.list_physical_devices('GPU')` instead.")
@tf_export("test.is_gpu_available")
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
"""Returns whether TensorFlow can access a GPU.
  Warning: if a non-GPU version of the package is installed, the function will
  also return False. Use `tf.test.is_built_with_cuda` to check whether
  TensorFlow was built with CUDA support.
For example,
>>> gpu_available = tf.test.is_gpu_available()
>>> is_cuda_gpu_available = tf.test.is_gpu_available(cuda_only=True)
>>> is_cuda_gpu_min_3 = tf.test.is_gpu_available(True, (3,0))
Args:
cuda_only: limit the search to CUDA GPUs.
min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
CUDA compute capability required, or None if no requirement.
  Note that the keyword arg name "cuda_only" is misleading (the routine will
  return True when a GPU device is available, irrespective of whether TF was
  built with CUDA support or ROCm support). However, no changes are made here
  because:
  ++ Changing the name "cuda_only" to something more generic would break
  backward compatibility.
  ++ Adding an equivalent "rocm_only" would require the implementation to check
  the build type. This in turn would require doing the same for CUDA and thus
  potentially break backward compatibility.
  ++ Adding a new "cuda_or_rocm_only" would not break backward compatibility,
  but would require most (if not all) callers to update the call to use
  "cuda_or_rocm_only" instead of "cuda_only".
Returns:
True if a GPU device of the requested kind is available.
"""
# This was needed earlier when we had support for SYCL in TensorFlow.
del cuda_only
try:
for local_device in device_lib.list_local_devices():
if local_device.device_type == "GPU":
gpu_info = gpu_util.compute_capability_from_device_desc(local_device)
cc = gpu_info.compute_capability or (0, 0)
if not min_cuda_compute_capability or cc >= min_cuda_compute_capability:
return True
return False
except errors_impl.NotFoundError as e:
if not all(x in str(e) for x in ["CUDA", "not find"]):
raise e
else:
logging.error(str(e))
return False
@contextlib.contextmanager
def device(use_gpu):
"""Uses gpu when requested and available."""
if use_gpu and is_gpu_available():
dev = "/device:GPU:0"
else:
dev = "/device:CPU:0"
with ops.device(dev):
yield
@contextlib.contextmanager
def use_gpu():
"""Uses gpu when requested and available."""
with device(use_gpu=True):
yield
@contextlib.contextmanager
def force_gpu():
"""Force the gpu to be used."""
with ops.device("/device:GPU:0"):
yield
@contextlib.contextmanager
def force_cpu():
"""Force the cpu to be used."""
with ops.device("/device:CPU:0"):
yield
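# Example usage (sketch; `a` and `b` are hypothetical tensors): the context
# managers above compose with ordinary op construction, e.g.
#
#   with use_gpu():
#     c = math_ops.matmul(a, b)   # placed on GPU:0 when one is available,
#                                 # otherwise falls back to CPU:0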
class CapturedWrites(object):
"""A utility class to load the captured writes made to a stream."""
def __init__(self, capture_location):
self.capture_location = capture_location
def contents(self):
"""Get the captured writes as a single string."""
with open(self.capture_location) as tmp_file:
output_data = "".join(tmp_file.readlines())
return output_data
class FakeEagerSession(object):
"""Fake session so tests that conditionally use placeholders can use eager.
There are a number of tests that conditionally use placeholders for shape
inference. The pattern is demonstrated here:
```python
with self.cached_session() as sess:
if static_shape:
y = math_ops.matmul(x, ...)
feed_dict = {}
else:
x_ph = array_ops.placeholder(...)
y = math_ops.matmul(x_ph, ...)
feed_dict = {x_ph: x}
val = sess.run(y, feed_dict=feed_dict)
```
  Since the feed_dict is empty when not using placeholders, we should be able to
  call self.evaluate(); however, this requires rewriting the test case.
This class should be considered a stop-gap solution to get tests running with
eager with minimal changes to the actual test.
"""
def __init__(self, test_case):
self._test_case = test_case
def run(self, fetches, *args, **kwargs):
"""Evaluate `fetches`.
Fail if additional args are specified.
Args:
fetches: A Tensor or a nested list/tuple of Tensors.
*args: Positional arguments
**kwargs: Keyword arguments
Raises:
RuntimeError: If args or kwargs are specified.
Returns:
Tensors as numpy values.
"""
feed_dict = kwargs.pop("feed_dict", {})
if feed_dict:
raise RuntimeError(
"feed_dict is not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
if args or kwargs:
raise RuntimeError(
"Optional args are not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
return self._test_case.evaluate(fetches)
class ErrorLoggingSession(session.Session):
"""Wrapper around a Session that logs errors in run()."""
def run(self, *args, **kwargs):
try:
return super(ErrorLoggingSession, self).run(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
      # Note: disable the logging for OutOfRangeError, which makes the output
      # of tf.data tests hard to read, because OutOfRangeError is used to
      # signal completion.
if not isinstance(e, errors.OutOfRangeError):
logging.error(str(e))
raise
def disable_cudnn_autotune(func):
"""Disable autotuning during the call to this function.
Some tests want to base assertions on a graph being isomorphic with a copy.
To ensure this, this decorator disables autotuning.
Args:
func: Function to run with CuDNN autotuning turned off.
Returns:
Decorated function.
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_tf_cudnn_use_autotune = os.environ.get("TF_CUDNN_USE_AUTOTUNE")
os.environ["TF_CUDNN_USE_AUTOTUNE"] = "false"
original_xla_flags = os.environ.get("XLA_FLAGS")
new_xla_flags = "--xla_gpu_autotune_level=0"
if original_xla_flags:
new_xla_flags = original_xla_flags + " " + new_xla_flags
os.environ["XLA_FLAGS"] = new_xla_flags
result = f(self, *args, **kwargs)
if (original_tf_cudnn_use_autotune is None):
del os.environ["TF_CUDNN_USE_AUTOTUNE"]
else:
os.environ["TF_CUDNN_USE_AUTOTUNE"] = original_tf_cudnn_use_autotune
if (original_xla_flags is None):
del os.environ["XLA_FLAGS"]
else:
os.environ["XLA_FLAGS"] = original_xla_flags
return result
return decorated
if func is not None:
return decorator(func)
return decorator
# The description is just for documentation purposes.
def enable_tf_xla_constant_folding(description):
if not isinstance(description, str):
raise ValueError("'description' should be string, got {}".format(
type(description)))
def enable_tf_xla_constant_folding_impl(func):
"""Enable constant folding during the call to this function.
Some tests fail without constant folding.
Args:
func: Function to run with constant folding turned on.
Returns:
Decorated function.
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_var = pywrap_tf_session.TF_GetXlaConstantFoldingDisabled()
pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(False)
result = f(self, *args, **kwargs)
pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(original_var)
return result
return decorated
if func is not None:
return decorator(func)
return decorator
return enable_tf_xla_constant_folding_impl
# Updates test function by selectively disabling it.
def _disable_test(execute_func):
def disable_test_impl(func):
def decorator(func):
def decorated(self, *args, **kwargs):
if execute_func:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return disable_test_impl
# The description is just for documentation purposes.
def disable_xla(description): # pylint: disable=unused-argument
"""Execute the test method only if xla is not enabled."""
execute_func = not is_xla_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def disable_mlir_bridge(description): # pylint: disable=unused-argument
"""Execute the test method only if MLIR bridge is not enabled."""
execute_func = not is_mlir_bridge_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def disable_tfrt(unused_description):
def disable_tfrt_impl(cls_or_func):
"""Execute the test only if tfrt is not enabled."""
if tf_inspect.isclass(cls_or_func):
if tfrt_utils.enabled():
return None
else:
return cls_or_func
else:
def decorator(func):
def decorated(self, *args, **kwargs):
if tfrt_utils.enabled():
return
else:
return func(self, *args, **kwargs)
return decorated
if cls_or_func is not None:
return decorator(cls_or_func)
return decorator
return disable_tfrt_impl
def for_all_test_methods(decorator, *args, **kwargs):
"""Generate class-level decorator from given method-level decorator.
  The given decorator is expected to take some arguments and return a
  method-level decorator that is then applied to each test method to produce a
  decorated method.
Args:
decorator: The decorator to apply.
*args: Positional arguments
**kwargs: Keyword arguments
  Returns: Function that will decorate a given class's test methods with the
    decorator.
"""
def all_test_methods_impl(cls):
"""Apply decorator to all test methods in class."""
for name in dir(cls):
value = getattr(cls, name)
if callable(value) and name.startswith(
"test") and (name != "test_session"):
setattr(cls, name, decorator(*args, **kwargs)(value))
return cls
return all_test_methods_impl
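# Illustrative usage sketch (not part of the original module): `_example_prefix_logger`
# is a hypothetical method-level decorator factory; `for_all_test_methods` applies it
# to every method whose name starts with "test" (except "test_session").
def _example_prefix_logger(prefix):
  def method_decorator(f):
    @functools.wraps(f)
    def wrapped(self, *args, **kwargs):
      logging.info("%s%s", prefix, f.__name__)
      return f(self, *args, **kwargs)
    return wrapped
  return method_decorator
@for_all_test_methods(_example_prefix_logger, "running ")
class _ExampleDecoratedTests(object):
  def test_something(self):
    pass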
# The description is just for documentation purposes.
def no_xla_auto_jit(description): # pylint: disable=unused-argument
"""This test is not intended to be run with XLA auto jit enabled."""
execute_func = not is_xla_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def xla_allow_fallback(description): # pylint: disable=unused-argument
def xla_allow_fallback_impl(func):
"""Allow fallback to TF even though testing xla."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
# Update the global XLABuildOpsPassFlags to enable lazy compilation,
# which allows the compiler to fall back to TF classic. Remember the
# old value so that we can reset it.
old_value = pywrap_tf_session.TF_SetXlaEnableLazyCompilation(True)
result = func(self, *args, **kwargs)
pywrap_tf_session.TF_SetXlaEnableLazyCompilation(old_value)
return result
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return xla_allow_fallback_impl
# The description is just for documentation purposes.
def run_without_tensor_float_32(description): # pylint: disable=unused-argument
"""Execute test with TensorFloat-32 disabled.
While almost every real-world deep learning model runs fine with
TensorFloat-32, many tests use assertAllClose or similar methods.
TensorFloat-32 matmuls typically will cause such methods to fail with the
default tolerances.
Args:
description: A description used for documentation purposes, describing why
the test requires TensorFloat-32 to be disabled.
Returns:
Decorator which runs a test with TensorFloat-32 disabled.
"""
def decorator(f):
@functools.wraps(f)
def decorated(self, *args, **kwargs):
allowed = config.tensor_float_32_execution_enabled()
try:
config.enable_tensor_float_32_execution(False)
f(self, *args, **kwargs)
finally:
config.enable_tensor_float_32_execution(allowed)
return decorated
return decorator
# The description is just for documentation purposes.
def run_all_without_tensor_float_32(description): # pylint: disable=unused-argument
"""Execute all tests in a class with TensorFloat-32 disabled."""
return for_all_test_methods(run_without_tensor_float_32, description)
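# Illustrative usage sketch (not part of the original module; class/method names
# are hypothetical, and a real test would subclass the TensorFlowTestCase defined
# later in this file). TensorFloat-32 is only disabled while each decorated test
# method is executing.
@run_all_without_tensor_float_32("assertAllClose tolerances assume fp32 matmuls")
class _ExampleTf32DisabledTests(object):
  def test_matmul_precision(self):
    pass
# A single method can be decorated the same way with
# `@run_without_tensor_float_32("reason")` instead of decorating the whole class.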
def matmul_without_tf32(a, b, *args, **kwargs):
"""Run matmul but cast float32 inputs to float64 if TensorFloat-32 is enabled.
This effectively runs matmul without TensorFloat-32. It should only be used in
  tests when verifying some other op or function works correctly, e.g. to test
`tf.linalg.sqrtm` by matrix multiplying the output of the op by itself. In
such cases, the matmul itself is not being tested so it's OK to run it with
higher precision.
If a matmul itself is being tested, or some other op which uses matmul, use
`run_without_tensor_float_32` instead.
This also casts complex64 inputs to complex128, since TensorFloat-32 can also
  be used with complex64.
Args:
a: First input to tf.linalg.matmul
b: Second input to tf.linalg.matmul
    *args: Other positional arguments to tf.linalg.matmul
**kwargs: Other keyword arguments to tf.linalg.matmul
Returns:
A tensor with the same type as `a`.
"""
if config.tensor_float_32_execution_enabled() and a.dtype == "float32":
a = math_ops.cast(a, "float64")
b = math_ops.cast(b, "float64")
ret = math_ops.matmul(a, b, *args, **kwargs)
return math_ops.cast(ret, a.dtype)
elif config.tensor_float_32_execution_enabled() and a.dtype == "complex64":
a = math_ops.cast(a, "complex128")
b = math_ops.cast(b, "complex128")
ret = math_ops.matmul(a, b, *args, **kwargs)
return math_ops.cast(ret, a.dtype)
else:
return math_ops.matmul(a, b, *args, **kwargs)
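# Illustrative usage sketch (not part of the original module): computing a
# reference value with `matmul_without_tf32` so that TensorFloat-32 rounding in
# the matmul itself cannot mask an error in the op under test.
def _example_matmul_without_tf32_reference():
  a = ops.convert_to_tensor([[1.0, 2.0], [3.0, 4.0]])
  # Full-precision float32 reference product; a float32 tensor is returned.
  return matmul_without_tf32(a, a)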
class EagerSessionWarner(object):
def __getattr__(self, attr):
raise AttributeError(
"Trying to access properties or call methods on the result of "
"self.session(), self.cached_session(), etc while eager execution "
"is enabled. If you're porting this test case to TF 2.0, either "
"adapt the test to work with eager execution or insert a call to "
"tf.disable_eager_execution() in the main() function of this test "
"file.")
@tf_export("test.TestCase")
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow."""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TensorFlowTestCase, self).__init__(methodName)
if is_xla_enabled():
pywrap_tf_session.TF_SetXlaAutoJitMode("2")
pywrap_tf_session.TF_SetXlaMinClusterSize(1)
pywrap_tf_session.TF_SetXlaEnableLazyCompilation(False)
pywrap_tf_session.TF_SetTfXlaCpuGlobalJit(True)
# Constant folding secretly runs code on TF:Classic CPU, so we also
# disable it here.
pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(True)
# Check if the mlir bridge has been explicitly enabled or disabled. If
    # is_mlir_bridge_enabled() returns None, the user did not explicitly enable
# or disable the bridge so do not update enable_mlir_bridge.
if is_mlir_bridge_enabled():
context.context().enable_mlir_bridge = True
elif is_mlir_bridge_enabled() is not None:
context.context().enable_mlir_bridge = False
self._threads = []
self._tempdir = None
self._cached_session = None
self._test_start_time = None
def setUp(self):
super(TensorFlowTestCase, self).setUp()
self._ClearCachedSession()
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
# Note: The following line is necessary because some test methods may error
# out from within nested graph contexts (e.g., via assertRaises and
# assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
# under certain versions of Python. That would cause
# ops.reset_default_graph() to throw an exception if the stack were not
# cleared first.
ops._default_graph_stack.reset() # pylint: disable=protected-access
ops.reset_default_graph()
random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
# Reset summary writer in case another test used set_as_default() with their
# summary writer.
summary_state = summary_ops_v2._summary_state # pylint: disable=protected-access
summary_state.writer = None
# Avoiding calling setUp() for the poorly named test_session method.
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
self._test_start_time = time.time()
def tearDown(self):
# If a subclass overrides setUp and doesn't call the parent class's setUp,
# then we may not have set the start time.
if self._test_start_time is not None:
logging.info("time(%s): %ss", self.id(),
round(time.time() - self._test_start_time, 2))
for thread in self._threads:
thread.check_termination()
self._ClearCachedSession()
super(TensorFlowTestCase, self).tearDown()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
    If you call this method multiple times during a test, it will return the
    same folder. However, across different runs the directories will be
    different. This will ensure that across different runs tests will not be
    able to pollute each other's environment.
If you need multiple unique directories within a single test, you should
use tempfile.mkdtemp as follows:
tempfile.mkdtemp(dir=self.get_temp_dir()):
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
@contextlib.contextmanager
def captureWritesToStream(self, stream):
"""A context manager that captures the writes to a given stream.
This context manager captures all writes to a given stream inside of a
`CapturedWrites` object. When this context manager is created, it yields
the `CapturedWrites` object. The captured contents can be accessed by
calling `.contents()` on the `CapturedWrites`.
For this function to work, the stream must have a file descriptor that
can be modified using `os.dup` and `os.dup2`, and the stream must support
a `.flush()` method. The default python sys.stdout and sys.stderr are
examples of this. Note that this does not work in Colab or Jupyter
notebooks, because those use alternate stdout streams.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
input = [1.0, 2.0, 3.0, 4.0, 5.0]
with self.captureWritesToStream(sys.stdout) as captured:
result = MyOperator(input).eval()
self.assertStartsWith(captured.contents(), "This was printed.")
```
Args:
stream: The stream whose writes should be captured. This stream must have
a file descriptor, support writing via using that file descriptor, and
must have a `.flush()` method.
Yields:
A `CapturedWrites` object that contains all writes to the specified stream
made during this context.
"""
stream.flush()
fd = stream.fileno()
tmp_file_path = tempfile.mktemp(dir=self.get_temp_dir())
tmp_file = open(tmp_file_path, "w")
orig_fd = os.dup(fd)
os.dup2(tmp_file.fileno(), fd)
try:
yield CapturedWrites(tmp_file_path)
finally:
tmp_file.close()
os.dup2(orig_fd, fd)
def _AssertProtoEquals(self, a, b, msg=None):
"""Asserts that a and b are the same proto.
Uses ProtoEq() first, as it returns correct results
    for floating point attributes, and then uses assertProtoEqual()
in case of failure as it provides good error messages.
Args:
a: a proto.
b: another proto.
msg: Optional message to report on failure.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg)
def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another prototype of message, reads the ascii message into it and
then compares them using self._AssertProtoEqual().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
msg: Optional message to report on failure.
"""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message, msg=msg)
elif isinstance(expected_message_maybe_ascii, (str, bytes)):
expected_message = type(message)()
text_format.Merge(
expected_message_maybe_ascii,
expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message, msg=msg)
else:
assert False, ("Can't compare protos of type %s and %s." %
(type(expected_message_maybe_ascii), type(message)))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER,
msg=None):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual, msg=msg)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
def _eval_tensor(self, tensor):
if tensor is None:
return None
elif callable(tensor):
return self._eval_helper(tensor())
else:
try:
if sparse_tensor.is_sparse(tensor):
return sparse_tensor.SparseTensorValue(tensor.indices.numpy(),
tensor.values.numpy(),
tensor.dense_shape.numpy())
elif ragged_tensor.is_ragged(tensor):
return ragged_tensor_value.RaggedTensorValue(
self._eval_tensor(tensor.values),
self._eval_tensor(tensor.row_splits))
elif isinstance(tensor, ops.IndexedSlices):
return ops.IndexedSlicesValue(
values=tensor.values.numpy(),
indices=tensor.indices.numpy(),
dense_shape=tensor.dense_shape.numpy())
# Convert tensors and composite tensors to numpy arrays.
return nest.map_structure(lambda t: t.numpy(), tensor,
expand_composites=True)
except AttributeError as e:
six.raise_from(ValueError("Unsupported type %s." % type(tensor)), e)
def _eval_helper(self, tensors):
if tensors is None:
return None
return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
tensors numpy values.
"""
if context.executing_eagerly():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session() as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
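  # Illustrative helper (not part of the original class): the same assertion
  # works under both graph mode (runs the tensor in a test session) and eager
  # mode (calls .numpy() on the result).
  def _example_evaluate_usage(self):
    x = ops.convert_to_tensor([1.0, 2.0, 3.0])
    self.assertAllClose([2.0, 4.0, 6.0], self.evaluate(x * 2.0))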
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def session(self, graph=None, config=None, use_gpu=False, force_gpu=False):
"""A context manager for a TensorFlow Session for use in executing tests.
Note that this will set this session and the graph as global defaults.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
    possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
``` python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
          self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield EagerSessionWarner()
else:
with self._create_session(graph, config, force_gpu) as sess:
with self._constrain_devices_and_set_default(sess, use_gpu, force_gpu):
yield sess
@contextlib.contextmanager
def cached_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
    This method behaves differently from self.session(): for performance reasons
`cached_session` will by default reuse the same session within the same
test. The session returned by this function will only be closed at the end
of the test (in the TearDown function).
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
    possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.cached_session(use_gpu=True) as sess:
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
          self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield FakeEagerSession(self)
else:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=True)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
@contextlib.contextmanager
@deprecation.deprecated(None, "Use `self.session()` or "
"`self.cached_session()` instead.")
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Use cached_session instead."""
if self.id().endswith(".test_session"):
self.skipTest(
"Tests that have the name \"test_session\" are automatically skipped "
"by TensorFlow test fixture, as the name is reserved for creating "
"sessions within tests. Please rename your test if you have a test "
"with this name.")
if context.executing_eagerly():
yield None
else:
if graph is None:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=False)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
else:
with self.session(graph, config, use_gpu, force_gpu) as sess:
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
self._is_thread_joined = False
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
        self._testcase.failureException: If the thread terminates due to
          an exception.
"""
self._is_thread_joined = True
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
      This method returns True from just before the run() method starts
      until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def check_termination(self):
"""Returns whether the checked thread was properly used and did terminate.
Every checked thread should be "join"ed after starting, and before the
test tears down. If it is not joined, it is possible the thread will hang
and cause flaky failures in tests.
Raises:
self._testcase.failureException: If check_termination was called before
thread was joined.
RuntimeError: If the thread is not terminated. This means thread was not
joined with the main thread.
"""
if self._is_thread_joined:
if self.is_alive():
raise RuntimeError(
"Thread was not joined with main thread, and is still running "
"when the test finished.")
else:
self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
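  # Illustrative helper (not part of the original class): exceptions raised in
  # the worker function surface as a test failure when join() is called.
  def _example_checked_thread_usage(self):
    def worker(value):
      if value < 0:
        raise ValueError("negative value")
    t = self.checkedThread(target=worker, args=(1,))
    t.start()
    t.join()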
# pylint: enable=invalid-name
@py_func_if_in_function
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
    Checks that |f1 - f2| <= err and asserts a test failure
    if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
# f1 == f2 is needed here as we might have: f1, f2 = inf, inf
self.assertTrue(
f1 == f2 or math.fabs(f1 - f2) <= err, "%f != %f +/- %f%s" %
(f1, f2, err, " (%s)" % msg if msg is not None else ""))
@py_func_if_in_function
def assertArrayNear(self, farray1, farray2, err, msg=None):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
msg: Optional message to report on failure.
"""
self.assertEqual(len(farray1), len(farray2), msg=msg)
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err, msg=msg)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
@py_func_if_in_function
def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
msg: Optional message to report on failure.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg)
def _GetNdArray(self, a):
# If a is tensor-like then convert it to ndarray
if tensor_util.is_tensor(a):
if isinstance(a, ops._EagerTensorBase):
a = a.numpy()
else:
a = self.evaluate(a)
if not isinstance(a, np.ndarray):
return np.array(a)
return a
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# When the array rank is small, print its contents. Numpy array printing is
# implemented using inefficient recursion so prints can cause tests to
# time out.
if a.shape != b.shape and (b.ndim <= 3 or b.size < 500):
shape_mismatch_msg = ("Shape mismatch: expected %s, got %s with contents "
"%s.") % (a.shape, b.shape, b)
else:
shape_mismatch_msg = "Shape mismatch: expected %s, got %s." % (a.shape,
b.shape)
self.assertEqual(a.shape, b.shape, shape_mismatch_msg)
msgs = [msg]
# np.allclose does not always work for our custom bfloat16 extension type
# when type promotions are involved, so we first cast any bfloat16 arrays
# to float32.
a_dtype = a.dtype
a = a.astype(np.float32) if a.dtype == dtypes.bfloat16.as_numpy_dtype else a
b = b.astype(np.float32) if b.dtype == dtypes.bfloat16.as_numpy_dtype else b
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Adds more details to np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# tell user which elements violate such conditions.
cond = np.logical_or(
np.abs(a - b) > atol + rtol * np.abs(b),
np.isnan(a) != np.isnan(b))
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
msgs.append("not close where = {}".format(np.where(cond)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not close lhs = {}".format(x))
msgs.append("not close rhs = {}".format(y))
msgs.append("not close dif = {}".format(np.abs(x - y)))
msgs.append("not close tol = {}".format(atol + rtol * np.abs(y)))
msgs.append("dtype = {}, shape = {}".format(a_dtype, a.shape))
# TODO(xpan): There seems to be a bug:
# tensorflow/compiler/tests:binary_ops_test pass with float32
# nan even though the equal_nan is False by default internally.
np.testing.assert_allclose(
a, b, rtol=rtol, atol=atol, err_msg="\n".join(msgs), equal_nan=True)
def _assertAllCloseRecursive(self,
a,
b,
rtol=1e-6,
atol=1e-6,
path=None,
msg=None):
path = path or []
path_str = (("[" + "][".join(str(p) for p in path) + "]") if path else "")
msg = msg if msg else ""
# Check if a and/or b are namedtuples.
if hasattr(a, "_asdict"):
a = a._asdict()
if hasattr(b, "_asdict"):
b = b._asdict()
a_is_dict = isinstance(a, collections_abc.Mapping)
if a_is_dict != isinstance(b, collections_abc.Mapping):
raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
(path_str, path_str, msg))
if a_is_dict:
self.assertItemsEqual(
a.keys(),
b.keys(),
msg="mismatched keys: a%s has keys %s, but b%s has keys %s. %s" %
(path_str, a.keys(), path_str, b.keys(), msg))
for k in a:
path.append(k)
self._assertAllCloseRecursive(
a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
elif isinstance(a, (list, tuple)):
      # Try to directly compare a, b as ndarrays; if that does not work, then
      # traverse the sequence, which is more expensive.
try:
a_as_ndarray = self._GetNdArray(a)
b_as_ndarray = self._GetNdArray(b)
self._assertArrayLikeAllClose(
a_as_ndarray,
b_as_ndarray,
rtol=rtol,
atol=atol,
msg="Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg))
except (ValueError, TypeError) as e:
if len(a) != len(b):
raise ValueError(
"Mismatched length: a%s has %d items, but b%s has %d items. %s" %
(path_str, len(a), path_str, len(b), msg))
for idx, (a_ele, b_ele) in enumerate(zip(a, b)):
path.append(str(idx))
self._assertAllCloseRecursive(
a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
# a and b are ndarray like objects
else:
try:
self._assertArrayLikeAllClose(
a,
b,
rtol=rtol,
atol=atol,
msg=("Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg)))
except TypeError as e:
msg = ("Error: a%s has %s, but b%s has %s. %s" %
(path_str, type(a), path_str, type(b), msg))
e.args = ((e.args[0] + " : " + msg,) + e.args[1:])
raise
@py_func_if_in_function
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Asserts that two structures of numpy arrays or Tensors, have near values.
`a` and `b` can be arbitrarily nested structures. A layer of a nested
structure can be a `dict`, `namedtuple`, `tuple` or `list`.
Note: the implementation follows
[`numpy.allclose`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html)
(and numpy.testing.assert_allclose). It checks whether two arrays are
element-wise equal within a tolerance. The relative difference
(`rtol * abs(b)`) and the absolute difference `atol` are added together
to compare against the absolute difference between `a` and `b`.
Args:
a: The expected numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray` (including Tensor), or any arbitrarily nested
structure of these.
b: The actual numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray` (including Tensor), or any arbitrarily nested
structure of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
ValueError: if only one of `a[p]` and `b[p]` is a dict or
`a[p]` and `b[p]` have different length, where `[p]` denotes a path
to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and
`[p] = [1]['d']`, then `a[p] = (6, 7)`.
"""
if ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b):
return self._assertRaggedClose(a, b, rtol, atol, msg)
self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg)
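  # Illustrative helper (not part of the original class): assertAllClose accepts
  # nested dict/list structures mixing Tensors, numpy values and Python numbers,
  # comparing the leaves element-wise within rtol/atol.
  def _example_assert_all_close_usage(self):
    expected = {"w": [1.0, 2.0], "b": 0.5}
    actual = {"w": ops.convert_to_tensor([1.0, 2.000001]), "b": np.float64(0.5)}
    self.assertAllClose(expected, actual, rtol=1e-5, atol=1e-5)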
@py_func_if_in_function
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3,
bfloat16_rtol=1e-2,
bfloat16_atol=1e-2,
msg=None):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
In particular, the tolerance is reduced to 1e-3 if at least
one of the arguments is of type float16.
Args:
      a: the expected numpy ndarray or anything that can be converted to one.
      b: the actual numpy ndarray or anything that can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
bfloat16_rtol: relative tolerance for bfloat16.
bfloat16_atol: absolute tolerance for bfloat16.
msg: Optional message to report on failure.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# types with lower tol are put later to overwrite previous ones.
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
b.dtype == dtypes.bfloat16.as_numpy_dtype):
rtol = max(rtol, bfloat16_rtol)
atol = max(atol, bfloat16_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertNotAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Assert that two numpy arrays, or Tensors, do not have near values.
Args:
a: The expected numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray` (including Tensor), or any arbitrarily nested
structure of these.
b: The actual numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray` (including Tensor), or any arbitrarily nested
structure of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
AssertionError: If `a` and `b` are unexpectedly close at all elements.
"""
try:
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
except AssertionError:
return
msg = msg or ""
raise AssertionError("The two values are close at all elements. %s" % msg)
@py_func_if_in_function
def assertAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors have the same values.
Args:
      a: the expected numpy ndarray or anything that can be converted to one.
      b: the actual numpy ndarray or anything that can be converted to one.
msg: Optional message to report on failure.
"""
if (ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b)):
return self._assertRaggedEqual(a, b, msg)
msg = msg if msg else ""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# Arbitrary bounds so that we don't print giant tensors.
if (b.ndim <= 3 or b.size < 500):
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" Contents: %r. \n%s." % (a.shape, b.shape, b, msg))
else:
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" %s" % (a.shape, b.shape, msg))
same = (a == b)
if (a.dtype in [
np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype
]):
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
msgs = [msg]
if not np.all(same):
# Adds more details to np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
msgs.append("not equal where = {}".format(np.where(diff)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not equal lhs = %r" % x)
msgs.append("not equal rhs = %r" % y)
# Handle mixed string types as a result of PY2to3 migration. That is, the
# mixing between bytes (b-prefix strings, PY2 default) and unicodes
# (u-prefix strings, PY3 default).
if six.PY3:
if (a.dtype.kind != b.dtype.kind and
{a.dtype.kind, b.dtype.kind}.issubset({"U", "S", "O"})):
a_list = []
b_list = []
# OK to flatten `a` and `b` because they are guaranteed to have the
# same shape.
for out_list, flat_arr in [(a_list, a.flat), (b_list, b.flat)]:
for item in flat_arr:
if isinstance(item, str):
out_list.append(item.encode("utf-8"))
else:
out_list.append(item)
a = np.array(a_list)
b = np.array(b_list)
np.testing.assert_array_equal(a, b, err_msg="\n".join(msgs))
@py_func_if_in_function
def assertNotAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors do not have the same values.
Args:
      a: the expected numpy ndarray or anything that can be converted to one.
      b: the actual numpy ndarray or anything that can be converted to one.
msg: Optional message to report on failure.
"""
try:
self.assertAllEqual(a, b)
except AssertionError:
return
raise AssertionError("The two values are equal at all elements. %s" % msg)
@py_func_if_in_function
def assertAllGreater(self, a, comparison_target):
"""Assert element values are all greater than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreater(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLess(self, a, comparison_target):
"""Assert element values are all less than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLess(np.max(a), comparison_target)
@py_func_if_in_function
def assertAllGreaterEqual(self, a, comparison_target):
"""Assert element values are all greater than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreaterEqual(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLessEqual(self, a, comparison_target):
"""Assert element values are all less than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLessEqual(np.max(a), comparison_target)
def _format_subscripts(self, subscripts, value, limit=10, indent=2):
"""Generate a summary of ndarray subscripts as a list of str.
If limit == N, this method will print up to the first N subscripts on
separate
lines. A line of ellipses (...) will be appended at the end if the number of
subscripts exceeds N.
Args:
subscripts: The tensor (np.ndarray) subscripts, of the same format as
np.where()'s return value, i.e., a tuple of arrays with each array
corresponding to a dimension. E.g., (array([1, 1]), array([0, 1])).
value: (np.ndarray) value of the tensor.
limit: (int) The maximum number of indices to print.
indent: (int) Number of characters to indent at the beginning of each
line.
Returns:
(list of str) the multi-line representation of the subscripts and values,
potentially with omission at the end.
"""
lines = []
subscripts = np.transpose(subscripts)
prefix = " " * indent
if np.ndim(value) == 0:
return [prefix + "[0] : " + str(value)]
for subscript in itertools.islice(subscripts, limit):
lines.append(prefix + str(subscript) + " : " +
str(value[tuple(subscript)]))
if len(subscripts) > limit:
lines.append(prefix + "...")
return lines
@py_func_if_in_function
def assertAllInRange(self,
target,
lower_bound,
upper_bound,
open_lower_bound=False,
open_upper_bound=False):
"""Assert that elements in a Tensor are all in a given range.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
lower_bound: lower bound of the range
upper_bound: upper bound of the range
open_lower_bound: (`bool`) whether the lower bound is open (i.e., > rather
than the default >=)
open_upper_bound: (`bool`) whether the upper bound is open (i.e., < rather
than the default <=)
Raises:
AssertionError:
if the value tensor does not have an ordered numeric type (float* or
int*), or
if there are nan values, or
if any of the elements do not fall in the specified range.
"""
target = self._GetNdArray(target)
if not (np.issubdtype(target.dtype, np.floating) or
np.issubdtype(target.dtype, np.integer)):
raise AssertionError(
"The value of %s does not have an ordered numeric type, instead it "
"has type: %s" % (target, target.dtype))
nan_subscripts = np.where(np.isnan(target))
if np.size(nan_subscripts):
raise AssertionError(
"%d of the %d element(s) are NaN. "
"Subscripts(s) and value(s) of the NaN element(s):\n" %
(len(nan_subscripts[0]), np.size(target)) +
"\n".join(self._format_subscripts(nan_subscripts, target)))
range_str = (("(" if open_lower_bound else "[") + str(lower_bound) + ", " +
str(upper_bound) + (")" if open_upper_bound else "]"))
violations = (
np.less_equal(target, lower_bound) if open_lower_bound else np.less(
target, lower_bound))
violations = np.logical_or(
violations,
np.greater_equal(target, upper_bound)
if open_upper_bound else np.greater(target, upper_bound))
violation_subscripts = np.where(violations)
if np.size(violation_subscripts):
raise AssertionError(
"%d of the %d element(s) are outside the range %s. " %
(len(violation_subscripts[0]), np.size(target), range_str) +
"Subscript(s) and value(s) of the offending elements:\n" +
"\n".join(self._format_subscripts(violation_subscripts, target)))
@py_func_if_in_function
def assertAllInSet(self, target, expected_set):
"""Assert that elements of a Tensor are all in a given closed set.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_set: (`list`, `tuple` or `set`) The closed set that the elements
of the value of `target` are expected to fall into.
Raises:
AssertionError:
if any of the elements do not fall into `expected_set`.
"""
target = self._GetNdArray(target)
# Elements in target that are not in expected_set.
diff = np.setdiff1d(target.flatten(), list(expected_set))
if np.size(diff):
raise AssertionError("%d unique element(s) are not in the set %s: %s" %
(np.size(diff), expected_set, diff))
@py_func_if_in_function
def assertDTypeEqual(self, target, expected_dtype):
"""Assert ndarray data type is equal to expected.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_dtype: Expected data type.
"""
target = self._GetNdArray(target)
if not isinstance(target, list):
arrays = [target]
for arr in arrays:
self.assertEqual(arr.dtype, expected_dtype)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
If the exception is an OpError, the op stack is also included in the message
predicate search.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in exception and returns True
(success) or False (please fail the test). Otherwise, the error message
is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message if isinstance(e, errors.OpError) else str(e)
op = e.op if isinstance(e, errors.OpError) else None
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op # pylint: disable=protected-access
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
except Exception as e: # pylint: disable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError("Exception of type %s: %s" %
(str(type(e)), str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
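  # Illustrative helper (not part of the original class): the predicate form of
  # assertRaisesWithPredicateMatch and the regex form via assertRaisesOpError.
  def _example_assert_raises_usage(self):
    with self.assertRaisesWithPredicateMatch(
        ValueError, lambda e: "bad shape" in str(e)):
      raise ValueError("bad shape: expected rank 2")
    with self.assertRaisesOpError("resource .* not found"):
      raise errors.NotFoundError(None, None, "resource handle not found")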
def assertShapeEqual(self, np_array, tf_tensor, msg=None):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
msg: Optional message to report on failure.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(
np_array.shape, tf_tensor.get_shape().as_list(), msg=msg)
def assertDeviceEqual(self, device1, device2, msg=None):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
msg: Optional message to report on failure.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(
device1, device2,
"Devices %s and %s are not equal. %s" % (device1, device2, msg))
def _GetPyList(self, a):
"""Converts `a` to a nested python list."""
if isinstance(a, ragged_tensor.RaggedTensor):
return self.evaluate(a).to_list()
elif isinstance(a, ops.Tensor):
a = self.evaluate(a)
return a.tolist() if isinstance(a, np.ndarray) else a
elif isinstance(a, np.ndarray):
return a.tolist()
elif isinstance(a, ragged_tensor_value.RaggedTensorValue):
return a.to_list()
else:
return np.array(a).tolist()
def _assertRaggedEqual(self, a, b, msg):
"""Asserts that two ragged tensors are equal."""
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self.assertEqual(a_list, b_list, msg)
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertRaggedClose(self, a, b, rtol, atol, msg=None):
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self._assertListCloseRecursive(a_list, b_list, rtol, atol, msg)
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertListCloseRecursive(self, a, b, rtol, atol, msg, path="value"):
self.assertEqual(type(a), type(b))
if isinstance(a, (list, tuple)):
self.assertLen(a, len(b), "Length differs for %s" % path)
for i in range(len(a)):
self._assertListCloseRecursive(a[i], b[i], rtol, atol, msg,
"%s[%s]" % (path, i))
else:
self._assertAllCloseRecursive(a, b, rtol, atol, path, msg)
# Fix Python 3+ compatibility issues
if not six.PY2:
# pylint: disable=invalid-name
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
# pylint: enable=invalid-name
@contextlib.contextmanager
def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
"""Set the session and its graph to global default and constrain devices."""
if context.executing_eagerly():
yield None
else:
with sess.graph.as_default(), sess.as_default():
if force_gpu:
# Use the name of an actual device if one is detected, or
# '/device:GPU:0' otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/device:CPU:0"):
yield sess
def _create_session(self, graph, config, force_gpu):
"""See session() for details."""
def prepare_config(config):
"""Returns a config for sessions.
Args:
config: An optional config_pb2.ConfigProto to use to configure the
session.
Returns:
A config_pb2.ConfigProto object.
"""
# TODO(b/114333779): Enforce allow_soft_placement=False when
# use_gpu=False. Currently many tests rely on the fact that any device
# will be used even when a specific device is supposed to be used.
allow_soft_placement = not force_gpu
if config is None:
config = context.context().config
config.allow_soft_placement = allow_soft_placement
elif not allow_soft_placement and config.allow_soft_placement:
config_copy = context.context().config
config = config_copy
config.allow_soft_placement = False
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
# Disable Grappler constant folding since some tests & benchmarks
# use constant input and become meaningless after constant folding.
# DO NOT DISABLE GRAPPLER OPTIMIZERS WITHOUT CONSULTING WITH THE
# GRAPPLER TEAM.
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
config.graph_options.rewrite_options.pin_to_host_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
return ErrorLoggingSession(graph=graph, config=prepare_config(config))
def _get_cached_session(self,
graph=None,
config=None,
force_gpu=False,
crash_if_inconsistent_args=True):
"""See cached_session() for documentation."""
if self._cached_session is None:
sess = self._create_session(
graph=graph, config=config, force_gpu=force_gpu)
self._cached_session = sess
self._cached_graph = graph
self._cached_config = config
self._cached_force_gpu = force_gpu
return sess
else:
if crash_if_inconsistent_args and self._cached_graph is not graph:
raise ValueError("The graph used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and self._cached_config is not config:
raise ValueError("The config used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and (self._cached_force_gpu is
not force_gpu):
raise ValueError(
"The force_gpu value used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
return self._cached_session
@tf_export("test.create_local_cluster")
def create_local_cluster(num_workers,
num_ps,
protocol="grpc",
worker_config=None,
ps_config=None):
"""Create and start local servers and return the associated `Server` objects.
"PS" stands for "parameter server": a task responsible for storing and
updating the model's parameters. Other tasks send updates to these parameters
as they work on optimizing the parameters. This particular division of labor
between tasks is not required, but is common for distributed training.
Read more at https://www.tensorflow.org/guide/extend/architecture

  The figure illustrates the interaction of these components.
"/job:worker/task:0" and "/job:ps/task:0" are both tasks with worker services.
Example:
```python
workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.compat.v1.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
...
with tf.device("/job:ps/task:1"):
...
with tf.device("/job:worker/task:0"):
...
with tf.device("/job:worker/task:1"):
...
worker_sessions[0].run(...)
```
Args:
num_workers: Number of worker servers to start.
num_ps: Number of PS servers to start.
protocol: Communication protocol. Allowed values are documented in the
documentation of `tf.distribute.Server`.
worker_config: (optional) `tf.ConfigProto` to initialize workers. Can be
used to instantiate multiple devices etc.
ps_config: (optional) `tf.ConfigProto` to initialize PS servers.
Returns:
A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
of `num_workers` objects of type `tf.distribute.Server` (all running
locally);
and `ps_servers` is a list of `num_ps` objects of similar type.
Raises:
ImportError: if portpicker module was not found at load time
"""
import portpicker # pylint: disable=g-import-not-at-top
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs,
job_name="worker",
protocol=protocol,
task_index=ix,
config=worker_config,
start=True) for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs,
job_name="ps",
protocol=protocol,
task_index=ix,
config=ps_config,
start=True) for ix in range(num_ps)
]
return workers, ps_servers
def get_node_def_from_graph(node_name, graph_def):
"""Returns the `NodeDef` instance for given node name in the graph def.
This method explores only the NodeDefs in `graph_def.node`.
Args:
node_name: Name of the NodeDef to search for.
graph_def: An instance of `GraphDef` proto.
Returns:
the `NodeDef` instance whose name field matches the given node_name or None.
"""
for node_def in graph_def.node:
if node_def.name == node_name:
return node_def
return None
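# Illustrative usage sketch (not part of the original module; the node name is
# hypothetical): look up a NodeDef by name and inspect its op type.
def _example_get_node_def_usage(graph_def):
  node = get_node_def_from_graph("my_const", graph_def)
  if node is not None:
    logging.info("node %s has op type %s", node.name, node.op)
  return node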
def set_producer_version(graph, producer_version):
"""Sets graph.graph_def_versions.producer to `producer_version`."""
# The C API doesn't expose altering GraphDefVersions. We can indirectly set
# it via import_graph_def though.
graph_def = graph_pb2.GraphDef()
graph_def.versions.producer = producer_version
with graph.as_default():
importer.import_graph_def(graph_def)
  assert graph.graph_def_versions.producer == producer_version
@contextlib.contextmanager
def _fake_gradient_tape_context_manager():
"""tf.gradients(...) implemented as tf.GradientTape context manager interface.
  This is useful to test tf.gradients() in tests that use tf.GradientTape().
Yields:
gradient tape instance that's implemented by tf.gradients() underneath.
"""
try:
class FakeGradientTape:
def watch(self, x):
pass
def gradient(self, y, x, grad_ys=None):
result = gradients_impl.gradients(y, x, grad_ys)
# Unlike `tape.gradient()`, `tf.gradients()` returns a list for a single
# element. So unpack if needed to match `tape.gradient()` behavior.
if not isinstance(x, (list, tuple)):
assert len(result) == 1
return result[0]
return result
yield FakeGradientTape()
finally:
pass
class AbstractGradientTape:
"""Abstract GradientTape context manager that has multiple implementations.
This is useful to test both tf.GradientTape() and tf.gradients() without
duplicating tests.
"""
def __init__(self, use_tape, persistent=False):
self._use_tape = use_tape
self._persistent = persistent
def __enter__(self):
if self._use_tape:
self._tape_impl = backprop.GradientTape(persistent=self._persistent)
else:
self._tape_impl = _fake_gradient_tape_context_manager()
return self._tape_impl.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
self._tape_impl.__exit__(exc_type, exc_val, exc_tb)
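# Illustrative usage sketch (not part of the original module): the same body
# exercises tf.GradientTape when use_tape=True and tf.gradients when
# use_tape=False, which is the parameterization this class is meant to enable.
def _example_abstract_gradient_tape(use_tape):
  x = ops.convert_to_tensor(3.0)
  with AbstractGradientTape(use_tape=use_tape) as tape:
    tape.watch(x)
    y = x * x
  return tape.gradient(y, x)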
@contextlib.contextmanager
def run_functions_eagerly(run_eagerly):
"""Runs functions eagerly if `run_eagerly` is true.
WARNING: Setting `run_eagerly` to True in tests running in V1 graph mode
  *WILL NOT* make the tf.function run eagerly because eager is disabled by
default in V1. Instead, tf.function will run as a traced graph function.
Ensures that the state (for running functions eagerly) is back to the initial
`def_function.RUN_FUNCTIONS_EAGERLY` state.
Args:
run_eagerly: Boolean determining whether to run the function eagerly or not.
Raises:
ValueError if `run_eagerly` is not a boolean.
Yields:
Nothing.
"""
if not isinstance(run_eagerly, bool):
raise ValueError(
"Expected bool for `run_eagerly` but got {}".format(run_eagerly))
is_eager = context.executing_eagerly()
if not is_eager and run_eagerly:
logging.warning(
"Running tf.function eagerly in V1 graph mode is not supported. "
"tf.function will be run as a traced graph function.")
initial_state = def_function.functions_run_eagerly()
def_function.run_functions_eagerly(run_eagerly)
try:
yield
finally:
def_function.run_functions_eagerly(initial_state)
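# Illustrative usage sketch (not part of the original module): force tf.functions
# to execute eagerly inside the block (useful for debugging Python-side logic);
# the previous setting is restored when the block exits.
def _example_run_functions_eagerly():
  @def_function.function
  def square(x):
    return x * x
  with run_functions_eagerly(True):
    return square(ops.convert_to_tensor(2.0))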
|
{
"content_hash": "92a9c1a63026afab01c0263f1a4c9eae",
"timestamp": "",
"source": "github",
"line_count": 3475,
"max_line_length": 145,
"avg_line_length": 35.37640287769784,
"alnum_prop": 0.6554952697811003,
"repo_name": "cxxgtxy/tensorflow",
"id": "6b54f03e88282827e79e425577f1ba7295975263",
"size": "123654",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/framework/test_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7481"
},
{
"name": "C",
"bytes": "186817"
},
{
"name": "C++",
"bytes": "24882047"
},
{
"name": "CMake",
"bytes": "164374"
},
{
"name": "Go",
"bytes": "854846"
},
{
"name": "HTML",
"bytes": "564161"
},
{
"name": "Java",
"bytes": "307246"
},
{
"name": "Jupyter Notebook",
"bytes": "1833659"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37393"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64142"
},
{
"name": "Protocol Buffer",
"bytes": "225621"
},
{
"name": "Python",
"bytes": "22009999"
},
{
"name": "Shell",
"bytes": "341543"
},
{
"name": "TypeScript",
"bytes": "797437"
}
],
"symlink_target": ""
}
|
"""
nc_to_na.py
=============
Holds the class NCToNA (sub-classing CDMSToNA) that converts a NetCDF file to
one or more NASA Ames files.
"""
# Imports from python standard library
import sys
import logging
# Import from nappy package
import nappy
from nappy.na_error import na_error
import nappy.utils
import nappy.utils.common_utils
import nappy.nc_interface.cdms_to_na
import nappy.nc_interface.na_content_collector
# Import external packages (if available)
if sys.platform.startswith("win"):
raise na_error.NAPlatformError("Windows does not support CDMS. CDMS is required to convert to CDMS objects and NetCDF.")
try:
import cdms2 as cdms
import numpy
except ImportError:
try:
import cdms
import numpy
    except ImportError:
raise Exception("Could not import third-party software. Nappy requires the CDMS and Numeric packages to be installed to convert to CDMS and NetCDF.")
cdms.setAutoBounds("off")
# Define global variables
DEBUG = nappy.utils.getDebug()
default_delimiter = nappy.utils.getDefault("default_delimiter")
default_float_format = nappy.utils.getDefault("default_float_format")
comment_override_rule = nappy.utils.getDefault("comment_override_rule")
add_column_headers = bool(nappy.utils.getDefault("add_column_headers"))
config_dict = nappy.utils.getConfigDict()
header_partitions = config_dict["header_partitions"]
hp = header_partitions
# Define global variables
permitted_overwrite_metadata = ("DATE", "RDATE", "ANAME", "MNAME",
"ONAME", "ORG", "SNAME", "VNAME", "SCOM", "NCOM")
items_as_lists = ["DATE", "RDATE", "ANAME", "VNAME"]
logging.basicConfig()
log = logging.getLogger(__name__)
class NCToNA(nappy.nc_interface.cdms_to_na.CDMSToNA):
"""
Converts a NetCDF file to one or more NASA Ames files.
"""
def __init__(self, nc_file, var_ids=None, na_items_to_override={},
only_return_file_names=False, exclude_vars=[],
requested_ffi=None,
):
"""
Sets up instance variables.
Typical usage is:
>>> import nappy.nc_interface.nc_to_na as nc_to_na
>>> c = nc_to_na.NCToNA("old_file.nc")
>>> c.convert()
>>> c.writeNAFiles("new_file.na", delimiter=",")
OR:
>>> c = nc_to_na.NCToNA("old_file.nc")
>>> file_names = c.constructNAFileNames()
"""
self.nc_file = nc_file
# Now need to read CDMS file so parent class methods are compatible
(cdms_variables, global_attributes) = self._readCDMSFile(var_ids, exclude_vars)
nappy.nc_interface.cdms_to_na.CDMSToNA.__init__(self, cdms_variables, global_attributes=global_attributes,
na_items_to_override=na_items_to_override,
only_return_file_names=only_return_file_names,
requested_ffi=requested_ffi)
def _readCDMSFile(self, var_ids=None, exclude_vars=[]):
"""
Reads the file and returns all the CDMS variables in a list as well
as the global attributes: (cdms_variable_list, global_atts_list)
If var_ids is defined then only get those.
"""
fin = cdms.open(self.nc_file)
cdms_variables = []
# Make sure var_ids is a list
        if isinstance(var_ids, str):
var_ids = [var_ids]
for var_id in fin.listvariables():
            if var_ids is None or var_id in var_ids:
if var_id not in exclude_vars:
# Check whether singleton variable, if so create variable
vm = fin[var_id]
var = fin(var_id)
if hasattr(vm, "rank") and vm.rank() == 0:
var = cdms.createVariable(numpy.array(float(fin(var_id))), id=vm.id, attributes=vm.attributes)
cdms_variables.append(var)
globals = fin.attributes.items()
return (cdms_variables, globals)
def constructNAFileNames(self, na_file=None):
"""
Works out what the file names of the output NA files will be and
returns a list of them.
"""
self.convert()
file_names = []
# create file name if not given
        if na_file is None:
base_name = self.nc_file
if base_name[-3:] == ".nc":
base_name = base_name[:-3]
na_file = base_name + ".na"
file_counter = 1
# Now, create some valid file names
for this_na_dict in self.na_dict_list:
if len(self.na_dict_list) == 1:
suffix = ""
else:
suffix = "_%s" % file_counter
# Create file name
name_parts = na_file.split(".")
new_name = (".".join(name_parts[:-1])) + suffix + "." + name_parts[-1]
file_names.append(new_name)
file_counter += 1
return file_names
def writeNAFiles(self, na_file=None, delimiter=default_delimiter, annotation=False,
float_format=default_float_format, size_limit=None, no_header=False):
"""
Writes the self.na_dict_list content to one or more NASA Ames files.
Output file names are based on the self.nc_file name unless specified
        in the na_file argument, in which case that provides the main name
that is appended to if multiple output file names are required.
TODO: no_header is NOT implemented.
"""
self.convert() # just in case not already called
# Gets a list of NA file_names that will be produced.
file_names = self.constructNAFileNames(na_file)
# Set up some counters: file_counter is the expected number of files.
# full_file_counter includes any files that have been split across multiple output NA files
# because size_limit exceeded.
file_counter = 1
full_file_counter = 1
file_list = []
# Get any NASA Ames dictionary values that should be overwritten with local values
local_attributes = nappy.utils.getLocalAttributesConfigDict()
local_na_atts = local_attributes["na_attributes"]
# define final override list by using defaults then locally provided changes
overriders = local_na_atts
for (okey, ovalue) in self.na_items_to_override.items():
overriders[okey] = ovalue
# Now loop through writing the outputs
for na_dict_and_var_ids in self.na_dict_list:
file_name = file_names[file_counter - 1]
msg = "\nWriting output NASA Ames file: %s" % file_name
if DEBUG: log.debug(msg)
self.output_message.append(msg)
# Set up current na dict
(this_na_dict, vars_to_write) = na_dict_and_var_ids
# Override content of NASA Ames if they are permitted
for key in overriders.keys():
if key in permitted_overwrite_metadata:
if key in items_as_lists:
new_item = overriders[key].split()
if key in ("DATE", "RDATE"):
new_item = [int(list_item) for list_item in new_item]
else:
new_item = overriders[key]
# Do specific overwrite for comments by inserting lines at start
if key in ("SCOM", "NCOM"):
# Use rule defined in config file in terms of where to put new comments
if comment_override_rule == "replace":
comments_list = new_item[:]
elif comment_override_rule in ("insert", "extend"):
new_comments = new_item[:]
existing_comments = this_na_dict.get(key, [])
comments_list = self._cleanWrapComments(existing_comments, new_comments, key, comment_override_rule)
else:
raise Exception("Did not recognise comment_override_rule: " + str(comment_override_rule))
this_na_dict[key] = comments_list
this_na_dict["N%sL" % key] = len(comments_list)
                    elif key not in this_na_dict or new_item != this_na_dict[key]:
this_na_dict[key] = new_item
msg = "Metadata overwritten in output file: '%s' is now '%s'" % (key, this_na_dict[key])
if DEBUG: log.debug(msg)
self.output_message.append(msg)
# For certain FFIs create final Normal comments as a list of column headers before data section
if add_column_headers == True:
self._updateWithColumnHeaders(this_na_dict, delimiter)
# Cope with size limits if specified and FFI is 1001
# Seems to be writing different chunks of a too long array to different na_dicts to then write to separate files.
if size_limit is not None and (this_na_dict["FFI"] == 1001 and len(this_na_dict["V"][0]) > size_limit):
files_written = self._writeNAFileSubsetsWithinSizeLimit(this_na_dict, file_name, delimiter=delimiter,
float_format=float_format, size_limit=size_limit,
annotation=annotation)
file_list.extend(files_written)
# If not having to split file into multiple outputs (normal condition)
else:
log.info("Output NA file name: %s" % file_name)
x = nappy.openNAFile(file_name, 'w', this_na_dict)
x.write(delimiter=delimiter, float_format=float_format, annotation=annotation)
x.close()
file_list.append(file_name)
# Report on what has been written
msg = "\nWrote the following variables:" + "\n " + ("\n ".join(vars_to_write[0]))
if DEBUG: log.debug(msg)
self.output_message.append(msg)
msg = ""
aux_var_count = vars_to_write[1]
if len(aux_var_count) > 0:
msg = "\nWrote the following auxiliary variables:" + "\n " + ("\n ".join(aux_var_count))
singleton_var_count = vars_to_write[2]
if len(singleton_var_count) > 0:
msg = "\nWrote the following Singleton variables:" + "\n " + ("\n ".join(singleton_var_count))
if len(file_list) > 0:
msg = msg + ("\n\nNASA Ames file(s) written successfully: \n%s" % "\n".join(file_list))
full_file_counter += len(file_list)
file_counter += 1
if DEBUG: log.debug(msg)
self.output_message.append(msg)
full_file_count = full_file_counter - 1
if full_file_count == 1:
plural = ""
else:
plural = "s"
msg = "\n%s file%s written." % (full_file_count, plural)
if DEBUG: log.debug(msg)
self.output_message.append(msg)
self.output_files_written = file_list
return self.output_message
def _writeNAFileSubsetsWithinSizeLimit(self, this_na_dict, file_name, delimiter,
float_format, size_limit, annotation):
"""
If self.size_limit is specified and FFI is 1001 we can chunk the output into
different files in a NASA Ames compliant way.
Returns list of file names of outputs written.
"""
file_names = []
var_list = this_na_dict["V"]
array_length = len(var_list[0])
nvol_info = divmod(array_length, size_limit)
nvol = nvol_info[0]
# create the number of volumes (files) that need to be written.
if nvol_info[1] > 0: nvol = nvol + 1
start = 0
letter_count = 0
ivol = 0
# Loop through until full array length has been written to a set of files.
while start < array_length:
ivol = ivol + 1
end = start + size_limit
if end > array_length:
end = array_length
current_block = []
# Write new V array
for v in var_list:
current_block.append(v[start:end])
# Adjust X accordingly in the na dictionary, because independent variable has been reduced in size
na_dict_copy = nappy.utils.common_utils.modifyNADictCopy(this_na_dict, current_block,
start, end, ivol, nvol)
# Append a letter to the file name for writing this block to
            file_name_plus_letter = "%s-%.3d.na" % (file_name[:-3], ivol)
            # Note: the name is added to file_names once the block has been written (below).
# Write data to output file
x = nappy.openNAFile(file_name_plus_letter, 'w', na_dict_copy)
x.write(delimiter=delimiter, float_format=float_format, annotation=annotation)
x.close()
msg = "\nOutput files split on size limit: %s\nFilename used: %s" % (size_limit, file_name_plus_letter)
if DEBUG: log.debug(msg)
self.output_message.append(msg)
letter_count = letter_count + 1
start = end
file_names.append(file_name_plus_letter)
return file_names
def _updateWithColumnHeaders(self, na_dict, delimiter):
"""
Updates the NCOM and NCOML parts of the na_dict so that
the last normal comments line is in fact a set of headers
for the data section. E.g.:
UTs Spd Direc
30446.9 305 2592
20447.9 204 2596
The 'delimiter' argument is used to separate out the arguments.
This option is only compatible with a limited range of FFIs and
only works if there are no Auxiliary variables defined.
"""
ffi = na_dict["FFI"]
compatible_ffis = (1001, 1010, 2110)
if ffi not in compatible_ffis or na_dict["NAUXV"] > 0:
log.debug("Column Headers are not written for FFIs other than: %s" % str(compatible_ffis))
return
if ffi in (1001, 2110):
col_names = [na_dict["XNAME"][0]]
elif ffi in (1010,):
col_names = []
col_names.extend(na_dict["VNAME"])
        col_names_line = delimiter.join(col_names)
na_dict["NCOM"].append(col_names_line)
na_dict["NNCOML"] = len(na_dict["NCOM"])
return
def _cleanWrapComments(self, existing_comments, new_comments, key, comment_override_rule):
"""
Combines new_comments with existing_comments where comments are
either Special or Normal. 'key' defines this being defined as either
"SCOM" or "NCOM". 'comment_override_rule' is either "insert" (new_comments first)
or "extend" (existing_comments first).
Returns a new list of combined_comments.
"""
if existing_comments == []: return new_comments
if new_comments == []: return existing_comments
# Strip start header if used
c1 = key[0].lower()
start_line = hp[c1 + "c_start"]
start_used = False
if existing_comments[0] == start_line:
existing_comments = existing_comments[1:]
start_used = start_line
# Now check last line
end_line = hp[c1 + "c_end"]
end_used = False
if existing_comments[-1] == end_line:
existing_comments = existing_comments[:-1]
end_used = end_line
# Check for alternative last line in NCOM
if end_used == False and key == "NCOM":
end_line2 = hp["data_next"]
if existing_comments[-1] == end_line2:
existing_comments = existing_comments[:-1]
end_used = end_line2
# Now put back together
ordered_comments = [existing_comments, new_comments]
if comment_override_rule == "insert":
ordered_comments.reverse()
combined_comments = ordered_comments[0] + ordered_comments[1]
if start_used:
combined_comments.insert(0, start_used)
if end_used:
combined_comments.append(end_used)
return combined_comments
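# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original nappy module): how the
# size_limit and comment_override_rule options behave. File names and values
# below are hypothetical examples.
#
#   >>> c = NCToNA("big_file.nc")
#   >>> c.writeNAFiles("big_file.na", delimiter=",", size_limit=10000)
#
# For FFI 1001 output with more than 10000 records per variable, the data are
# chunked into NASA Ames compliant volumes named "big_file-001.na",
# "big_file-002.na", ... (see the "%s-%.3d.na" pattern in
# _writeNAFileSubsetsWithinSizeLimit).
#
# comment_override_rule (read from the nappy config) controls how overridden
# SCOM/NCOM comments are merged by _cleanWrapComments:
#   "replace" -> new comments replace the existing block entirely
#   "insert"  -> new comments are placed before the existing ones
#   "extend"  -> existing comments are kept first, new ones are appended
# ---------------------------------------------------------------------------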
|
{
"content_hash": "7d57a3a765a4d88252dfa6462cf12a9e",
"timestamp": "",
"source": "github",
"line_count": 417,
"max_line_length": 157,
"avg_line_length": 40.80815347721823,
"alnum_prop": 0.5493917846859023,
"repo_name": "eufarn7sp/egads-eufar",
"id": "5c1521cbed27884fc9ee1a9bc13cffb3d6673880",
"size": "17255",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "egads/thirdparty/nappy/nc_interface/nc_to_na.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "309547"
}
],
"symlink_target": ""
}
|
"""Run the first page of every benchmark that has a composable measurement.
Ideally this test would be comprehensive, but running just the first page of
each benchmark serves as a kind of smoke test.
"""
import os
import unittest
from telemetry import test
from telemetry.core import discover
from telemetry.page import page_measurement
from telemetry.unittest import gtest_testrunner
from telemetry.unittest import options_for_unittests
def SmokeTestGenerator(benchmark):
# In general you should @test.Disabled individual benchmarks that fail,
# instead of this entire smoke test suite.
# TODO(achuith): Multiple tests failing on CrOS. crbug.com/351114
@test.Disabled('chromeos')
def BenchmarkSmokeTest(self):
# Only measure a single page so that this test cycles reasonably quickly.
benchmark.options['pageset_repeat'] = 1
benchmark.options['page_repeat'] = 1
class SinglePageBenchmark(benchmark): # pylint: disable=W0232
def CreatePageSet(self, options):
# pylint: disable=E1002
ps = super(SinglePageBenchmark, self).CreatePageSet(options)
ps.pages = ps.pages[:1]
return ps
# Set the benchmark's default arguments.
options = options_for_unittests.GetCopy()
options.output_format = 'none'
parser = options.CreateParser()
benchmark.AddCommandLineArgs(parser)
test.AddCommandLineArgs(parser)
benchmark.SetArgumentDefaults(parser)
options.MergeDefaultValues(parser.get_default_values())
benchmark.ProcessCommandLineArgs(None, options)
test.ProcessCommandLineArgs(None, options)
self.assertEqual(0, SinglePageBenchmark().Run(options),
msg='Failed: %s' % benchmark)
return BenchmarkSmokeTest
def load_tests(_, _2, _3):
suite = gtest_testrunner.GTestTestSuite()
benchmarks_dir = os.path.dirname(__file__)
top_level_dir = os.path.dirname(benchmarks_dir)
measurements_dir = os.path.join(top_level_dir, 'measurements')
all_measurements = discover.DiscoverClasses(
measurements_dir, top_level_dir, page_measurement.PageMeasurement,
pattern='*.py').values()
all_benchmarks = discover.DiscoverClasses(
benchmarks_dir, top_level_dir, test.Test, pattern='*.py').values()
for benchmark in all_benchmarks:
if benchmark.PageTestClass() not in all_measurements:
# If the benchmark is not in measurements, then it is not composable.
# Ideally we'd like to test these as well, but the non-composable
# benchmarks are usually long-running benchmarks.
continue
# TODO(tonyg): Smoke doesn't work with session_restore yet.
if benchmark.Name().startswith('session_restore'):
continue
if hasattr(benchmark, 'generated_profile_archive'):
# We'd like to test these, but don't know how yet.
continue
class BenchmarkSmokeTest(unittest.TestCase):
pass
setattr(BenchmarkSmokeTest, benchmark.Name(), SmokeTestGenerator(benchmark))
suite.addTest(BenchmarkSmokeTest(benchmark.Name()))
return suite
|
{
"content_hash": "2824985326758de775e8ceed0a45bb0a",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 80,
"avg_line_length": 34.77906976744186,
"alnum_prop": 0.7265128719491809,
"repo_name": "AndroidOpenDevelopment/android_external_chromium_org",
"id": "32755c4ad3a37cdbff447e6f53cd0afdc5812be5",
"size": "3154",
"binary": false,
"copies": "8",
"ref": "refs/heads/lp",
"path": "tools/perf/benchmarks/benchmark_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Assembly",
"bytes": "24741"
},
{
"name": "C",
"bytes": "3938821"
},
{
"name": "C++",
"bytes": "200378039"
},
{
"name": "CSS",
"bytes": "942505"
},
{
"name": "Java",
"bytes": "5192594"
},
{
"name": "JavaScript",
"bytes": "11002659"
},
{
"name": "Makefile",
"bytes": "20865646"
},
{
"name": "Objective-C",
"bytes": "1198443"
},
{
"name": "Objective-C++",
"bytes": "7082902"
},
{
"name": "PHP",
"bytes": "61320"
},
{
"name": "Perl",
"bytes": "69392"
},
{
"name": "Python",
"bytes": "6310657"
},
{
"name": "Rebol",
"bytes": "262"
},
{
"name": "Shell",
"bytes": "470717"
},
{
"name": "Standard ML",
"bytes": "1589"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "15206"
}
],
"symlink_target": ""
}
|
from nova.api.openstack.compute.schemas import availability_zone as schema
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import availability_zones
from nova import compute
import nova.conf
from nova.policies import availability_zone as az_policies
from nova import servicegroup
CONF = nova.conf.CONF
ATTRIBUTE_NAME = "availability_zone"
class AvailabilityZoneController(wsgi.Controller):
"""The Availability Zone API controller for the OpenStack API."""
def __init__(self):
super(AvailabilityZoneController, self).__init__()
self.servicegroup_api = servicegroup.API()
self.host_api = compute.HostAPI()
def _get_filtered_availability_zones(self, zones, is_available):
result = []
for zone in zones:
# Hide internal_service_availability_zone
if zone == CONF.internal_service_availability_zone:
continue
result.append({'zoneName': zone,
'zoneState': {'available': is_available},
"hosts": None})
return result
def _describe_availability_zones(self, context, **kwargs):
ctxt = context.elevated()
available_zones, not_available_zones = \
availability_zones.get_availability_zones(ctxt)
filtered_available_zones = \
self._get_filtered_availability_zones(available_zones, True)
filtered_not_available_zones = \
self._get_filtered_availability_zones(not_available_zones, False)
return {'availabilityZoneInfo': filtered_available_zones +
filtered_not_available_zones}
def _describe_availability_zones_verbose(self, context, **kwargs):
ctxt = context.elevated()
available_zones, not_available_zones = \
availability_zones.get_availability_zones(ctxt)
# Available services
enabled_services = self.host_api.service_get_all(
context, {'disabled': False}, set_zones=True, all_cells=True)
zone_hosts = {}
host_services = {}
api_services = ('nova-osapi_compute', 'nova-ec2', 'nova-metadata')
for service in enabled_services:
if service.binary in api_services:
# Skip API services in the listing since they are not
# maintained in the same way as other services
continue
zone_hosts.setdefault(service['availability_zone'], [])
if service['host'] not in zone_hosts[service['availability_zone']]:
zone_hosts[service['availability_zone']].append(
service['host'])
host_services.setdefault(service['availability_zone'] +
service['host'], [])
host_services[service['availability_zone'] + service['host']].\
append(service)
result = []
for zone in available_zones:
hosts = {}
for host in zone_hosts.get(zone, []):
hosts[host] = {}
for service in host_services[zone + host]:
alive = self.servicegroup_api.service_is_up(service)
hosts[host][service['binary']] = {'available': alive,
'active': True != service['disabled'],
'updated_at': service['updated_at']}
result.append({'zoneName': zone,
'zoneState': {'available': True},
"hosts": hosts})
for zone in not_available_zones:
result.append({'zoneName': zone,
'zoneState': {'available': False},
"hosts": None})
return {'availabilityZoneInfo': result}
@extensions.expected_errors(())
def index(self, req):
"""Returns a summary list of availability zone."""
context = req.environ['nova.context']
context.can(az_policies.POLICY_ROOT % 'list')
return self._describe_availability_zones(context)
@extensions.expected_errors(())
def detail(self, req):
"""Returns a detailed list of availability zone."""
context = req.environ['nova.context']
context.can(az_policies.POLICY_ROOT % 'detail')
return self._describe_availability_zones_verbose(context)
# NOTE(gmann): This function is not supposed to use 'body_deprecated_param'
# parameter as this is placed to handle scheduler_hint extension for V2.1.
def server_create(server_dict, create_kwargs, body_deprecated_param):
# NOTE(alex_xu): For v2.1 compat mode, we strip the spaces when create
# availability_zone. But we don't strip at here for backward-compatible
# with some users already created availability_zone with
# leading/trailing spaces with legacy v2 API.
create_kwargs['availability_zone'] = server_dict.get(ATTRIBUTE_NAME)
def get_server_create_schema(version):
if version == "2.0":
return schema.server_create_v20
return schema.server_create
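# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the shape of the
# payload built by _describe_availability_zones_verbose() and returned by
# detail(). Zone, host and timestamp values are made-up examples.
#
# {
#     "availabilityZoneInfo": [
#         {
#             "zoneName": "nova",
#             "zoneState": {"available": True},
#             "hosts": {
#                 "compute-1": {
#                     "nova-compute": {
#                         "available": True,   # servicegroup reports the service as up
#                         "active": True,      # service is not disabled
#                         "updated_at": "2023-01-01T00:00:00Z",
#                     },
#                 },
#             },
#         },
#         # Zones without enabled services are appended with
#         # "zoneState": {"available": False} and "hosts": None.
#     ]
# }
# ---------------------------------------------------------------------------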
|
{
"content_hash": "c9033a2fe7a2d6d88376d406da605482",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 79,
"avg_line_length": 41.778688524590166,
"alnum_prop": 0.6080047086521483,
"repo_name": "Juniper/nova",
"id": "33e61d3fb114533c0cde5f50b69ebdb982d0fe07",
"size": "5710",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nova/api/openstack/compute/availability_zone.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "601"
},
{
"name": "PHP",
"bytes": "23962"
},
{
"name": "Python",
"bytes": "19816434"
},
{
"name": "Shell",
"bytes": "27717"
},
{
"name": "Smarty",
"bytes": "339635"
}
],
"symlink_target": ""
}
|
import os
import re
from setuptools import setup
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
CHANGES = open(os.path.join(here, 'CHANGES.rst')).read()
versionfile = open(os.path.join(here, "nodular", "_version.py")).read()
mo = re.search(r"^__version__\s*=\s*['\"]([^'\"]*)['\"]", versionfile, re.M)
if mo:
version = mo.group(1)
else:
raise RuntimeError("Unable to find version string in nodular/_version.py.")
requires = [
'six',
'simplejson',
'Flask-SQLAlchemy',
'SQLAlchemy>=1.0',
'Flask',
'coaster>=0.6.dev0',
]
dependency_links = [
'https://github.com/hasgeek/coaster/archive/master.zip#egg=coaster-0.6.dev0'
]
setup(
name='nodular',
version=version,
description='Revisioned content objects',
long_description=README + '\n\n' + CHANGES,
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
'Topic :: Software Development :: Libraries',
],
author='Kiran Jonnalagadda',
author_email='kiran@hasgeek.com',
url='https://github.com/hasgeek/nodular',
keywords='nodular',
packages=['nodular'],
include_package_data=True,
zip_safe=True,
test_suite='tests',
install_requires=requires,
dependency_links=dependency_links,
)
|
{
"content_hash": "a6fdb0b549b7ed2df9b9811471d2b9d2",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 80,
"avg_line_length": 28.90909090909091,
"alnum_prop": 0.6232704402515723,
"repo_name": "hasgeek/nodular",
"id": "39c75f3ee5fbcb9e34dcc9cea48e92ae5bd19af8",
"size": "1590",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "98892"
},
{
"name": "Shell",
"bytes": "99"
}
],
"symlink_target": ""
}
|
"""
test_build
~~~~~~~~~~
Test all builders that have no special checks.
:copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from util import with_app, test_root, path
from textwrap import dedent
def teardown_module():
(test_root / '_build').rmtree(True)
# just let the remaining ones run for now
@with_app(buildername='pickle')
def test_pickle(app):
app.builder.build_all()
@with_app(buildername='json')
def test_json(app):
app.builder.build_all()
@with_app(buildername='linkcheck')
def test_linkcheck(app):
app.builder.build_all()
@with_app(buildername='text')
def test_text(app):
app.builder.build_all()
@with_app(buildername='htmlhelp')
def test_htmlhelp(app):
app.builder.build_all()
@with_app(buildername='qthelp')
def test_qthelp(app):
app.builder.build_all()
@with_app(buildername='epub')
def test_epub(app):
app.builder.build_all()
@with_app(buildername='changes')
def test_changes(app):
app.builder.build_all()
try:
from docutils.writers.manpage import Writer
except ImportError:
pass
else:
@with_app(buildername='man')
def test_man(app):
app.builder.build_all()
assert (app.outdir / 'SphinxTests.1').exists()
@with_app(buildername='singlehtml', cleanenv=True)
def test_singlehtml(app):
app.builder.build_all()
@with_app(buildername='xml')
def test_xml(app):
app.builder.build_all()
@with_app(buildername='pseudoxml')
def test_pseudoxml(app):
app.builder.build_all()
@with_app(buildername='html', srcdir='(temp)')
def test_multibyte_path(app):
srcdir = path(app.srcdir)
mb_name = u'\u65e5\u672c\u8a9e'
(srcdir / mb_name).makedirs()
(srcdir / mb_name / (mb_name + '.txt')).write_text(dedent("""
multi byte file name page
==========================
"""))
master_doc = srcdir / 'contents.txt'
master_doc.write_bytes((master_doc.text() + dedent("""
.. toctree::
%(mb_name)s/%(mb_name)s
""" % locals())
).encode('utf-8'))
app.builder.build_all()
|
{
"content_hash": "127478cf8161045fe1f38f3cafd29c5b",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 68,
"avg_line_length": 23.34065934065934,
"alnum_prop": 0.6393596986817326,
"repo_name": "kiwicopple/MyMDb",
"id": "b172184b1d29a144cb235a5a3c87e8a4687ea43f",
"size": "2148",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "venv/build/sphinx/tests/test_build.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "433713"
},
{
"name": "C++",
"bytes": "21783"
},
{
"name": "CSS",
"bytes": "88676"
},
{
"name": "JavaScript",
"bytes": "192343"
},
{
"name": "Makefile",
"bytes": "8470"
},
{
"name": "PowerShell",
"bytes": "8104"
},
{
"name": "Python",
"bytes": "10594687"
},
{
"name": "Shell",
"bytes": "885"
},
{
"name": "TeX",
"bytes": "112147"
}
],
"symlink_target": ""
}
|
"""
Custom controller
"""
|
{
"content_hash": "3db678cf1f08a7526674034a2dfc400f",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 16,
"avg_line_length": 8,
"alnum_prop": 0.625,
"repo_name": "mpeuster/estate",
"id": "b3dc53b7d03d3cd2e1d31986b0fd7c18391cfa31",
"size": "24",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "experiments/scale-down-hack/pox/pox/upb/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1905"
},
{
"name": "C",
"bytes": "3514"
},
{
"name": "C++",
"bytes": "126792"
},
{
"name": "HTML",
"bytes": "4995"
},
{
"name": "JavaScript",
"bytes": "45240"
},
{
"name": "Makefile",
"bytes": "6607"
},
{
"name": "Python",
"bytes": "6728980"
},
{
"name": "Ruby",
"bytes": "49238"
},
{
"name": "Shell",
"bytes": "96661"
}
],
"symlink_target": ""
}
|
import re
import sys
def ReorderHeaders(path):
with open(path, 'r') as f:
source = f.read()
all_lines = iter(source.split('\n'))
before_includes_lines = []
includes_lines = []
after_includes_lines = []
# Collect all the lines prior to the first #include in before_includes_lines.
try:
while True:
line = all_lines.next()
if line.startswith('#include'):
includes_lines.append(line)
break
before_includes_lines.append(line)
except StopIteration:
pass
# Collect all the #include and whitespace lines in includes_lines.
try:
while True:
line = all_lines.next()
if not line:
continue
if not line.startswith('#include'):
after_includes_lines.append(line)
break
includes_lines.append(line)
except StopIteration:
pass
# Collect the remaining lines in after_includes_lines.
after_includes_lines += list(all_lines)
# Filter for includes that finds the #include of the header file associated with the source file
# being processed. E.g. if 'path' is source/common/common/hex.cc, this filter matches
# "common/common/hex.h".
def file_header_filter():
return lambda f: f.endswith('.h"') and path.endswith(f[1:-3] + '.cc')
def regex_filter(regex):
return lambda f: re.match(regex, f)
# Filters that define the #include blocks
block_filters = [
file_header_filter(),
      regex_filter(r'<.*\.h>'),
regex_filter('<.*>'),
regex_filter('"envoy/.*"'),
regex_filter('"common/.*"'),
regex_filter('"source/common/.*"'),
regex_filter('"exe/.*"'),
regex_filter('"server/.*"'),
regex_filter('"test/.*"'),
]
blocks = []
already_included = set([])
for b in block_filters:
block = []
for line in includes_lines:
header = line[len('#include '):]
if line not in already_included and b(header):
block.append(line)
already_included.add(line)
if len(block) > 0:
blocks.append(block)
# Anything not covered by block_filters gets its own block.
misc_headers = list(set(includes_lines).difference(already_included))
if len(misc_headers) > 0:
blocks.append(misc_headers)
reordered_includes_lines = '\n\n'.join(
['\n'.join(sorted(block)) for block in blocks])
if reordered_includes_lines:
reordered_includes_lines += '\n'
return '\n'.join(
filter(lambda x: x, [
'\n'.join(before_includes_lines),
reordered_includes_lines,
'\n'.join(after_includes_lines),
]))
if __name__ == '__main__':
if len(sys.argv) == 2:
sys.stdout.write(ReorderHeaders(sys.argv[1]))
sys.exit(0)
elif len(sys.argv) == 3 and sys.argv[1] == '--rewrite':
path = sys.argv[2]
    reordered_source = ReorderHeaders(path)
    with open(path, 'w') as f:
      f.write(reordered_source)
sys.exit(0)
print 'Usage: %s [--rewrite] <source file path>' % sys.argv[0]
sys.exit(1)
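# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original tool): for a hypothetical
# source/common/common/hex.cc, the block filters above would group and sort
# its includes roughly like this (blocks joined by blank lines, each block
# sorted alphabetically):
#
#   #include "common/common/hex.h"        <- header paired with the .cc file
#
#   #include <string.h>                   <- <...h> system headers
#
#   #include <string>                     <- other <...> headers
#
#   #include "envoy/common/exception.h"   <- "envoy/..." block
#
#   #include "common/common/utility.h"    <- "common/..." block
#
# Headers not matched by any filter end up in a final catch-all block.
# ---------------------------------------------------------------------------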
|
{
"content_hash": "05c886e4ee5166abf86e3bf7a527f219",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 98,
"avg_line_length": 28.152380952380952,
"alnum_prop": 0.6187415426251691,
"repo_name": "craffert0/envoy",
"id": "148f4ca25bac95aed131b4270998fbbe5609c682",
"size": "3628",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tools/header_order.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "577"
},
{
"name": "C++",
"bytes": "6411224"
},
{
"name": "Emacs Lisp",
"bytes": "966"
},
{
"name": "Makefile",
"bytes": "2758"
},
{
"name": "Python",
"bytes": "409310"
},
{
"name": "Shell",
"bytes": "74618"
}
],
"symlink_target": ""
}
|
from office365.entity_collection import EntityCollection
from office365.onenote.entity_hierarchy_model import OnenoteEntityHierarchyModel
from office365.onenote.operations.onenote import OnenoteOperation
from office365.onenote.pages.page_links import PageLinks
from office365.runtime.queries.service_operation import ServiceOperationQuery
from office365.runtime.paths.resource_path import ResourcePath
class OnenoteSection(OnenoteEntityHierarchyModel):
"""A section in a OneNote notebook. Sections can contain pages."""
def copy_to_section_group(self, group_id, _id, rename_as=None, site_collection_id=None, site_id=None):
"""For Copy operations, you follow an asynchronous calling pattern: First call the Copy action,
and then poll the operation endpoint for the result.
:param str group_id: The id of the group to copy to. Use only when copying to a Microsoft 365 group.
:param str _id: Required. The id of the destination section group.
:param str rename_as: The name of the copy. Defaults to the name of the existing item.
:param str site_collection_id:
:param str site_id:
"""
return_type = OnenoteOperation(self.context)
payload = {
"groupId": group_id,
"id": _id,
"renameAs": rename_as,
"siteCollectionId": site_collection_id,
"siteId": site_id
}
qry = ServiceOperationQuery(self, "copyToSectionGroup", None, payload, None, return_type)
self.context.add_query(qry)
return return_type
@property
def is_default(self):
"""Indicates whether this is the user's default section. Read-only."""
return self.properties.get("isDefault", None)
@property
def links(self):
"""Links for opening the section. The oneNoteClientURL link opens the section in the OneNote native client
if it's installed. The oneNoteWebURL link opens the section in OneNote on the web.
"""
return self.properties.get("links", PageLinks())
@property
def pages(self):
"""
The collection of pages in the section. Read-only. Nullable.
:rtype: EntityCollection
"""
from office365.onenote.pages.page import OnenotePage
return self.get_property('pages',
EntityCollection(self.context, OnenotePage, ResourcePath("pages", self.resource_path)))
@property
def parent_notebook(self):
"""The notebook that contains the page. Read-only.
:rtype: Notebook
"""
from office365.onenote.notebooks.notebook import Notebook
return self.get_property('parentNotebook',
Notebook(self.context, ResourcePath("parentNotebook", self.resource_path)))
@property
def parent_section_group(self):
"""The section group that contains the section. Read-only.
:rtype: SectionGroup
"""
from office365.onenote.sectiongroups.section_group import SectionGroup
return self.get_property('parentSectionGroup',
SectionGroup(self.context, ResourcePath("parentSectionGroup", self.resource_path)))
def get_property(self, name, default_value=None):
if default_value is None:
property_mapping = {
"parentNotebook": self.parent_notebook,
"parentSectionGroup": self.parent_section_group
}
default_value = property_mapping.get(name, None)
return super(OnenoteSection, self).get_property(name, default_value)
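# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how the asynchronous
# copyToSectionGroup pattern described above might be driven. The destination
# id, the use of section.context.execute_query() and the polling details are
# assumptions for illustration, not guaranteed library API.
#
#   # section: an OnenoteSection that has already been loaded
#   operation = section.copy_to_section_group(
#       group_id=None,                          # only when copying to an M365 group
#       _id="<destination-section-group-id>",   # hypothetical placeholder
#       rename_as="Copied section",
#   )
#   section.context.execute_query()             # submits the copyToSectionGroup call
#   # The returned OnenoteOperation is then polled until it reports completion
#   # or failure, per the Microsoft Graph copy pattern noted in the docstring.
# ---------------------------------------------------------------------------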
|
{
"content_hash": "6071bc2aaed4b21638b713918de06f30",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 120,
"avg_line_length": 42.61176470588235,
"alnum_prop": 0.6612368856985091,
"repo_name": "vgrem/Office365-REST-Python-Client",
"id": "8f9a5705fe56c9f9bdbc7efd21f35869ec03745b",
"size": "3622",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "office365/onenote/sections/section.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1659292"
}
],
"symlink_target": ""
}
|
import doctest
import pickle
import warnings
from helpers import unittest, LuigiTestCase, with_config
from datetime import datetime, timedelta
import luigi
import luigi.task
import luigi.util
import collections
from luigi.task_register import load_task
class DummyTask(luigi.Task):
param = luigi.Parameter()
bool_param = luigi.BoolParameter()
int_param = luigi.IntParameter()
float_param = luigi.FloatParameter()
date_param = luigi.DateParameter()
datehour_param = luigi.DateHourParameter()
timedelta_param = luigi.TimeDeltaParameter()
insignificant_param = luigi.Parameter(significant=False)
DUMMY_TASK_OK_PARAMS = dict(
param='test',
bool_param=True,
int_param=666,
float_param=123.456,
date_param=datetime(2014, 9, 13).date(),
datehour_param=datetime(2014, 9, 13, 9),
timedelta_param=timedelta(44), # doesn't support seconds
insignificant_param='test')
class DefaultInsignificantParamTask(luigi.Task):
insignificant_param = luigi.Parameter(significant=False, default='value')
necessary_param = luigi.Parameter(significant=False)
class TaskTest(unittest.TestCase):
def test_tasks_doctest(self):
doctest.testmod(luigi.task)
def test_task_to_str_to_task(self):
original = DummyTask(**DUMMY_TASK_OK_PARAMS)
other = DummyTask.from_str_params(original.to_str_params())
self.assertEqual(original, other)
def test_task_from_str_insignificant(self):
params = {'necessary_param': 'needed'}
original = DefaultInsignificantParamTask(**params)
other = DefaultInsignificantParamTask.from_str_params(params)
self.assertEqual(original, other)
def test_task_missing_necessary_param(self):
with self.assertRaises(luigi.parameter.MissingParameterException):
DefaultInsignificantParamTask.from_str_params({})
def test_external_tasks_loadable(self):
task = load_task("luigi", "ExternalTask", {})
self.assertTrue(isinstance(task, luigi.ExternalTask))
def test_getpaths(self):
class RequiredTask(luigi.Task):
def output(self):
return luigi.LocalTarget("/path/to/target/file")
t = RequiredTask()
reqs = {}
reqs["bare"] = t
reqs["dict"] = {"key": t}
reqs["OrderedDict"] = collections.OrderedDict([("key", t)])
reqs["list"] = [t]
reqs["tuple"] = (t,)
reqs["generator"] = (t for _ in range(10))
struct = luigi.task.getpaths(reqs)
self.assertIsInstance(struct, dict)
self.assertIsInstance(struct["bare"], luigi.Target)
self.assertIsInstance(struct["dict"], dict)
self.assertIsInstance(struct["OrderedDict"], collections.OrderedDict)
self.assertIsInstance(struct["list"], list)
self.assertIsInstance(struct["tuple"], tuple)
self.assertTrue(hasattr(struct["generator"], "__iter__"))
def test_flatten(self):
flatten = luigi.task.flatten
self.assertEqual(sorted(flatten({'a': 'foo', 'b': 'bar'})), ['bar', 'foo'])
self.assertEqual(sorted(flatten(['foo', ['bar', 'troll']])), ['bar', 'foo', 'troll'])
self.assertEqual(flatten('foo'), ['foo'])
self.assertEqual(flatten(42), [42])
self.assertEqual(flatten((len(i) for i in ["foo", "troll"])), [3, 5])
self.assertRaises(TypeError, flatten, (len(i) for i in ["foo", "troll", None]))
def test_externalized_task_picklable(self):
task = luigi.task.externalize(luigi.Task())
pickled_task = pickle.dumps(task)
self.assertEqual(task, pickle.loads(pickled_task))
def test_no_unpicklable_properties(self):
task = luigi.Task()
task.set_tracking_url = lambda tracking_url: tracking_url
task.set_status_message = lambda message: message
with task.no_unpicklable_properties():
pickle.dumps(task)
self.assertIsNotNone(task.set_tracking_url)
self.assertIsNotNone(task.set_status_message)
tracking_url = task.set_tracking_url('http://test.luigi.com/')
self.assertEqual(tracking_url, 'http://test.luigi.com/')
message = task.set_status_message('message')
self.assertEqual(message, 'message')
def test_no_warn_if_param_types_ok(self):
with warnings.catch_warnings(record=True) as w:
DummyTask(**DUMMY_TASK_OK_PARAMS)
self.assertEqual(len(w), 0, msg='No warning should be raised when correct parameter types are used')
def test_warn_on_non_str_param(self):
params = dict(**DUMMY_TASK_OK_PARAMS)
params['param'] = 42
with self.assertWarnsRegex(UserWarning, 'Parameter "param" with value "42" is not of type string.'):
DummyTask(**params)
def test_warn_on_non_timedelta_param(self):
params = dict(**DUMMY_TASK_OK_PARAMS)
class MockTimedelta:
days = 1
seconds = 1
params['timedelta_param'] = MockTimedelta()
with self.assertWarnsRegex(UserWarning, 'Parameter "timedelta_param" with value ".*" is not of type timedelta.'):
DummyTask(**params)
def test_disable_window_seconds(self):
"""
Deprecated disable_window_seconds param uses disable_window value
"""
class ATask(luigi.Task):
disable_window = 17
task = ATask()
self.assertEqual(task.disable_window_seconds, 17)
@with_config({"ATaskWithBadParam": {"bad_param": "bad_value"}})
def test_bad_param(self):
class ATaskWithBadParam(luigi.Task):
bad_param = luigi.IntParameter()
with self.assertRaisesRegex(ValueError, r"ATaskWithBadParam\[args=\(\), kwargs={}\]: Error when parsing the default value of 'bad_param'"):
ATaskWithBadParam()
@with_config(
{
"TaskA": {
"a": "a",
"b": "b",
"c": "c",
},
"TaskB": {
"a": "a",
"b": "b",
"c": "c",
},
}
)
def test_unconsumed_params(self):
class TaskA(luigi.Task):
a = luigi.Parameter(default="a")
class TaskB(luigi.Task):
a = luigi.Parameter(default="a")
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings(
action="ignore",
category=Warning,
)
warnings.simplefilter(
action="always",
category=luigi.parameter.UnconsumedParameterWarning,
)
TaskA()
TaskB()
assert len(w) == 4
expected = [
("b", "TaskA"),
("c", "TaskA"),
("b", "TaskB"),
("c", "TaskB"),
]
for i, (expected_value, task_name) in zip(w, expected):
assert issubclass(i.category, luigi.parameter.UnconsumedParameterWarning)
assert str(i.message) == (
"The configuration contains the parameter "
f"'{expected_value}' with value '{expected_value}' that is not consumed by "
f"the task '{task_name}'."
)
class ExternalizeTaskTest(LuigiTestCase):
def test_externalize_taskclass(self):
class MyTask(luigi.Task):
def run(self):
pass
self.assertIsNotNone(MyTask.run) # Assert what we believe
task_object = luigi.task.externalize(MyTask)()
self.assertIsNone(task_object.run)
self.assertIsNotNone(MyTask.run) # Check immutability
self.assertIsNotNone(MyTask().run) # Check immutability
def test_externalize_taskobject(self):
class MyTask(luigi.Task):
def run(self):
pass
task_object = luigi.task.externalize(MyTask())
self.assertIsNone(task_object.run)
self.assertIsNotNone(MyTask.run) # Check immutability
self.assertIsNotNone(MyTask().run) # Check immutability
def test_externalize_taskclass_readable_name(self):
class MyTask(luigi.Task):
def run(self):
pass
task_class = luigi.task.externalize(MyTask)
self.assertIsNot(task_class, MyTask)
self.assertIn("MyTask", task_class.__name__)
def test_externalize_taskclass_instance_cache(self):
class MyTask(luigi.Task):
def run(self):
pass
task_class = luigi.task.externalize(MyTask)
self.assertIsNot(task_class, MyTask)
self.assertIs(MyTask(), MyTask()) # Assert it have enabled the instance caching
self.assertIsNot(task_class(), MyTask()) # Now, they should not be the same of course
def test_externalize_same_id(self):
class MyTask(luigi.Task):
def run(self):
pass
task_normal = MyTask()
task_ext_1 = luigi.task.externalize(MyTask)()
task_ext_2 = luigi.task.externalize(MyTask())
self.assertEqual(task_normal.task_id, task_ext_1.task_id)
self.assertEqual(task_normal.task_id, task_ext_2.task_id)
def test_externalize_same_id_with_task_namespace(self):
# Dependent on the new behavior from spotify/luigi#1953
class MyTask(luigi.Task):
task_namespace = "something.domething"
def run(self):
pass
task_normal = MyTask()
task_ext_1 = luigi.task.externalize(MyTask())
task_ext_2 = luigi.task.externalize(MyTask)()
self.assertEqual(task_normal.task_id, task_ext_1.task_id)
self.assertEqual(task_normal.task_id, task_ext_2.task_id)
self.assertEqual(str(task_normal), str(task_ext_1))
self.assertEqual(str(task_normal), str(task_ext_2))
def test_externalize_same_id_with_luigi_namespace(self):
# Dependent on the new behavior from spotify/luigi#1953
luigi.namespace('lets.externalize')
class MyTask(luigi.Task):
def run(self):
pass
luigi.namespace()
task_normal = MyTask()
task_ext_1 = luigi.task.externalize(MyTask())
task_ext_2 = luigi.task.externalize(MyTask)()
self.assertEqual(task_normal.task_id, task_ext_1.task_id)
self.assertEqual(task_normal.task_id, task_ext_2.task_id)
self.assertEqual(str(task_normal), str(task_ext_1))
self.assertEqual(str(task_normal), str(task_ext_2))
def test_externalize_with_requires(self):
class MyTask(luigi.Task):
def run(self):
pass
@luigi.util.requires(luigi.task.externalize(MyTask))
class Requirer(luigi.Task):
def run(self):
pass
self.assertIsNotNone(MyTask.run) # Check immutability
self.assertIsNotNone(MyTask().run) # Check immutability
def test_externalize_doesnt_affect_the_registry(self):
class MyTask(luigi.Task):
pass
reg_orig = luigi.task_register.Register._get_reg()
luigi.task.externalize(MyTask)
reg_afterwards = luigi.task_register.Register._get_reg()
self.assertEqual(reg_orig, reg_afterwards)
def test_can_uniquely_command_line_parse(self):
class MyTask(luigi.Task):
pass
# This first check is just an assumption rather than assertion
self.assertTrue(self.run_locally(['MyTask']))
luigi.task.externalize(MyTask)
# Now we check we don't encounter "ambiguous task" issues
self.assertTrue(self.run_locally(['MyTask']))
# We do this once again, is there previously was a bug like this.
luigi.task.externalize(MyTask)
self.assertTrue(self.run_locally(['MyTask']))
class TaskNamespaceTest(LuigiTestCase):
def setup_tasks(self):
class Foo(luigi.Task):
pass
class FooSubclass(Foo):
pass
return (Foo, FooSubclass, self.go_mynamespace())
def go_mynamespace(self):
luigi.namespace("mynamespace")
class Foo(luigi.Task):
p = luigi.IntParameter()
class Bar(Foo):
task_namespace = "othernamespace" # namespace override
class Baz(Bar): # inherits namespace for Bar
pass
luigi.namespace()
return collections.namedtuple('mynamespace', 'Foo Bar Baz')(Foo, Bar, Baz)
def test_vanilla(self):
(Foo, FooSubclass, namespace_test_helper) = self.setup_tasks()
self.assertEqual(Foo.task_family, "Foo")
self.assertEqual(str(Foo()), "Foo()")
self.assertEqual(FooSubclass.task_family, "FooSubclass")
self.assertEqual(str(FooSubclass()), "FooSubclass()")
def test_namespace(self):
(Foo, FooSubclass, namespace_test_helper) = self.setup_tasks()
self.assertEqual(namespace_test_helper.Foo.task_family, "mynamespace.Foo")
self.assertEqual(str(namespace_test_helper.Foo(1)), "mynamespace.Foo(p=1)")
self.assertEqual(namespace_test_helper.Bar.task_namespace, "othernamespace")
self.assertEqual(namespace_test_helper.Bar.task_family, "othernamespace.Bar")
self.assertEqual(str(namespace_test_helper.Bar(1)), "othernamespace.Bar(p=1)")
self.assertEqual(namespace_test_helper.Baz.task_namespace, "othernamespace")
self.assertEqual(namespace_test_helper.Baz.task_family, "othernamespace.Baz")
self.assertEqual(str(namespace_test_helper.Baz(1)), "othernamespace.Baz(p=1)")
def test_uses_latest_namespace(self):
luigi.namespace('a')
class _BaseTask(luigi.Task):
pass
luigi.namespace('b')
class _ChildTask(_BaseTask):
pass
luigi.namespace() # Reset everything
child_task = _ChildTask()
self.assertEqual(child_task.task_family, 'b._ChildTask')
self.assertEqual(str(child_task), 'b._ChildTask()')
def test_with_scope(self):
luigi.namespace('wohoo', scope='task_test')
luigi.namespace('bleh', scope='')
class MyTask(luigi.Task):
pass
luigi.namespace(scope='task_test')
luigi.namespace(scope='')
self.assertEqual(MyTask.get_task_namespace(), 'wohoo')
def test_with_scope_not_matching(self):
luigi.namespace('wohoo', scope='incorrect_namespace')
luigi.namespace('bleh', scope='')
class MyTask(luigi.Task):
pass
luigi.namespace(scope='incorrect_namespace')
luigi.namespace(scope='')
self.assertEqual(MyTask.get_task_namespace(), 'bleh')
class AutoNamespaceTest(LuigiTestCase):
this_module = 'task_test'
def test_auto_namespace_global(self):
luigi.auto_namespace()
class MyTask(luigi.Task):
pass
luigi.namespace()
self.assertEqual(MyTask.get_task_namespace(), self.this_module)
def test_auto_namespace_scope(self):
luigi.auto_namespace(scope='task_test')
luigi.namespace('bleh', scope='')
class MyTask(luigi.Task):
pass
luigi.namespace(scope='task_test')
luigi.namespace(scope='')
self.assertEqual(MyTask.get_task_namespace(), self.this_module)
def test_auto_namespace_not_matching(self):
luigi.auto_namespace(scope='incorrect_namespace')
luigi.namespace('bleh', scope='')
class MyTask(luigi.Task):
pass
luigi.namespace(scope='incorrect_namespace')
luigi.namespace(scope='')
self.assertEqual(MyTask.get_task_namespace(), 'bleh')
def test_auto_namespace_not_matching_2(self):
luigi.auto_namespace(scope='incorrect_namespace')
class MyTask(luigi.Task):
pass
luigi.namespace(scope='incorrect_namespace')
self.assertEqual(MyTask.get_task_namespace(), '')
class InitSubclassTest(LuigiTestCase):
def test_task_works_with_init_subclass(self):
class ReceivesClassKwargs(luigi.Task):
def __init_subclass__(cls, x, **kwargs):
super(ReceivesClassKwargs, cls).__init_subclass__()
cls.x = x
class Receiver(ReceivesClassKwargs, x=1):
pass
        self.assertEqual(Receiver.x, 1)
|
{
"content_hash": "83d716e97723021e7f1506fd655adde7",
"timestamp": "",
"source": "github",
"line_count": 456,
"max_line_length": 147,
"avg_line_length": 35.708333333333336,
"alnum_prop": 0.6153657188478782,
"repo_name": "riga/luigi",
"id": "cd95b88b8768bf13d9b636213a2d6673010e8e84",
"size": "16886",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test/task_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5575"
},
{
"name": "HTML",
"bytes": "43591"
},
{
"name": "JavaScript",
"bytes": "178078"
},
{
"name": "Python",
"bytes": "2145021"
},
{
"name": "Shell",
"bytes": "2973"
}
],
"symlink_target": ""
}
|
__author__ = "Nils Tobias Schmidt"
__email__ = "schmidt89 at informatik.uni-marburg.de"
from threading import Event
import threading
class StopThread(threading.Thread):
    ''' Extends `Thread` with an `Event` and a `terminate` method,
    mirroring the API that `multiprocessing` offers.
    Calling `terminate` sets the `Event`;
    implement your cleanup code against this event.
    '''
def __init__(self, *args, **kwargs):
super(StopThread, self).__init__(*args, **kwargs)
self.shall_terminate_event = Event()
def terminate(self):
        ''' Imitate the `multiprocessing` API and offer a way to do some cleanup in the `Thread`. '''
self.shall_terminate_event.set()
def shall_terminate(self):
''' Can be queried to know if the `Thread` shall do some cleanup '''
return self.shall_terminate_event.is_set()
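# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): a worker that
# polls shall_terminate() in its loop and exits cleanly once terminate() has
# been called. Names below are examples only.
import time


class _ExampleWorker(StopThread):

    def run(self):
        while not self.shall_terminate():
            # Do a small unit of work, then check the flag again.
            time.sleep(0.1)
        # Cleanup code for the terminate event goes here.


if __name__ == '__main__':
    worker = _ExampleWorker()
    worker.start()
    time.sleep(0.5)
    worker.terminate()  # sets the Event; the run() loop notices and exits
    worker.join()
# ---------------------------------------------------------------------------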
|
{
"content_hash": "df3d5e6ce31c445eb4cd45d86b38a81e",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 98,
"avg_line_length": 35.44,
"alnum_prop": 0.6399548532731377,
"repo_name": "nachtmaar/androlyze",
"id": "0d3c1cb2dd5c3682d3ef7699349391cb34d4e2fa",
"size": "906",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "androlyze/util/StopThread.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "509940"
},
{
"name": "Shell",
"bytes": "11367"
}
],
"symlink_target": ""
}
|
"""Autogenerated file, do not edit. Submit translations on Transifex."""
MESSAGES = {
"%d min remaining to read": "ulteriori %d minuti di lettura",
"(active)": "(attivo)",
"Also available in:": "Disponibile anche in:",
"Archive": "Archivio",
"Atom feed": "Feed Atom",
"Authors": "Autori",
"Categories": "Categorie",
"Comments": "Commenti",
"LANGUAGE": "Italiano",
"Languages:": "Lingue:",
"More posts about %s": "Altri articoli collegati %s",
"Newer posts": "Articoli più recenti",
"Next post": "Articolo successivo",
"Next": "Successivo",
"No posts found.": "Nessun articolo trovato.",
"Nothing found.": "Non trovato.",
"Older posts": "Articoli precedenti",
"Original site": "Sito originale",
"Posted:": "Pubblicato:",
"Posts about %s": "Articoli su %s",
"Posts by %s": "Articoli di %s",
"Posts for year %s": "Articoli per l'anno %s",
"Posts for {month_day_year}": "Articoli per il {month_day_year}",
"Posts for {month_year}": "Articoli per {month_year}",
"Previous post": "Articolo precedente",
"Previous": "Precedente",
"Publication date": "Data di pubblicazione",
"RSS feed": "Feed RSS",
"Read in English": "Leggi in italiano",
"Read more": "Continua la lettura",
"Skip to main content": "Vai al testo principale",
"Source": "Sorgente",
"Subcategories:": "Sottocategorie:",
"Tags and Categories": "Tag e categorie",
"Tags": "Tag",
"Toggle navigation": "Attiva la navigazione",
"Uncategorized": "Senza categorie",
"Up": "Su",
"Updates": "Aggiornamenti",
"Write your page here.": "Scrivi qui la tua pagina.",
"Write your post here.": "Scrivi qui il tuo post.",
"old posts, page %d": "vecchi articoli, pagina %d",
"page %d": "pagina %d",
"updated": "aggiornato",
}
|
{
"content_hash": "a01f2fc56d655e55335740440f81d887",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 72,
"avg_line_length": 38.395833333333336,
"alnum_prop": 0.6131307650569723,
"repo_name": "okin/nikola",
"id": "2af1a62ca793b5d2b5a8d4894ee88fe3f8a7e36d",
"size": "1869",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nikola/data/themes/base/messages/messages_it.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "25780"
},
{
"name": "HTML",
"bytes": "239"
},
{
"name": "JavaScript",
"bytes": "1888"
},
{
"name": "Jupyter Notebook",
"bytes": "568"
},
{
"name": "Python",
"bytes": "1236378"
},
{
"name": "Shell",
"bytes": "9822"
},
{
"name": "XSLT",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
import EncoderFactory
from DatasetManager import DatasetManager
import BucketFactory
import pandas as pd
import numpy as np
from sklearn.metrics import roc_auc_score
from sklearn.pipeline import FeatureUnion, Pipeline
from sklearn.preprocessing import StandardScaler
import time
import os
import sys
from sys import argv
import pickle
from collections import defaultdict
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
dataset_ref = argv[1]
params_dir = argv[2]
results_dir = argv[3]
bucket_method = argv[4]
cls_encoding = argv[5]
cls_method = argv[6]
if bucket_method == "state":
bucket_encoding = "last"
else:
bucket_encoding = "agg"
method_name = "%s_%s"%(bucket_method, cls_encoding)
dataset_ref_to_datasets = {
"bpic2011": ["bpic2011_f%s"%formula for formula in range(1,5)],
"bpic2015": ["bpic2015_%s_f2"%(municipality) for municipality in range(1,6)],
"insurance": ["insurance_activity", "insurance_followup"],
"sepsis_cases": ["sepsis_cases_1", "sepsis_cases_2", "sepsis_cases_4"]
}
encoding_dict = {
"laststate": ["static", "last"],
"agg": ["static", "agg"],
"index": ["static", "index"],
"combined": ["static", "last", "agg"]
}
datasets = [dataset_ref] if dataset_ref not in dataset_ref_to_datasets else dataset_ref_to_datasets[dataset_ref]
methods = encoding_dict[cls_encoding]
train_ratio = 0.8
random_state = 22
# create results directory
if not os.path.exists(os.path.join(params_dir)):
os.makedirs(os.path.join(params_dir))
for dataset_name in datasets:
# load optimal params
optimal_params_filename = os.path.join(params_dir, "optimal_params_%s_%s_%s.pickle" % (cls_method, dataset_name, method_name))
if not os.path.isfile(optimal_params_filename) or os.path.getsize(optimal_params_filename) <= 0:
continue
with open(optimal_params_filename, "rb") as fin:
args = pickle.load(fin)
# read the data
dataset_manager = DatasetManager(dataset_name)
data = dataset_manager.read_dataset()
cls_encoder_args = {'case_id_col': dataset_manager.case_id_col,
'static_cat_cols': dataset_manager.static_cat_cols,
'static_num_cols': dataset_manager.static_num_cols,
'dynamic_cat_cols': dataset_manager.dynamic_cat_cols,
'dynamic_num_cols': dataset_manager.dynamic_num_cols,
'fillna': True}
# determine min and max (truncated) prefix lengths
min_prefix_length = 1
if "traffic_fines" in dataset_name:
max_prefix_length = 10
elif "bpic2017" in dataset_name:
max_prefix_length = min(20, dataset_manager.get_pos_case_length_quantile(data, 0.90))
else:
max_prefix_length = min(40, dataset_manager.get_pos_case_length_quantile(data, 0.90))
# split into training and test
train, test = dataset_manager.split_data_strict(data, train_ratio, split="temporal")
test_case_counts = test.groupby(dataset_manager.case_id_col).size()
test_case_counts = test_case_counts[test_case_counts >= max_prefix_length]
test = test[test[dataset_manager.case_id_col].isin(test_case_counts.index)]
outfile = os.path.join(results_dir, "results_long_traces_%s_%s_%s.csv" % (cls_method, dataset_name, method_name))
dt_train_prefixes = dataset_manager.generate_prefix_data(train, min_prefix_length, max_prefix_length)
dt_test_prefixes = dataset_manager.generate_prefix_data(test, min_prefix_length, max_prefix_length)
# Bucketing prefixes based on control flow
bucketer_args = {'encoding_method':bucket_encoding,
'case_id_col':dataset_manager.case_id_col,
'cat_cols':[dataset_manager.activity_col],
'num_cols':[],
'random_state':random_state}
if bucket_method == "cluster":
bucketer_args["n_clusters"] = int(args["n_clusters"])
bucketer = BucketFactory.get_bucketer(bucket_method, **bucketer_args)
bucket_assignments_train = bucketer.fit_predict(dt_train_prefixes)
bucket_assignments_test = bucketer.predict(dt_test_prefixes)
preds_all = []
test_y_all = []
nr_events_all = []
for bucket in set(bucket_assignments_test):
if bucket_method == "prefix":
current_args = args[bucket]
else:
current_args = args
relevant_train_cases_bucket = dataset_manager.get_indexes(dt_train_prefixes)[bucket_assignments_train == bucket]
relevant_test_cases_bucket = dataset_manager.get_indexes(dt_test_prefixes)[bucket_assignments_test == bucket]
dt_test_bucket = dataset_manager.get_relevant_data_by_indexes(dt_test_prefixes, relevant_test_cases_bucket)
test_y = dataset_manager.get_label_numeric(dt_test_bucket)
nr_events_all.extend(list(dataset_manager.get_prefix_lengths(dt_test_bucket)))
if len(relevant_train_cases_bucket) == 0:
preds = [dataset_manager.get_class_ratio(train)] * len(relevant_test_cases_bucket)
else:
dt_train_bucket = dataset_manager.get_relevant_data_by_indexes(dt_train_prefixes, relevant_train_cases_bucket) # one row per event
train_y = dataset_manager.get_label_numeric(dt_train_bucket)
if len(set(train_y)) < 2:
preds = [train_y[0]] * len(relevant_test_cases_bucket)
else:
feature_combiner = FeatureUnion([(method, EncoderFactory.get_encoder(method, **cls_encoder_args)) for method in methods])
if cls_method == "rf":
cls = RandomForestClassifier(n_estimators=500,
max_features=current_args['max_features'],
random_state=random_state)
elif cls_method == "xgboost":
cls = xgb.XGBClassifier(objective='binary:logistic',
n_estimators=500,
learning_rate= current_args['learning_rate'],
subsample=current_args['subsample'],
max_depth=int(current_args['max_depth']),
colsample_bytree=current_args['colsample_bytree'],
min_child_weight=int(current_args['min_child_weight']),
seed=random_state)
elif cls_method == "logit":
cls = LogisticRegression(C=2**current_args['C'],
random_state=random_state)
elif cls_method == "svm":
cls = SVC(C=2**current_args['C'],
gamma=2**current_args['gamma'],
random_state=random_state)
if cls_method == "svm" or cls_method == "logit":
pipeline = Pipeline([('encoder', feature_combiner), ('scaler', StandardScaler()), ('cls', cls)])
else:
pipeline = Pipeline([('encoder', feature_combiner), ('cls', cls)])
pipeline.fit(dt_train_bucket, train_y)
if cls_method == "svm":
preds = pipeline.decision_function(dt_test_bucket)
else:
preds_pos_label_idx = np.where(cls.classes_ == 1)[0][0]
preds = pipeline.predict_proba(dt_test_bucket)[:,preds_pos_label_idx]
preds_all.extend(preds)
test_y_all.extend(test_y)
with open(outfile, 'w') as fout:
fout.write("%s;%s;%s;%s;%s;%s;%s\n"%("dataset", "method", "cls", "nr_events", "n_iter", "metric", "score"))
dt_results = pd.DataFrame({"actual": test_y_all, "predicted": preds_all, "nr_events": nr_events_all})
for nr_events, group in dt_results.groupby("nr_events"):
if len(set(group.actual)) < 2:
fout.write("%s;%s;%s;%s;%s;%s;%s\n"%(dataset_name, method_name, cls_method, nr_events, -1, "auc", np.nan))
else:
fout.write("%s;%s;%s;%s;%s;%s;%s\n"%(dataset_name, method_name, cls_method, nr_events, -1, "auc", roc_auc_score(group.actual, group.predicted)))
fout.write("%s;%s;%s;%s;%s;%s;%s\n"%(dataset_name, method_name, cls_method, -1, -1, "auc", roc_auc_score(dt_results.actual, dt_results.predicted)))
|
{
"content_hash": "1e146c71b5e14158245e2d617a581210",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 160,
"avg_line_length": 45.41798941798942,
"alnum_prop": 0.5971575023299162,
"repo_name": "irhete/predictive-monitoring-benchmark",
"id": "46480f5f639f4f49473a6925e7a7bc99e66e7eda",
"size": "8584",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "experiments/experiments_long_cases.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "5957"
},
{
"name": "Python",
"bytes": "149552"
},
{
"name": "R",
"bytes": "3494"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url
from daguerre.views import (AdjustedImageRedirectView, AjaxAdjustmentInfoView,
AjaxUpdateAreaView)
urlpatterns = [
url(r'^adjust/(?P<storage_path>.+)$',
AdjustedImageRedirectView.as_view(),
name="daguerre_adjusted_image_redirect"),
url(r'^info/(?P<storage_path>.+)$',
AjaxAdjustmentInfoView.as_view(),
name="daguerre_ajax_adjustment_info"),
url(r'^area/(?P<storage_path>.+?)(?:/(?P<pk>\d+))?$',
AjaxUpdateAreaView.as_view(),
name="daguerre_ajax_update_area"),
]
|
{
"content_hash": "4c1acd2da3417c0d5f61e60d5990f4fd",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 78,
"avg_line_length": 34.470588235294116,
"alnum_prop": 0.621160409556314,
"repo_name": "littleweaver/django-daguerre",
"id": "3ca4d682534ed7f498aaa655f5abc48e47a347df",
"size": "586",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "daguerre/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1263"
},
{
"name": "JavaScript",
"bytes": "32965"
},
{
"name": "Python",
"bytes": "136624"
}
],
"symlink_target": ""
}
|
import sqlalchemy as sa
from tests import TestCase
from sqlalchemy_utils import UUIDType
import uuid
class TestUUIDType(TestCase):
def create_models(self):
class User(self.Base):
__tablename__ = 'user'
id = sa.Column(UUIDType, default=uuid.uuid4, primary_key=True)
def __repr__(self):
return 'User(%r)' % self.id
self.User = User
def test_commit(self):
obj = self.User()
obj.id = uuid.uuid4().hex
self.session.add(obj)
self.session.commit()
u = self.session.query(self.User).one()
assert u.id == obj.id
def test_coerce(self):
obj = self.User()
obj.id = identifier = uuid.uuid4().hex
assert isinstance(obj.id, uuid.UUID)
assert obj.id.hex == identifier
obj.id = identifier = uuid.uuid4().bytes
assert isinstance(obj.id, uuid.UUID)
assert obj.id.bytes == identifier
|
{
"content_hash": "3ad473648553cb441f77b21d6aa2a8db",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 74,
"avg_line_length": 24.615384615384617,
"alnum_prop": 0.584375,
"repo_name": "joshfriend/sqlalchemy-utils",
"id": "5a1de102b8be9981e79b0b7aae9c5d910a738710",
"size": "960",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/types/test_uuid.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "5604"
},
{
"name": "Python",
"bytes": "462856"
},
{
"name": "Shell",
"bytes": "5116"
}
],
"symlink_target": ""
}
|
import sys
import os
this_path = os.path.split(sys.modules['__main__'].__file__)[0]
sys.path.insert(0, this_path + '/lib')
import multiapt.defaultconfig
all = multiapt.defaultconfig.__dict__.keys()
all.sort()
print '# -*- coding: utf-8 -*-'
print '#'
print '# All available configuration parameters, and their default values.'
print '#'
for k in all:
if k[:1] == '_': continue
print '# %s = %s' % (k, repr(multiapt.defaultconfig.__dict__[k]))
|
{
"content_hash": "5746471d49ce34f1294a4104004b2529",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 75,
"avg_line_length": 23.736842105263158,
"alnum_prop": 0.6452328159645233,
"repo_name": "zeha/multiapt",
"id": "1fffb990e61c31a77f9af085b4cfd0802eac958f",
"size": "494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "print_config_template.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "10581"
},
{
"name": "Python",
"bytes": "637846"
}
],
"symlink_target": ""
}
|
'''
Text Markup
===========
.. versionadded:: 1.1.0
We provide a simple text-markup for inline text styling. The syntax looks the
same as the `BBCode <http://en.wikipedia.org/wiki/BBCode>`_.
A tag is defined as ``[tag]``, and should have a corresponding
``[/tag]`` closing tag. For example::
[b]Hello [color=ff0000]world[/color][/b]
The following tags are available:
``[b][/b]``
Activate bold text
``[i][/i]``
Activate italic text
``[u][/u]``
Underlined text
``[s][/s]``
Strikethrough text
``[font=<str>][/font]``
Change the font
``[size=<integer>][/size]``
Change the font size
``[color=#<color>][/color]``
Change the text color
``[ref=<str>][/ref]``
Add an interactive zone. The reference + all the word box inside the
reference will be available in :attr:`MarkupLabel.refs`
``[anchor=<str>]``
Put an anchor in the text. You can get the position of your anchor within
the text with :attr:`MarkupLabel.anchors`
``[sub][/sub]``
Display the text at a subscript position relative to the text before it.
``[sup][/sup]``
Display the text at a superscript position relative to the text before it.
If you need to escape the markup from the current text, use
:func:`kivy.utils.escape_markup`.
'''
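# A minimal usage sketch (illustrative only, not part of this module): the
# markup described above is normally consumed through kivy.uix.label.Label
# with markup=True; user-supplied text should be passed through
# kivy.utils.escape_markup() before being embedded in a markup string.
#
#   from kivy.uix.label import Label
#   Label(text='[b]Hello [color=ff0000]world[/color][/b]', markup=True)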
__all__ = ('MarkupLabel', )
import re
from kivy.properties import dpi2px
from kivy.parser import parse_color
from kivy.logger import Logger
from kivy.core.text import Label, LabelBase
from kivy.core.text.text_layout import layout_text, LayoutWord, LayoutLine
from copy import copy
from math import ceil
from functools import partial
# We need to do this trick when documentation is generated
MarkupLabelBase = Label
if Label is None:
MarkupLabelBase = LabelBase
class MarkupLabel(MarkupLabelBase):
'''Markup text label.
    See the module documentation for more information.
'''
def __init__(self, *largs, **kwargs):
self._style_stack = {}
self._refs = {}
self._anchors = {}
super(MarkupLabel, self).__init__(*largs, **kwargs)
self._internal_size = 0, 0
self._cached_lines = []
@property
def refs(self):
'''Get the bounding box of all the ``[ref=...]``::
{ 'refA': ((x1, y1, x2, y2), (x1, y1, x2, y2)), ... }
'''
return self._refs
@property
def anchors(self):
'''Get the position of all the ``[anchor=...]``::
{ 'anchorA': (x, y), 'anchorB': (x, y), ... }
'''
return self._anchors
@property
def markup(self):
        '''Return the text with all the markup split into its parts::
            >>> MarkupLabel('[b]Hello world[/b]').markup
            ['[b]', 'Hello world', '[/b]']
'''
        s = re.split(r'(\[.*?\])', self.label)
s = [x for x in s if x != '']
return s
def _push_style(self, k):
        if k not in self._style_stack:
self._style_stack[k] = []
self._style_stack[k].append(self.options[k])
def _pop_style(self, k):
if k not in self._style_stack or len(self._style_stack[k]) == 0:
Logger.warning('Label: pop style stack without push')
return
v = self._style_stack[k].pop()
self.options[k] = v
def render(self, real=False):
options = copy(self.options)
if not real:
ret = self._pre_render()
else:
ret = self._render_real()
self.options = options
return ret
def _pre_render(self):
# split markup, words, and lines
# result: list of word with position and width/height
# during the first pass, we don't care about h/valign
self._cached_lines = lines = []
self._refs = {}
self._anchors = {}
clipped = False
w = h = 0
uw, uh = self.text_size
spush = self._push_style
spop = self._pop_style
opts = options = self.options
options['_ref'] = None
options['_anchor'] = None
options['script'] = 'normal'
shorten = options['shorten']
        # if shorten, then don't split lines to fit uw, because the text will
        # be flattened later when shortening; lines broken mid-word would
        # otherwise gain a space mid-word once the lines are joined back
uw_temp = None if shorten else uw
xpad = options['padding_x']
uhh = (None if uh is not None and options['valign'][-1] != 'p' or
options['shorten'] else uh)
options['strip'] = options['strip'] or options['halign'][-1] == 'y'
for item in self.markup:
if item == '[b]':
spush('bold')
options['bold'] = True
self.resolve_font_name()
elif item == '[/b]':
spop('bold')
self.resolve_font_name()
elif item == '[i]':
spush('italic')
options['italic'] = True
self.resolve_font_name()
elif item == '[/i]':
spop('italic')
self.resolve_font_name()
elif item == '[u]':
spush('underline')
options['underline'] = True
self.resolve_font_name()
elif item == '[/u]':
spop('underline')
self.resolve_font_name()
elif item == '[s]':
spush('strikethrough')
options['strikethrough'] = True
self.resolve_font_name()
elif item == '[/s]':
spop('strikethrough')
self.resolve_font_name()
elif item[:6] == '[size=':
item = item[6:-1]
try:
if item[-2:] in ('px', 'pt', 'in', 'cm', 'mm', 'dp', 'sp'):
size = dpi2px(item[:-2], item[-2:])
else:
size = int(item)
                except ValueError:
                    # the size value could not be parsed; fall back to the
                    # current font size
                    size = options['font_size']
spush('font_size')
options['font_size'] = size
elif item == '[/size]':
spop('font_size')
elif item[:7] == '[color=':
color = parse_color(item[7:-1])
spush('color')
options['color'] = color
elif item == '[/color]':
spop('color')
elif item[:6] == '[font=':
fontname = item[6:-1]
spush('font_name')
options['font_name'] = fontname
self.resolve_font_name()
elif item == '[/font]':
spop('font_name')
self.resolve_font_name()
elif item[:5] == '[sub]':
spush('font_size')
spush('script')
options['font_size'] = options['font_size'] * .5
options['script'] = 'subscript'
elif item == '[/sub]':
spop('font_size')
spop('script')
elif item[:5] == '[sup]':
spush('font_size')
spush('script')
options['font_size'] = options['font_size'] * .5
options['script'] = 'superscript'
elif item == '[/sup]':
spop('font_size')
spop('script')
elif item[:5] == '[ref=':
ref = item[5:-1]
spush('_ref')
options['_ref'] = ref
elif item == '[/ref]':
spop('_ref')
elif not clipped and item[:8] == '[anchor=':
options['_anchor'] = item[8:-1]
elif not clipped:
item = item.replace('&bl;', '[').replace(
                    '&br;', ']').replace('&amp;', '&')
opts = copy(options)
extents = self.get_cached_extents()
opts['space_width'] = extents(' ')[0]
w, h, clipped = layout_text(item, lines, (w, h),
(uw_temp, uhh), opts, extents, True, False)
if len(lines): # remove any trailing spaces from the last line
old_opts = self.options
self.options = copy(opts)
w, h, clipped = layout_text('', lines, (w, h), (uw_temp, uhh),
self.options, self.get_cached_extents(), True, True)
self.options = old_opts
if shorten:
options['_ref'] = None # no refs for you!
options['_anchor'] = None
w, h, lines = self.shorten_post(lines, w, h)
self._cached_lines = lines
# when valign is not top, for markup we layout everything (text_size[1]
# is temporarily set to None) and after layout cut to size if too tall
elif uh != uhh and h > uh and len(lines) > 1:
if options['valign'][-1] == 'm': # bottom
i = 0
while i < len(lines) - 1 and h > uh:
h -= lines[i].h
i += 1
del lines[:i]
else: # middle
i = 0
top = int(h / 2. + uh / 2.) # remove extra top portion
while i < len(lines) - 1 and h > top:
h -= lines[i].h
i += 1
del lines[:i]
i = len(lines) - 1 # remove remaining bottom portion
while i and h > uh:
h -= lines[i].h
i -= 1
del lines[i + 1:]
# now justify the text
if options['halign'][-1] == 'y' and uw is not None:
# XXX: update refs to justified pos
            # when justifying, each line should've been stripped already
split = partial(re.split, re.compile('( +)'))
uww = uw - 2 * xpad
chr = type(self.text)
space = chr(' ')
empty = chr('')
for i in range(len(lines)):
line = lines[i]
words = line.words
# if there's nothing to justify, we're done
if (not line.w or int(uww - line.w) <= 0 or not len(words) or
line.is_last_line):
continue
done = False
parts = [None, ] * len(words) # contains words split by space
idxs = [None, ] * len(words) # indices of the space in parts
# break each word into spaces and add spaces until it's full
# do first round of split in case we don't need to split all
for w in range(len(words)):
word = words[w]
sw = word.options['space_width']
p = parts[w] = split(word.text)
idxs[w] = [v for v in range(len(p)) if
p[v].startswith(' ')]
# now we have the indices of the spaces in split list
for k in idxs[w]:
# try to add single space at each space
if line.w + sw > uww:
done = True
break
line.w += sw
word.lw += sw
p[k] += space
if done:
break
# there's not a single space in the line?
if not any(idxs):
continue
# now keep adding spaces to already split words until done
while not done:
for w in range(len(words)):
if not idxs[w]:
continue
word = words[w]
sw = word.options['space_width']
p = parts[w]
for k in idxs[w]:
# try to add single space at each space
if line.w + sw > uww:
done = True
break
line.w += sw
word.lw += sw
p[k] += space
if done:
break
# if not completely full, push last words to right edge
diff = int(uww - line.w)
if diff > 0:
# find the last word that had a space
for w in range(len(words) - 1, -1, -1):
if not idxs[w]:
continue
break
old_opts = self.options
self.options = word.options
word = words[w]
# split that word into left/right and push right till uww
l_text = empty.join(parts[w][:idxs[w][-1]])
r_text = empty.join(parts[w][idxs[w][-1]:])
left = LayoutWord(word.options,
self.get_extents(l_text)[0], word.lh, l_text)
right = LayoutWord(word.options,
self.get_extents(r_text)[0], word.lh, r_text)
left.lw = max(left.lw, word.lw + diff - right.lw)
self.options = old_opts
# now put words back together with right/left inserted
for k in range(len(words)):
if idxs[k]:
words[k].text = empty.join(parts[k])
words[w] = right
words.insert(w, left)
else:
for k in range(len(words)):
if idxs[k]:
words[k].text = empty.join(parts[k])
line.w = uww
w = max(w, uww)
self._internal_size = w, h
if uw:
w = uw
if uh:
h = uh
if h > 1 and w < 2:
w = 2
if w < 1:
w = 1
if h < 1:
h = 1
return int(w), int(h)
def render_lines(self, lines, options, render_text, y, size):
xpad = options['padding_x']
w = size[0]
halign = options['halign']
refs = self._refs
anchors = self._anchors
for layout_line in lines: # for plain label each line has only one str
lw, lh = layout_line.w, layout_line.h
x = xpad
if halign[0] == 'c': # center
x = int((w - lw) / 2.)
elif halign[0] == 'r': # right
x = max(0, int(w - lw - xpad))
layout_line.x = x
layout_line.y = y
psp = pph = 0
for word in layout_line.words:
options = self.options = word.options
# the word height is not scaled by line_height, only lh was
wh = options['line_height'] * word.lh
# calculate sub/super script pos
if options['script'] == 'superscript':
script_pos = max(0, psp if psp else self.get_descent())
psp = script_pos
pph = wh
elif options['script'] == 'subscript':
script_pos = min(lh - wh, ((psp + pph) - wh)
if pph else (lh - wh))
pph = wh
psp = script_pos
else:
script_pos = (lh - wh) / 1.25
psp = pph = 0
if len(word.text):
render_text(word.text, x, y + script_pos)
# should we record refs ?
ref = options['_ref']
if ref is not None:
                    if ref not in refs:
refs[ref] = []
refs[ref].append((x, y, x + word.lw, y + wh))
# Should we record anchors?
anchor = options['_anchor']
if anchor is not None:
                    if anchor not in anchors:
anchors[anchor] = (x, y)
x += word.lw
y += lh
return y
def shorten_post(self, lines, w, h, margin=2):
''' Shortens the text to a single line according to the label options.
This function operates on a text that has already been laid out because
for markup, parts of text can have different size and options.
If :attr:`text_size` [0] is None, the lines are returned unchanged.
Otherwise, the lines are converted to a single line fitting within the
constrained width, :attr:`text_size` [0].
:params:
`lines`: list of `LayoutLine` instances describing the text.
`w`: int, the width of the text in lines, including padding.
`h`: int, the height of the text in lines, including padding.
`margin` int, the additional space left on the sides. This is in
addition to :attr:`padding_x`.
:returns:
            3-tuple of (w, h, lines), where w and h are similar to the input
            and contain the resulting width / height of the text, including
            padding. lines is a list containing a single `LayoutLine`, which
            contains the words for the line.
'''
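        # For example (illustrative only): with shorten_from='center' and
        # split_str=' ', a laid-out line like "The quick brown fox jumps"
        # becomes something like "The quick...jumps", depending on how much
        # width is available.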
def n(line, c):
''' A function similar to text.find, except it's an iterator that
returns successive occurrences of string c in list line. line is
not a string, but a list of LayoutWord instances that we walk
from left to right returning the indices of c in the words as we
encounter them. Note that the options can be different among the
words.
:returns:
3-tuple: the index of the word in line, the index of the
occurrence in word, and the extents (width) of the combined
words until this occurrence, not including the occurrence char.
If no more are found it returns (-1, -1, total_w) where total_w
is the full width of all the words.
'''
total_w = 0
for w in range(len(line)):
word = line[w]
if not word.lw:
continue
f = partial(word.text.find, c)
i = f()
while i != -1:
self.options = word.options
yield w, i, total_w + self.get_extents(word.text[:i])[0]
i = f(i + 1)
self.options = word.options
total_w += self.get_extents(word.text)[0]
yield -1, -1, total_w # this should never be reached, really
def p(line, c):
''' Similar to the `n` function, except it returns occurrences of c
from right to left in the list, line, similar to rfind.
'''
total_w = 0
offset = 0 if len(c) else 1
for w in range(len(line) - 1, -1, -1):
word = line[w]
if not word.lw:
continue
f = partial(word.text.rfind, c)
i = f()
while i != -1:
self.options = word.options
yield (w, i, total_w +
self.get_extents(word.text[i + 1:])[0])
if i:
i = f(0, i - offset)
else:
if not c:
self.options = word.options
yield (w, -1, total_w +
self.get_extents(word.text)[0])
break
self.options = word.options
total_w += self.get_extents(word.text)[0]
yield -1, -1, total_w # this should never be reached, really
def n_restricted(line, uw, c):
''' Similar to the function `n`, except it only returns the first
occurrence and it's not an iterator. Furthermore, if the first
occurrence doesn't fit within width uw, it returns the index of
whatever amount of text will still fit in uw.
:returns:
similar to the function `n`, except it's a 4-tuple, with the
last element a boolean, indicating if we had to clip the text
to fit in uw (True) or if the whole text until the first
occurrence fitted in uw (False).
'''
total_w = 0
            if not len(line):
                return 0, 0, 0, False
for w in range(len(line)):
word = line[w]
f = partial(word.text.find, c)
self.options = word.options
extents = self.get_cached_extents()
i = f()
if i != -1:
ww = extents(word.text[:i])[0]
if i != -1 and total_w + ww <= uw: # found and it fits
return w, i, total_w + ww, False
elif i == -1:
ww = extents(word.text)[0]
if total_w + ww <= uw: # wasn't found and all fits
total_w += ww
continue
i = len(word.text)
# now just find whatever amount of the word does fit
e = 0
while e != i and total_w + extents(word.text[:e])[0] <= uw:
e += 1
e = max(0, e - 1)
return w, e, total_w + extents(word.text[:e])[0], True
return -1, -1, total_w, False
def p_restricted(line, uw, c):
''' Similar to `n_restricted`, except it returns the first
occurrence starting from the right, like `p`.
'''
total_w = 0
            if not len(line):
                return 0, 0, 0, False
for w in range(len(line) - 1, -1, -1):
word = line[w]
f = partial(word.text.rfind, c)
self.options = word.options
extents = self.get_cached_extents()
i = f()
if i != -1:
ww = extents(word.text[i + 1:])[0]
if i != -1 and total_w + ww <= uw: # found and it fits
return w, i, total_w + ww, False
elif i == -1:
ww = extents(word.text)[0]
if total_w + ww <= uw: # wasn't found and all fits
total_w += ww
continue
# now just find whatever amount of the word does fit
s = len(word.text) - 1
while s >= 0 and total_w + extents(word.text[s:])[0] <= uw:
s -= 1
return w, s, total_w + extents(word.text[s + 1:])[0], True
return -1, -1, total_w, False
textwidth = self.get_cached_extents()
uw = self.text_size[0]
if uw is None:
return w, h, lines
old_opts = copy(self.options)
uw = max(0, int(uw - old_opts['padding_x'] * 2 - margin))
chr = type(self.text)
ssize = textwidth(' ')
c = old_opts['split_str']
line_height = old_opts['line_height']
xpad, ypad = old_opts['padding_x'], old_opts['padding_y']
dir = old_opts['shorten_from'][0]
# flatten lines into single line
line = []
last_w = 0
for l in range(len(lines)):
# concatenate (non-empty) inside lines with a space
this_line = lines[l]
if last_w and this_line.w and not this_line.line_wrap:
line.append(LayoutWord(old_opts, ssize[0], ssize[1], chr(' ')))
last_w = this_line.w or last_w
for word in this_line.words:
if word.lw:
line.append(word)
# if that fits, just return the flattened line
lw = sum([word.lw for word in line])
if lw <= uw:
lh = max([word.lh for word in line] + [0]) * line_height
return lw + 2 * xpad, lh + 2 * ypad, [LayoutLine(0, 0,
lw, lh, 1, 0, line)]
# find the size of ellipsis that'll fit
elps_s = textwidth('...')
if elps_s[0] > uw: # even ellipsis didn't fit...
s = textwidth('..')
if s[0] <= uw:
return (s[0] + 2 * xpad, s[1] * line_height + 2 * ypad,
[LayoutLine(0, 0, s[0], s[1], 1, 0, [LayoutWord(old_opts,
s[0], s[1], '..')])])
else:
s = textwidth('.')
return (s[0] + 2 * xpad, s[1] * line_height + 2 * ypad,
[LayoutLine(0, 0, s[0], s[1], 1, 0, [LayoutWord(old_opts,
s[0], s[1], '.')])])
elps = LayoutWord(old_opts, elps_s[0], elps_s[1], '...')
uw -= elps_s[0]
# now find the first left and right words that fit
w1, e1, l1, clipped1 = n_restricted(line, uw, c)
w2, s2, l2, clipped2 = p_restricted(line, uw, c)
if dir != 'l': # center or right
line1 = None
if clipped1 or clipped2 or l1 + l2 > uw:
# if either was clipped or both don't fit, just take first
if len(c):
self.options = old_opts
old_opts['split_str'] = ''
res = self.shorten_post(lines, w, h, margin)
self.options['split_str'] = c
return res
line1 = line[:w1]
last_word = line[w1]
last_text = last_word.text[:e1]
self.options = last_word.options
s = self.get_extents(last_text)
line1.append(LayoutWord(last_word.options, s[0], s[1],
last_text))
elif (w1, e1) == (-1, -1): # this shouldn't occur
line1 = line
if line1:
line1.append(elps)
lw = sum([word.lw for word in line1])
lh = max([word.lh for word in line1]) * line_height
self.options = old_opts
return lw + 2 * xpad, lh + 2 * ypad, [LayoutLine(0, 0,
lw, lh, 1, 0, line1)]
# now we know that both the first and last word fit, and that
        # there's at least one instance of the split_str in the line
if (w1, e1) != (w2, s2): # more than one split_str
if dir == 'r':
f = n(line, c) # iterator
assert next(f)[:-1] == (w1, e1) # first word should match
ww1, ee1, l1 = next(f)
while l2 + l1 <= uw:
w1, e1 = ww1, ee1
ww1, ee1, l1 = next(f)
if (w1, e1) == (w2, s2):
break
else: # center
f = n(line, c) # iterator
f_inv = p(line, c) # iterator
assert next(f)[:-1] == (w1, e1)
assert next(f_inv)[:-1] == (w2, s2)
while True:
if l1 <= l2:
ww1, ee1, l1 = next(f) # hypothesize that next fit
if l2 + l1 > uw:
break
w1, e1 = ww1, ee1
if (w1, e1) == (w2, s2):
break
else:
ww2, ss2, l2 = next(f_inv)
if l2 + l1 > uw:
break
w2, s2 = ww2, ss2
if (w1, e1) == (w2, s2):
break
else: # left
line1 = [elps]
if clipped1 or clipped2 or l1 + l2 > uw:
# if either was clipped or both don't fit, just take last
if len(c):
self.options = old_opts
old_opts['split_str'] = ''
res = self.shorten_post(lines, w, h, margin)
self.options['split_str'] = c
return res
first_word = line[w2]
first_text = first_word.text[s2 + 1:]
self.options = first_word.options
s = self.get_extents(first_text)
line1.append(LayoutWord(first_word.options, s[0], s[1],
first_text))
line1.extend(line[w2 + 1:])
elif (w1, e1) == (-1, -1): # this shouldn't occur
line1 = line
if len(line1) != 1:
lw = sum([word.lw for word in line1])
lh = max([word.lh for word in line1]) * line_height
self.options = old_opts
return lw + 2 * xpad, lh + 2 * ypad, [LayoutLine(0, 0,
lw, lh, 1, 0, line1)]
# now we know that both the first and last word fit, and that
            # there's at least one instance of the split_str in the line
if (w1, e1) != (w2, s2): # more than one split_str
f_inv = p(line, c) # iterator
assert next(f_inv)[:-1] == (w2, s2) # last word should match
ww2, ss2, l2 = next(f_inv)
while l2 + l1 <= uw:
w2, s2 = ww2, ss2
ww2, ss2, l2 = next(f_inv)
if (w1, e1) == (w2, s2):
break
# now add back the left half
line1 = line[:w1]
last_word = line[w1]
last_text = last_word.text[:e1]
self.options = last_word.options
s = self.get_extents(last_text)
if len(last_text):
line1.append(LayoutWord(last_word.options, s[0], s[1], last_text))
elps.options = last_word.options
line1.append(elps)
# now add back the right half
first_word = line[w2]
first_text = first_word.text[s2 + 1:]
self.options = first_word.options
s = self.get_extents(first_text)
if len(first_text):
line1.append(LayoutWord(first_word.options, s[0], s[1],
first_text))
line1.extend(line[w2 + 1:])
lw = sum([word.lw for word in line1])
lh = max([word.lh for word in line1]) * line_height
self.options = old_opts
return lw + 2 * xpad, lh + 2 * ypad, [LayoutLine(0, 0,
lw, lh, 1, 0, line1)]
|
{
"content_hash": "6fa21c4c2d8e8a55ec99df842d6b1b36",
"timestamp": "",
"source": "github",
"line_count": 772,
"max_line_length": 79,
"avg_line_length": 39.77590673575129,
"alnum_prop": 0.44934379783111345,
"repo_name": "vitorio/kivy",
"id": "e91bccc65f7923742a00789e038c875bd1f35f92",
"size": "30707",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "kivy/core/text/markup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "329293"
},
{
"name": "Emacs Lisp",
"bytes": "9695"
},
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "HTML",
"bytes": "19384"
},
{
"name": "Makefile",
"bytes": "4201"
},
{
"name": "Objective-C",
"bytes": "14779"
},
{
"name": "Python",
"bytes": "3574220"
},
{
"name": "VimL",
"bytes": "1123"
}
],
"symlink_target": ""
}
|
import os
import unittest
import imath
import IECore
import GafferScene
import GafferSceneTest
import GafferOSL
import GafferOSLTest
import GafferAppleseed
class AppleseedShaderAdaptorTest( GafferOSLTest.OSLTestCase ) :
def testDirtyPropagation( self ) :
adaptor = GafferAppleseed.AppleseedShaderAdaptor()
self.assertEqual( adaptor.affects( adaptor["in"]["attributes"] ), [ adaptor["out"]["attributes"] ] )
def testAdaption( self ) :
sphere = GafferScene.Sphere()
shader = GafferOSL.OSLShader( "blackbody" )
shader.loadShader( "as_blackbody" )
assignment = GafferScene.ShaderAssignment()
assignment["in"].setInput( sphere["out"] )
assignment["shader"].setInput( shader["out"] )
self.assertEqual( assignment["out"].attributes( "/sphere" ).keys(), [ "osl:surface" ] )
adaptor = GafferAppleseed.AppleseedShaderAdaptor()
adaptor["in"].setInput( assignment["out"] )
self.assertEqual( adaptor["out"].attributes( "/sphere" ).keys(), [ "osl:surface" ] )
network = adaptor["out"].attributes( "/sphere" )["osl:surface"]
self.assertEqual( len( network ), 2 )
self.assertEqual( network.getShader( "blackbody" ).name, "as_blackbody" )
self.assertEqual( network.getShader( "blackbody" ).type, "osl:shader" )
self.assertEqual( network.getShader( "material" ).name, "as_closure2surface" )
self.assertEqual( network.getShader( "material" ).type, "osl:surface" )
self.assertEqual( network.input( ( "material", "in_input" ) ), ( "blackbody", "out_color" ) )
self.assertEqual( network.getOutput(), ( "material", "" ) )
def testAdaptionOfEmptyShader( self ) :
sphere = GafferScene.Sphere()
shader = GafferOSL.OSLShader()
shader.loadShader( self.compileShader( os.path.dirname( __file__ ) + "/shaders/empty.osl" ) )
assignment = GafferScene.ShaderAssignment()
assignment["in"].setInput( sphere["out"] )
assignment["shader"].setInput( shader["out"] )
self.assertEqual( assignment["out"].attributes( "/sphere" ).keys(), [ "osl:surface" ] )
adaptor = GafferAppleseed.AppleseedShaderAdaptor()
adaptor["in"].setInput( assignment["out"] )
self.assertEqual( adaptor["out"].attributes( "/sphere" ).keys(), [ "osl:surface" ] )
network = adaptor["out"].attributes( "/sphere" )["osl:surface"]
self.assertEqual( len( network ), 1 )
self.assertEqual( network.getShader( "tex2Surface" ).name, "as_texture2surface" )
self.assertEqual( network.getShader( "tex2Surface" ).type, "osl:surface" )
self.assertEqual( network.getShader( "tex2Surface" ).parameters["in_color"].value, imath.Color3f( 1, 0, 0 ) )
self.assertEqual( network.getOutput(), ( "tex2Surface", "" ) )
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "97b30847bf998ae01f24b066442314bf",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 111,
"avg_line_length": 36.35616438356164,
"alnum_prop": 0.6989449886963075,
"repo_name": "GafferHQ/gaffer",
"id": "4e6985396d2f691b4e48ea1293c45221c5abc113",
"size": "4457",
"binary": false,
"copies": "8",
"ref": "refs/heads/main",
"path": "python/GafferAppleseedTest/AppleseedShaderAdaptorTest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5790"
},
{
"name": "C",
"bytes": "61993"
},
{
"name": "C++",
"bytes": "9572701"
},
{
"name": "CMake",
"bytes": "85201"
},
{
"name": "GLSL",
"bytes": "6208"
},
{
"name": "Python",
"bytes": "10280178"
},
{
"name": "Ruby",
"bytes": "419"
},
{
"name": "Shell",
"bytes": "14580"
}
],
"symlink_target": ""
}
|
"""Python wrappers for CloudFormation intrinsic functions
These are all available without preamble in a pyplate's global namespace.
These help make the pyplate look a little more like python than JSON, and can
be ignored if you want to write the raw JSON directly. (But you don't want
that, right? After all, that's why you're using pyplates.)
Notes:
* All "Return" values are a little midleading, in that what really gets
returned is the JSON-ified CFN intrinsic function. When using these in
pyplates, though, it's probably easiest to forget that and only be
concerned with how CFN will handle this, hence the Return values.
* To avoid the issue of using the wrong types with functions that take
sequences as input (join, select), argument unpacking is used. Therefore,
pass the sequence elements one at a time, rather than the sequence itself,
after passing the separator (for join) or index (for select).
* Using CloudFormation's Join function versus a pythonic 'string'.join
allows you to use CFN's intrinsic functions inside a join statement. The
pythonic string join method may literally interpret a call to an intrinsic
function, causing the resulting JSON to be interpreted as a string and
ignored by the CloudFormation template parser
* Functions related to conditions tend to overlap with python keywords, so they
    are prefixed with ``c_`` to differentiate them (so, Fn::And becomes ``c_and``)
.. note:
Documentation for the functions is verbatim from the AWS Docs,
except where identifiers are changed to fit with normal python style.
Links:
* `Intrinsic Functions <cfn-functions_>`_
* `Intrinsic Condition Functions <cfn-functions-conditions_>`_
"""
# Where needed, exception error messages are stored on the intrinsic function
# wrappers to make testing the function failure cases very easy
from __future__ import absolute_import
from nova.core.cfn_pyplates.exceptions import IntrinsicFuncInputError
__all__ = [
'base64',
'find_in_map',
'get_att',
'get_azs',
'join',
'select',
'ref',
'c_ref',
'c_and',
'c_or',
'c_not',
'c_if',
'c_equals',
]
def base64(value):
"""The intrinsic function Fn::Base64 returns the Base64 representation of \
the input string.
This function is typically used to pass encoded data to
Amazon EC2 instances by way of the UserData property.
Args:
value: The string value you want to convert to Base64
Returns: The original string, in Base64 representation
"""
return {'Fn::Base64': value}
def find_in_map(map_name, key, value):
"""The intrinsic function Fn::FindInMap returns the value of a key from a \
mapping declared in the Mappings section.
Args:
map_name: The logical name of the mapping declared in the Mappings
section that contains the key-value pair.
key: The name of the mapping key whose value you want.
value: The value for the named mapping key.
Returns: The map value.
.. note:
find_in_map is represented here "just in case".
Most likely, using the python equivalent in a pyplate will be
easier to both look at and maintain.
"""
return {'Fn::FindInMap': [map_name, key, value]}
def get_att(logical_name, attribute):
"""The intrinsic function Fn:GetAtt returns the value of an attribute from \
a resource in the template.
Args:
logical_name: The logical name of the resource that
contains the attribute you want.
attribute: The name of the resource-specific attribute
            whose value you want. See the resource's reference
            page [#cfn-resources] for details about the attributes
            available for that resource type.
Returns: The attribute value.
"""
return {'Fn::GetAtt': [logical_name, attribute]}
def get_azs(region=''):
"""The intrinsic function Fn::GetAZs returns an array that lists all \
Availability Zones for the specified region.
Because customers have access to different Availability Zones, the
intrinsic function Fn::GetAZs enables template authors to write
templates that adapt to the calling user's access. This frees you from
having to hard-code a full list of Availability Zones for a specified
region.
Args:
region: The name of the region for which you want to get the
Availability Zones.
You can use the AWS::Region pseudo parameter to specify the region
in which the stack is created. Specifying an empty string is
equivalent to specifying AWS::Region.
Returns: The list of Availability Zones for the region.
"""
return {'Fn::GetAZs': region}
def join(sep, *args):
"""The intrinsic function Fn::Join appends a set of values into a single \
value, separated by the specified delimiter.
If a delimiter is the empty string, the set of values are
concatenated with no delimiter.
Args:
        sep: The value you want to occur between fragments. The separator
will occur between fragments only.
It will not terminate the final value.
*args: Any number of values you want combined, passed as
positional arguments
Returns: The combined string.
"""
if len(args) < 2:
raise IntrinsicFuncInputError(join._errmsg_needinput)
return {'Fn::Join': [sep, list(args)]}
join._errmsg_needinput = 'Unable to join on one or less things!'
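# Illustrative sketch (not part of the module API): the wrappers compose into
# nested CFN JSON; the resource and attribute names below are hypothetical.
#
#   join(':', ref('AWS::StackName'), get_att('WebServer', 'PublicIp'))
#   => {'Fn::Join': [':', [{'Ref': 'AWS::StackName'},
#                          {'Fn::GetAtt': ['WebServer', 'PublicIp']}]]}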
def select(index, *args):
"""The intrinsic function Fn::Select returns a single object from a list of objects by index.
.. note:
select is represented here "just in case".
Most likely, using the python equivalent in a pyplate will be
easier to both look at and maintain, but in the event that selects
need to take place after CFN has interpolated all the intrinsic
functions, it may still be useful.
.. warning:: Important
Fn::Select does not check for null values or if the index is out of
bounds of the array. Both conditions will result in a stack error,
so you should be certain that the index you choose is valid, and that
the list contains non-null values.
Args:
index: The index of the object to retrieve. This must be a value
from zero to N-1, where N represents the number of
elements in the array.
*args: Any number of objects to select from, passed as
positional arguments. None of the arguments can be ``None``
Returns: The selected object.
"""
try:
index = int(index)
except ValueError:
raise IntrinsicFuncInputError(select._errmsg_int)
if not args:
raise IntrinsicFuncInputError(select._errmsg_empty)
if [x for x in args if x is None]:
raise IntrinsicFuncInputError(select._errmsg_null)
try:
args[index]
except IndexError:
raise IntrinsicFuncInputError(select._errmsg_index)
return {'Fn::Select': [index, list(args)]}
select._errmsg_int = 'Index must be a number!'
select._errmsg_empty = 'Unable to select from an empty list!'
select._errmsg_null = 'List of selections include null values!'
select._errmsg_index = 'Provided index is invalid!'
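# Illustrative only (not part of the module API): plain python indexing is
# usually simpler, but this is what the wrapper renders for CFN.
#
#   select(1, 'a', 'b', 'c')
#   => {'Fn::Select': [1, ['a', 'b', 'c']]}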
def ref(logical_name):
"""The intrinsic function Ref returns the value of the specified parameter or resource.
When you are declaring a resource in a template and you need to
specify another template resource by name, you can use the Ref to refer
to that other resource. In general, Ref returns the name of the
resource. For example, a reference to an
AWS::AutoScaling::AutoScalingGroup returns the name of that Auto
Scaling group resource.
For some resources, an identifier is returned that has another
significant meaning in the context of the resource. An AWS::EC2::EIP
resource, for instance, returns the IP address, and an
AWS::EC2::Instance returns the instance ID.
Args:
logical_name: The logical name of the resource or parameter you want
to dereference.
Returns:
* When you specify a parameter's logical name, it returns the value
of the parameter.
* When you specify a resource's logical name, it returns a value
that you can typically use to refer to that resource.
.. note:: You can also use Ref to add values to Output messages.
"""
return {'Ref': logical_name}
def c_ref(condition_name):
"""The intrinsic function Condition used to reference a named condition
When you refer to a condition in another condition or associate the
condition with a resource, you use the Condition: key. For the Fn::If
function, you only need to specify the condition name.
Args:
condition_name: The name of the condition you want to reference.
Returns:
* A reference to the named condition
"""
return {'Condition': condition_name}
def _validate_logical_condition_counts(fn, conditions):
# c_and / c_or have these limits applied, this keeps it DRY
if len(conditions) < 2:
raise IntrinsicFuncInputError(fn._errmsg_min)
elif len(conditions) > 10:
raise IntrinsicFuncInputError(fn._errmsg_max)
def c_and(*conditions):
"""The intrinsic conditional function Fn::And
    Returns true (for the purposes of the cfn template) if all the specified conditions
evaluate to true, or returns false if any one of the conditions evaluates to false.
Fn::And acts as an AND operator.
The minimum number of conditions that you can include is 2, and the maximum is 10.
Args:
*conditions: The conditions to evaluate
"""
_validate_logical_condition_counts(c_and, conditions)
return {'Fn::And': list(conditions)}
c_and._errmsg_min = "Minimum number of conditions for 'c_and' condition is 2"
c_and._errmsg_max = "Maximum number of conditions for 'c_and' condition is 10"
def c_or(*conditions):
"""The intrinsic conditional function Fn::Or
    Returns true (for the purposes of the cfn template) if any one of the specified conditions
    evaluates to true, or returns false if all of the conditions evaluate to false.
Fn::Or acts as an OR operator.
The minimum number of conditions that you can include is 2, and the maximum is 10.
Args:
*conditions: The conditions to evaluate
"""
_validate_logical_condition_counts(c_or, conditions)
return {'Fn::Or': list(conditions)}
c_or._errmsg_min = "Minimum number of conditions for 'c_or' condition is 2"
c_or._errmsg_max = "Maximum number of conditions for 'c_or' condition is 10"
def c_not(condition):
"""The intrinsic conditional function Fn::Not
Returns true for a condition that evaluates to false or
returns false for a condition that evaluates to true.
Fn::Not acts as a NOT operator.
"""
return {'Fn::Not': [condition]}
def c_equals(value_1, value_2):
"""The intrinsic conditional function Fn::Equals
Compares if two values are equal.
Returns true if the two values are equal or false if they aren't.
"""
return {'Fn::Equals': [value_1, value_2]}
def c_if(condition_name, value_if_true, value_if_false):
"""The intrinsic conditional function Fn::If representsa a ternary conditional operator
Returns one value if the specified condition evaluates to true and another value
if the specified condition evaluates to false.
Currently, AWS CloudFormation supports the Fn::If intrinsic function in the
metadata attribute, update policy attribute, and property values in the Resources
section and Outputs sections of a template.
You can use the AWS::NoValue pseudo parameter as a return value
to remove the corresponding property.
"""
return {'Fn::If': [condition_name, value_if_true, value_if_false]}
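# Illustrative sketch (not part of the module API): composing the condition
# wrappers above; the parameter names are hypothetical.
#
#   c_and(c_equals(ref('Environment'), 'prod'),
#         c_not(c_equals(ref('Region'), 'us-east-1')))
#   => {'Fn::And': [{'Fn::Equals': [{'Ref': 'Environment'}, 'prod']},
#                   {'Fn::Not': [{'Fn::Equals': [{'Ref': 'Region'},
#                                                'us-east-1']}]}]}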
|
{
"content_hash": "a68790f4a1e1d47a610d1b454ac3d49e",
"timestamp": "",
"source": "github",
"line_count": 353,
"max_line_length": 97,
"avg_line_length": 33.81869688385269,
"alnum_prop": 0.6955101357011224,
"repo_name": "gilt/nova",
"id": "a68fb75eb59fa54b6bc28921b7d0ae9d403033a5",
"size": "12558",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/core/cfn_pyplates/functions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "31"
},
{
"name": "Python",
"bytes": "150512"
},
{
"name": "Shell",
"bytes": "36056"
}
],
"symlink_target": ""
}
|
from typing import Iterable, List, Optional
from ..celeryconf import app
from ..discount.models import Sale
from .models import Attribute, Product, ProductType, ProductVariant
from .utils.attributes import generate_name_for_variant
from .utils.variant_prices import (
update_product_minimal_variant_price,
update_products_minimal_variant_prices,
update_products_minimal_variant_prices_of_catalogues,
update_products_minimal_variant_prices_of_discount,
)
def _update_variants_names(instance: ProductType, saved_attributes: Iterable):
"""Product variant names are created from names of assigned attributes.
    After a change in an attribute value name, the names of all product
    variants using these attributes need to be updated.
"""
initial_attributes = set(instance.variant_attributes.all())
attributes_changed = initial_attributes.intersection(saved_attributes)
if not attributes_changed:
return
variants_to_be_updated = ProductVariant.objects.filter(
product__in=instance.products.all(),
product__product_type__variant_attributes__in=attributes_changed,
)
variants_to_be_updated = variants_to_be_updated.prefetch_related(
"attributes__values__translations"
).all()
for variant in variants_to_be_updated:
variant.name = generate_name_for_variant(variant)
variant.save(update_fields=["name"])
@app.task
def update_variants_names(product_type_pk: int, saved_attributes_ids: List[int]):
instance = ProductType.objects.get(pk=product_type_pk)
saved_attributes = Attribute.objects.filter(pk__in=saved_attributes_ids)
_update_variants_names(instance, saved_attributes)
@app.task
def update_product_minimal_variant_price_task(product_pk: int):
product = Product.objects.get(pk=product_pk)
update_product_minimal_variant_price(product)
@app.task
def update_products_minimal_variant_prices_of_catalogues_task(
product_ids: Optional[List[int]] = None,
category_ids: Optional[List[int]] = None,
collection_ids: Optional[List[int]] = None,
):
update_products_minimal_variant_prices_of_catalogues(
product_ids, category_ids, collection_ids
)
@app.task
def update_products_minimal_variant_prices_of_discount_task(discount_pk: int):
discount = Sale.objects.get(pk=discount_pk)
update_products_minimal_variant_prices_of_discount(discount)
@app.task
def update_products_minimal_variant_prices_task(product_ids: List[int]):
products = Product.objects.filter(pk__in=product_ids)
update_products_minimal_variant_prices(products)
|
{
"content_hash": "0314b41891edb2049e960a82acc9e044",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 87,
"avg_line_length": 37.142857142857146,
"alnum_prop": 0.7415384615384616,
"repo_name": "maferelo/saleor",
"id": "29bdd6202687fddc5148b1c3f18396a048eab979",
"size": "2600",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saleor/product/tasks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "64217"
},
{
"name": "HTML",
"bytes": "394723"
},
{
"name": "JavaScript",
"bytes": "61157"
},
{
"name": "Python",
"bytes": "585270"
}
],
"symlink_target": ""
}
|
"""
The citext PostgreSQL extension supports indexing of case-insensitive text
strings and thus eliminates the need for operations such as iexact and other
modifiers to enforce use of an index.
"""
from django.db import IntegrityError
from . import PostgreSQLTestCase
from .models import CITextTestModel
class CITextTestCase(PostgreSQLTestCase):
@classmethod
def setUpTestData(cls):
CITextTestModel.objects.create(name='JoHn')
def test_equal_lowercase(self):
"""
citext removes the need for iexact as the index is case-insensitive.
"""
self.assertEqual(CITextTestModel.objects.filter(name='john').count(), 1)
def test_fail_case(self):
"""
Creating an entry for a citext-field which clashes with an existing
value isn't allowed.
"""
with self.assertRaises(IntegrityError):
CITextTestModel.objects.create(name='John')
|
{
"content_hash": "e8fa023b81886372ab3e0758eed3d60f",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 80,
"avg_line_length": 31,
"alnum_prop": 0.7010752688172043,
"repo_name": "guettli/django",
"id": "7f86f6a2056aa0046a1bc0aed9739bb57490acda",
"size": "930",
"binary": false,
"copies": "18",
"ref": "refs/heads/master",
"path": "tests/postgres_tests/test_citext.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53138"
},
{
"name": "HTML",
"bytes": "172977"
},
{
"name": "JavaScript",
"bytes": "448151"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "12149968"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
import unittest
import numpy as np
import sys
sys.path.append("..")
import paddle
from op_test import OpTest
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import (
create_test_class,
get_xpu_op_support_types,
XPUOpTestWrapper,
)
paddle.enable_static()
def huber_loss_forward(val, delta):
abs_val = abs(val)
if abs_val <= delta:
return 0.5 * val * val
else:
return delta * (abs_val - 0.5 * delta)
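# Worked check of the piecewise definition above (delta = 0.5):
#   |val| <= delta: huber_loss_forward(0.2, 0.5) = 0.5 * 0.2 * 0.2 = 0.02
#   |val| >  delta: huber_loss_forward(2.0, 0.5) = 0.5 * (2.0 - 0.25) = 0.875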
class XPUTestHuberLossOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'huber_loss'
self.use_dynamic_create_class = False
class TestHuberLossOp(XPUOpTest):
def setUp(self):
self.set_xpu()
self.op_type = 'huber_loss'
self.place = paddle.XPUPlace(0)
self.init_dtype()
self.set_inputs()
self.set_attrs()
self.set_outputs()
def set_inputs(self):
shape = self.set_shape()
x = np.random.uniform(0, 1.0, shape).astype(self.dtype)
y = np.random.uniform(0, 1.0, shape).astype(self.dtype)
self.inputs = {
'X': OpTest.np_dtype_to_fluid_dtype(x),
'Y': OpTest.np_dtype_to_fluid_dtype(y),
}
def set_attrs(self):
self.attrs = {'delta': 0.5}
def set_outputs(self):
delta = self.attrs['delta']
shape = self.set_shape()
residual = self.inputs['Y'] - self.inputs['X']
loss = np.vectorize(huber_loss_forward)(residual, delta).astype(
self.dtype
)
self.outputs = {'Residual': residual, 'Out': loss.reshape(shape)}
def set_shape(self):
return (100, 1)
def set_xpu(self):
self.__class__.use_xpu = True
def init_dtype(self):
self.dtype = self.in_type
def test_check_output(self):
self.check_output_with_place(self.place)
def test_check_grad_normal(self):
self.check_grad_with_place(self.place, ['X', 'Y'], 'Out')
def test_check_grad_ingore_x(self):
self.check_grad_with_place(
self.place, ['Y'], 'Out', no_grad_set=set("residual")
)
def test_check_grad_ingore_y(self):
self.check_grad_with_place(
self.place, ['X'], 'Out', no_grad_set=set('residual')
)
class TestHuberLossOp1(TestHuberLossOp):
def set_shape(self):
return 640
class TestHuberLossOp2(TestHuberLossOp):
def set_shape(self):
return (10, 10)
class TestHuberLossOp3(TestHuberLossOp):
def set_shape(self):
return (10, 10, 1)
support_types = get_xpu_op_support_types('huber_loss')
for stype in support_types:
create_test_class(globals(), XPUTestHuberLossOp, stype)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "25e5f8f2c1c042b89d1746ab785709f5",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 77,
"avg_line_length": 27.203703703703702,
"alnum_prop": 0.5547991831177672,
"repo_name": "luotao1/Paddle",
"id": "50d77fc1a3d5f0315bf2eedc13bc2f783bd64554",
"size": "3551",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/xpu/test_huber_loss_op_xpu.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36771446"
},
{
"name": "CMake",
"bytes": "903079"
},
{
"name": "Cuda",
"bytes": "5200715"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36248258"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553175"
}
],
"symlink_target": ""
}
|
"""
A fake XenAPI SDK.
"""
import base64
import pickle
import pprint
import random
import uuid
from xml.sax import saxutils
import zlib
from oslo.serialization import jsonutils
from oslo.utils import timeutils
from oslo.utils import units
from nova import exception
from nova.i18n import _
from nova.openstack.common import log as logging
from nova.virt.xenapi.client import session as xenapi_session
_CLASSES = ['host', 'network', 'session', 'pool', 'SR', 'VBD',
'PBD', 'VDI', 'VIF', 'PIF', 'VM', 'VLAN', 'task']
_db_content = {}
LOG = logging.getLogger(__name__)
def log_db_contents(msg=None):
text = msg or ""
content = pprint.pformat(_db_content)
LOG.debug("%(text)s: _db_content => %(content)s",
{'text': text, 'content': content})
def reset():
for c in _CLASSES:
_db_content[c] = {}
host = create_host('fake')
create_vm('fake dom 0',
'Running',
is_a_template=False,
is_control_domain=True,
resident_on=host)
def reset_table(table):
if table not in _CLASSES:
return
_db_content[table] = {}
def _create_pool(name_label):
return _create_object('pool',
{'name_label': name_label})
def create_host(name_label, hostname='fake_name', address='fake_addr'):
host_ref = _create_object('host',
{'name_label': name_label,
'hostname': hostname,
'address': address})
host_default_sr_ref = _create_local_srs(host_ref)
_create_local_pif(host_ref)
# Create a pool if we don't have one already
if len(_db_content['pool']) == 0:
pool_ref = _create_pool('')
_db_content['pool'][pool_ref]['master'] = host_ref
_db_content['pool'][pool_ref]['default-SR'] = host_default_sr_ref
_db_content['pool'][pool_ref]['suspend-image-SR'] = host_default_sr_ref
def create_network(name_label, bridge):
return _create_object('network',
{'name_label': name_label,
'bridge': bridge})
def create_vm(name_label, status, **kwargs):
if status == 'Running':
domid = random.randrange(1, 1 << 16)
resident_on = _db_content['host'].keys()[0]
else:
domid = -1
resident_on = ''
vm_rec = kwargs.copy()
vm_rec.update({'name_label': name_label,
'domid': domid,
'power_state': status,
'blocked_operations': {},
'resident_on': resident_on})
vm_ref = _create_object('VM', vm_rec)
after_VM_create(vm_ref, vm_rec)
return vm_ref
def destroy_vm(vm_ref):
vm_rec = _db_content['VM'][vm_ref]
vbd_refs = vm_rec['VBDs']
# NOTE(johannes): Shallow copy since destroy_vbd will remove itself
# from the list
for vbd_ref in vbd_refs[:]:
destroy_vbd(vbd_ref)
del _db_content['VM'][vm_ref]
def destroy_vbd(vbd_ref):
vbd_rec = _db_content['VBD'][vbd_ref]
vm_ref = vbd_rec['VM']
vm_rec = _db_content['VM'][vm_ref]
vm_rec['VBDs'].remove(vbd_ref)
vdi_ref = vbd_rec['VDI']
vdi_rec = _db_content['VDI'][vdi_ref]
vdi_rec['VBDs'].remove(vbd_ref)
del _db_content['VBD'][vbd_ref]
def destroy_vdi(vdi_ref):
vdi_rec = _db_content['VDI'][vdi_ref]
vbd_refs = vdi_rec['VBDs']
# NOTE(johannes): Shallow copy since destroy_vbd will remove itself
# from the list
for vbd_ref in vbd_refs[:]:
destroy_vbd(vbd_ref)
del _db_content['VDI'][vdi_ref]
def create_vdi(name_label, sr_ref, **kwargs):
vdi_rec = {
'SR': sr_ref,
'read_only': False,
'type': '',
'name_label': name_label,
'name_description': '',
'sharable': False,
'other_config': {},
'location': '',
'xenstore_data': {},
'sm_config': {'vhd-parent': None},
'physical_utilisation': '123',
'managed': True,
}
vdi_rec.update(kwargs)
vdi_ref = _create_object('VDI', vdi_rec)
after_VDI_create(vdi_ref, vdi_rec)
return vdi_ref
def after_VDI_create(vdi_ref, vdi_rec):
vdi_rec.setdefault('VBDs', [])
def create_vbd(vm_ref, vdi_ref, userdevice=0, other_config=None):
if other_config is None:
other_config = {}
vbd_rec = {'VM': vm_ref,
'VDI': vdi_ref,
'userdevice': str(userdevice),
'currently_attached': False,
'other_config': other_config}
vbd_ref = _create_object('VBD', vbd_rec)
after_VBD_create(vbd_ref, vbd_rec)
return vbd_ref
def after_VBD_create(vbd_ref, vbd_rec):
"""Create read-only fields and backref from VM and VDI to VBD when VBD
is created.
"""
vbd_rec['currently_attached'] = False
vbd_rec['device'] = ''
vbd_rec.setdefault('other_config', {})
vm_ref = vbd_rec['VM']
vm_rec = _db_content['VM'][vm_ref]
vm_rec['VBDs'].append(vbd_ref)
vm_name_label = _db_content['VM'][vm_ref]['name_label']
vbd_rec['vm_name_label'] = vm_name_label
vdi_ref = vbd_rec['VDI']
if vdi_ref and vdi_ref != "OpaqueRef:NULL":
vdi_rec = _db_content['VDI'][vdi_ref]
vdi_rec['VBDs'].append(vbd_ref)
def after_VIF_create(vif_ref, vif_rec):
"""Create backref from VM to VIF when VIF is created.
"""
vm_ref = vif_rec['VM']
vm_rec = _db_content['VM'][vm_ref]
vm_rec['VIFs'].append(vif_ref)
def after_VM_create(vm_ref, vm_rec):
"""Create read-only fields in the VM record."""
vm_rec.setdefault('domid', -1)
vm_rec.setdefault('is_control_domain', False)
vm_rec.setdefault('is_a_template', False)
vm_rec.setdefault('memory_static_max', str(8 * units.Gi))
vm_rec.setdefault('memory_dynamic_max', str(8 * units.Gi))
vm_rec.setdefault('VCPUs_max', str(4))
vm_rec.setdefault('VBDs', [])
vm_rec.setdefault('VIFs', [])
vm_rec.setdefault('resident_on', '')
def create_pbd(host_ref, sr_ref, attached):
config = {'path': '/var/run/sr-mount/%s' % sr_ref}
return _create_object('PBD',
{'device_config': config,
'host': host_ref,
'SR': sr_ref,
'currently_attached': attached})
def create_task(name_label):
return _create_object('task',
{'name_label': name_label,
'status': 'pending'})
def _create_local_srs(host_ref):
"""Create an SR that looks like the one created on the local disk by
default by the XenServer installer. Also, fake the installation of
an ISO SR.
"""
create_sr(name_label='Local storage ISO',
type='iso',
other_config={'i18n-original-value-name_label':
'Local storage ISO',
'i18n-key': 'local-storage-iso'},
physical_size=80000,
physical_utilisation=40000,
virtual_allocation=80000,
host_ref=host_ref)
return create_sr(name_label='Local storage',
type='ext',
other_config={'i18n-original-value-name_label':
'Local storage',
'i18n-key': 'local-storage'},
physical_size=40000,
physical_utilisation=20000,
virtual_allocation=10000,
host_ref=host_ref)
def create_sr(**kwargs):
sr_ref = _create_object(
'SR',
{'name_label': kwargs.get('name_label'),
'type': kwargs.get('type'),
'content_type': kwargs.get('type', 'user'),
'shared': kwargs.get('shared', False),
'physical_size': kwargs.get('physical_size', str(1 << 30)),
'physical_utilisation': str(
kwargs.get('physical_utilisation', 0)),
'virtual_allocation': str(kwargs.get('virtual_allocation', 0)),
'other_config': kwargs.get('other_config', {}),
'VDIs': kwargs.get('VDIs', [])})
pbd_ref = create_pbd(kwargs.get('host_ref'), sr_ref, True)
_db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
return sr_ref
def _create_local_pif(host_ref):
pif_ref = _create_object('PIF',
{'name-label': 'Fake PIF',
'MAC': '00:11:22:33:44:55',
'physical': True,
'VLAN': -1,
'device': 'fake0',
'host_uuid': host_ref,
'network': '',
'IP': '10.1.1.1',
'IPv6': '',
'uuid': '',
'management': 'true'})
_db_content['PIF'][pif_ref]['uuid'] = pif_ref
return pif_ref
def _create_object(table, obj):
ref = str(uuid.uuid4())
obj['uuid'] = str(uuid.uuid4())
_db_content[table][ref] = obj
return ref
def _create_sr(table, obj):
sr_type = obj[6]
    # the fake only supports the iscsi and nfs SR types
if sr_type != 'iscsi' and sr_type != 'nfs':
raise Failure(['SR_UNKNOWN_DRIVER', sr_type])
host_ref = _db_content['host'].keys()[0]
sr_ref = _create_object(table, obj[2])
if sr_type == 'iscsi':
vdi_ref = create_vdi('', sr_ref)
pbd_ref = create_pbd(host_ref, sr_ref, True)
_db_content['SR'][sr_ref]['VDIs'] = [vdi_ref]
_db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
_db_content['VDI'][vdi_ref]['SR'] = sr_ref
_db_content['PBD'][pbd_ref]['SR'] = sr_ref
return sr_ref
def _create_vlan(pif_ref, vlan_num, network_ref):
pif_rec = get_record('PIF', pif_ref)
vlan_pif_ref = _create_object('PIF',
{'name-label': 'Fake VLAN PIF',
'MAC': '00:11:22:33:44:55',
'physical': True,
'VLAN': vlan_num,
'device': pif_rec['device'],
'host_uuid': pif_rec['host_uuid']})
return _create_object('VLAN',
{'tagged-pif': pif_ref,
'untagged-pif': vlan_pif_ref,
'tag': vlan_num})
def get_all(table):
return _db_content[table].keys()
def get_all_records(table):
return _db_content[table]
def _query_matches(record, query):
# Simple support for the XenServer query language:
# 'field "host"="<uuid>" and field "SR"="<sr uuid>"'
# Tested through existing tests (e.g. calls to find_network_with_bridge)
and_clauses = query.split(" and ")
if len(and_clauses) > 1:
matches = True
for clause in and_clauses:
matches = matches and _query_matches(record, clause)
return matches
or_clauses = query.split(" or ")
if len(or_clauses) > 1:
matches = False
for clause in or_clauses:
matches = matches or _query_matches(record, clause)
return matches
if query[:4] == 'not ':
return not _query_matches(record, query[4:])
# Now it must be a single field - bad queries never match
if query[:5] != 'field':
return False
(field, value) = query[6:].split('=', 1)
# Some fields (e.g. name_label, memory_overhead) have double
# underscores in the DB, but only single underscores when querying
field = field.replace("__", "_").strip(" \"'")
value = value.strip(" \"'")
# Strings should be directly compared
if isinstance(record[field], str):
return record[field] == value
# But for all other value-checks, convert to a string first
# (Notably used for booleans - which can be lower or camel
# case and are interpreted/sanitised by XAPI)
return str(record[field]).lower() == value.lower()
def get_all_records_where(table_name, query):
matching_records = {}
table = _db_content[table_name]
for record in table:
if _query_matches(table[record], query):
matching_records[record] = table[record]
return matching_records
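# Illustrative sketch (editor's addition, never called): exercising the simple
# query language handled by _query_matches above through get_all_records_where.
# The refs passed in are placeholders.
def _example_query_usage(host_ref, sr_ref):
    # AND of two field clauses, as in the comment above.
    both = get_all_records_where(
        'PBD', 'field "host"="%s" and field "SR"="%s"' % (host_ref, sr_ref))
    # Negation is also understood.
    detached = get_all_records_where(
        'PBD', 'not field "currently_attached"="true"')
    return both, detached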
def get_record(table, ref):
if ref in _db_content[table]:
return _db_content[table].get(ref)
else:
raise Failure(['HANDLE_INVALID', table, ref])
def check_for_session_leaks():
if len(_db_content['session']) > 0:
raise exception.NovaException('Sessions have leaked: %s' %
_db_content['session'])
def as_value(s):
"""Helper function for simulating XenAPI plugin responses. It
escapes and wraps the given argument.
"""
return '<value>%s</value>' % saxutils.escape(s)
def as_json(*args, **kwargs):
"""Helper function for simulating XenAPI plugin responses for those
that are returning JSON. If this function is given plain arguments,
then these are rendered as a JSON list. If it's given keyword
arguments then these are rendered as a JSON dict.
"""
arg = args or kwargs
return jsonutils.dumps(arg)
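# Illustrative sketch (editor's addition, never called): the two rendering
# modes of as_json described above. Positional arguments become a JSON list,
# keyword arguments become a JSON dict (key order may vary).
def _example_as_json_usage():
    as_list = as_json('0', 'success')                   # '["0", "success"]'
    as_dict = as_json(returncode='0', message='done')   # rendered as a dict
    return as_list, as_dict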
class Failure(Exception):
def __init__(self, details):
self.details = details
def __str__(self):
try:
return str(self.details)
except Exception:
return "XenAPI Fake Failure: %s" % str(self.details)
def _details_map(self):
return dict([(str(i), self.details[i])
for i in range(len(self.details))])
class SessionBase(object):
"""Base class for Fake Sessions."""
def __init__(self, uri):
self._session = None
xenapi_session.apply_session_helpers(self)
def pool_get_default_SR(self, _1, pool_ref):
return _db_content['pool'].values()[0]['default-SR']
def VBD_insert(self, _1, vbd_ref, vdi_ref):
vbd_rec = get_record('VBD', vbd_ref)
get_record('VDI', vdi_ref)
vbd_rec['empty'] = False
vbd_rec['VDI'] = vdi_ref
def VBD_plug(self, _1, ref):
rec = get_record('VBD', ref)
if rec['currently_attached']:
raise Failure(['DEVICE_ALREADY_ATTACHED', ref])
rec['currently_attached'] = True
rec['device'] = rec['userdevice']
def VBD_unplug(self, _1, ref):
rec = get_record('VBD', ref)
if not rec['currently_attached']:
raise Failure(['DEVICE_ALREADY_DETACHED', ref])
rec['currently_attached'] = False
rec['device'] = ''
def VBD_add_to_other_config(self, _1, vbd_ref, key, value):
db_ref = _db_content['VBD'][vbd_ref]
if 'other_config' not in db_ref:
db_ref['other_config'] = {}
if key in db_ref['other_config']:
raise Failure(['MAP_DUPLICATE_KEY', 'VBD', 'other_config',
vbd_ref, key])
db_ref['other_config'][key] = value
def VBD_get_other_config(self, _1, vbd_ref):
db_ref = _db_content['VBD'][vbd_ref]
if 'other_config' not in db_ref:
return {}
return db_ref['other_config']
def PBD_create(self, _1, pbd_rec):
pbd_ref = _create_object('PBD', pbd_rec)
_db_content['PBD'][pbd_ref]['currently_attached'] = False
return pbd_ref
def PBD_plug(self, _1, pbd_ref):
rec = get_record('PBD', pbd_ref)
if rec['currently_attached']:
raise Failure(['DEVICE_ALREADY_ATTACHED', rec])
rec['currently_attached'] = True
sr_ref = rec['SR']
_db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
def PBD_unplug(self, _1, pbd_ref):
rec = get_record('PBD', pbd_ref)
if not rec['currently_attached']:
raise Failure(['DEVICE_ALREADY_DETACHED', rec])
rec['currently_attached'] = False
sr_ref = rec['SR']
_db_content['SR'][sr_ref]['PBDs'].remove(pbd_ref)
def SR_introduce(self, _1, sr_uuid, label, desc, type, content_type,
shared, sm_config):
ref = None
rec = None
for ref, rec in _db_content['SR'].iteritems():
if rec.get('uuid') == sr_uuid:
# make forgotten = 0 and return ref
_db_content['SR'][ref]['forgotten'] = 0
return ref
# SR not found in db, so we create one
params = {'sr_uuid': sr_uuid,
'label': label,
'desc': desc,
'type': type,
'content_type': content_type,
'shared': shared,
'sm_config': sm_config}
sr_ref = _create_object('SR', params)
_db_content['SR'][sr_ref]['uuid'] = sr_uuid
_db_content['SR'][sr_ref]['forgotten'] = 0
vdi_per_lun = False
if type == 'iscsi':
# Just to be clear
vdi_per_lun = True
if vdi_per_lun:
# we need to create a vdi because this introduce
# is likely meant for a single vdi
vdi_ref = create_vdi('', sr_ref)
_db_content['SR'][sr_ref]['VDIs'] = [vdi_ref]
_db_content['VDI'][vdi_ref]['SR'] = sr_ref
return sr_ref
def SR_forget(self, _1, sr_ref):
_db_content['SR'][sr_ref]['forgotten'] = 1
def SR_scan(self, _1, sr_ref):
return
def VM_get_xenstore_data(self, _1, vm_ref):
return _db_content['VM'][vm_ref].get('xenstore_data', {})
def VM_remove_from_xenstore_data(self, _1, vm_ref, key):
db_ref = _db_content['VM'][vm_ref]
if 'xenstore_data' not in db_ref:
return
if key in db_ref['xenstore_data']:
del db_ref['xenstore_data'][key]
def VM_add_to_xenstore_data(self, _1, vm_ref, key, value):
db_ref = _db_content['VM'][vm_ref]
if 'xenstore_data' not in db_ref:
db_ref['xenstore_data'] = {}
db_ref['xenstore_data'][key] = value
def VM_pool_migrate(self, _1, vm_ref, host_ref, options):
pass
def VDI_remove_from_other_config(self, _1, vdi_ref, key):
db_ref = _db_content['VDI'][vdi_ref]
if 'other_config' not in db_ref:
return
if key in db_ref['other_config']:
del db_ref['other_config'][key]
def VDI_add_to_other_config(self, _1, vdi_ref, key, value):
db_ref = _db_content['VDI'][vdi_ref]
if 'other_config' not in db_ref:
db_ref['other_config'] = {}
if key in db_ref['other_config']:
raise Failure(['MAP_DUPLICATE_KEY', 'VDI', 'other_config',
vdi_ref, key])
db_ref['other_config'][key] = value
def VDI_copy(self, _1, vdi_to_copy_ref, sr_ref):
db_ref = _db_content['VDI'][vdi_to_copy_ref]
name_label = db_ref['name_label']
read_only = db_ref['read_only']
sharable = db_ref['sharable']
other_config = db_ref['other_config'].copy()
return create_vdi(name_label, sr_ref, sharable=sharable,
read_only=read_only, other_config=other_config)
def VDI_clone(self, _1, vdi_to_clone_ref):
db_ref = _db_content['VDI'][vdi_to_clone_ref]
sr_ref = db_ref['SR']
return self.VDI_copy(_1, vdi_to_clone_ref, sr_ref)
def host_compute_free_memory(self, _1, ref):
# Always return 12GB available
return 12 * units.Gi
def _plugin_agent_version(self, method, args):
return as_json(returncode='0', message='1.0\\r\\n')
def _plugin_agent_key_init(self, method, args):
return as_json(returncode='D0', message='1')
def _plugin_agent_password(self, method, args):
return as_json(returncode='0', message='success')
def _plugin_agent_inject_file(self, method, args):
return as_json(returncode='0', message='success')
def _plugin_agent_resetnetwork(self, method, args):
return as_json(returncode='0', message='success')
def _plugin_agent_agentupdate(self, method, args):
url = args["url"]
md5 = args["md5sum"]
message = "success with %(url)s and hash:%(md5)s" % dict(url=url,
md5=md5)
return as_json(returncode='0', message=message)
def _plugin_noop(self, method, args):
return ''
def _plugin_pickle_noop(self, method, args):
return pickle.dumps(None)
def _plugin_migration_transfer_vhd(self, method, args):
kwargs = pickle.loads(args['params'])['kwargs']
vdi_ref = self.xenapi_request('VDI.get_by_uuid',
(kwargs['vdi_uuid'], ))
assert vdi_ref
return pickle.dumps(None)
_plugin_glance_upload_vhd = _plugin_pickle_noop
_plugin_kernel_copy_vdi = _plugin_noop
_plugin_kernel_create_kernel_ramdisk = _plugin_noop
_plugin_kernel_remove_kernel_ramdisk = _plugin_noop
_plugin_migration_move_vhds_into_sr = _plugin_noop
def _plugin_xenhost_host_data(self, method, args):
return jsonutils.dumps({'host_memory': {'total': 10,
'overhead': 20,
'free': 30,
'free-computed': 40},
'host_hostname': 'fake-xenhost',
'host_cpu_info': {'cpu_count': 50},
})
def _plugin_poweraction(self, method, args):
return jsonutils.dumps({"power_action": method[5:]})
_plugin_xenhost_host_reboot = _plugin_poweraction
_plugin_xenhost_host_startup = _plugin_poweraction
_plugin_xenhost_host_shutdown = _plugin_poweraction
def _plugin_xenhost_set_host_enabled(self, method, args):
enabled = 'enabled' if args.get('enabled') == 'true' else 'disabled'
return jsonutils.dumps({"status": enabled})
def _plugin_xenhost_host_uptime(self, method, args):
return jsonutils.dumps({"uptime": "fake uptime"})
def _plugin_xenhost_get_pci_device_details(self, method, args):
"""Simulate the ouput of three pci devices.
Both of those devices are available for pci passtrough but
only one will match with the pci whitelist used in the
method test_pci_passthrough_devices_*().
Return a single list.
"""
# Driver is not pciback
dev_bad1 = ["Slot:\t0000:86:10.0", "Class:\t0604", "Vendor:\t10b5",
"Device:\t8747", "Rev:\tba", "Driver:\tpcieport", "\n"]
# Driver is pciback but vendor and device are bad
dev_bad2 = ["Slot:\t0000:88:00.0", "Class:\t0300", "Vendor:\t0bad",
"Device:\tcafe", "SVendor:\t10de", "SDevice:\t100d",
"Rev:\ta1", "Driver:\tpciback", "\n"]
# Driver is pciback and vendor, device are used for matching
dev_good = ["Slot:\t0000:87:00.0", "Class:\t0300", "Vendor:\t10de",
"Device:\t11bf", "SVendor:\t10de", "SDevice:\t100d",
"Rev:\ta1", "Driver:\tpciback", "\n"]
lspci_output = "\n".join(dev_bad1 + dev_bad2 + dev_good)
return pickle.dumps(lspci_output)
def _plugin_xenhost_get_pci_type(self, method, args):
return pickle.dumps("type-PCI")
def _plugin_console_get_console_log(self, method, args):
dom_id = args["dom_id"]
if dom_id == 0:
raise Failure('Guest does not have a console')
return base64.b64encode(zlib.compress("dom_id: %s" % dom_id))
def _plugin_nova_plugin_version_get_version(self, method, args):
return pickle.dumps("1.2")
def _plugin_xenhost_query_gc(self, method, args):
return pickle.dumps("False")
def host_call_plugin(self, _1, _2, plugin, method, args):
func = getattr(self, '_plugin_%s_%s' % (plugin, method), None)
if not func:
raise Exception('No simulation in host_call_plugin for %s,%s' %
(plugin, method))
return func(method, args)
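    # Illustrative sketch (editor's addition, never called by the fake itself):
    # host_call_plugin above resolves a simulated plugin call by looking up a
    # method named '_plugin_<plugin>_<method>'. The host ref below is a
    # placeholder; 'xenhost'/'host_uptime' map to _plugin_xenhost_host_uptime.
    def _example_host_call_plugin_usage(self, host_ref):
        return self.host_call_plugin(self._session, host_ref,
                                     'xenhost', 'host_uptime', {})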
def VDI_get_virtual_size(self, *args):
return 1 * units.Gi
def VDI_resize_online(self, *args):
return 'derp'
VDI_resize = VDI_resize_online
def _VM_reboot(self, session, vm_ref):
db_ref = _db_content['VM'][vm_ref]
if db_ref['power_state'] != 'Running':
raise Failure(['VM_BAD_POWER_STATE',
'fake-opaque-ref', db_ref['power_state'].lower(), 'halted'])
db_ref['power_state'] = 'Running'
db_ref['domid'] = random.randrange(1, 1 << 16)
def VM_clean_reboot(self, session, vm_ref):
return self._VM_reboot(session, vm_ref)
def VM_hard_reboot(self, session, vm_ref):
return self._VM_reboot(session, vm_ref)
def VM_hard_shutdown(self, session, vm_ref):
db_ref = _db_content['VM'][vm_ref]
db_ref['power_state'] = 'Halted'
db_ref['domid'] = -1
VM_clean_shutdown = VM_hard_shutdown
def VM_suspend(self, session, vm_ref):
db_ref = _db_content['VM'][vm_ref]
db_ref['power_state'] = 'Suspended'
def VM_pause(self, session, vm_ref):
db_ref = _db_content['VM'][vm_ref]
db_ref['power_state'] = 'Paused'
def pool_eject(self, session, host_ref):
pass
def pool_join(self, session, hostname, username, password):
pass
def pool_set_name_label(self, session, pool_ref, name):
pass
def host_migrate_receive(self, session, destref, nwref, options):
return "fake_migrate_data"
def VM_assert_can_migrate(self, session, vmref, migrate_data, live,
vdi_map, vif_map, options):
pass
def VM_migrate_send(self, session, mref, migrate_data, live, vdi_map,
vif_map, options):
pass
def VM_remove_from_blocked_operations(self, session, vm_ref, key):
# operation is idempotent, XenServer doesn't care if the key exists
_db_content['VM'][vm_ref]['blocked_operations'].pop(key, None)
def xenapi_request(self, methodname, params):
if methodname.startswith('login'):
self._login(methodname, params)
return None
elif methodname == 'logout' or methodname == 'session.logout':
self._logout()
return None
else:
full_params = (self._session,) + params
meth = getattr(self, methodname, None)
if meth is None:
LOG.debug('Raising NotImplemented')
raise NotImplementedError(
_('xenapi.fake does not have an implementation for %s') %
methodname)
return meth(*full_params)
def _login(self, method, params):
self._session = str(uuid.uuid4())
_session_info = {'uuid': str(uuid.uuid4()),
'this_host': _db_content['host'].keys()[0]}
_db_content['session'][self._session] = _session_info
def _logout(self):
s = self._session
self._session = None
if s not in _db_content['session']:
raise exception.NovaException(
"Logging out a session that is invalid or already logged "
"out: %s" % s)
del _db_content['session'][s]
def __getattr__(self, name):
if name == 'handle':
return self._session
elif name == 'xenapi':
return _Dispatcher(self.xenapi_request, None)
elif name.startswith('login') or name.startswith('slave_local'):
return lambda *params: self._login(name, params)
elif name.startswith('Async'):
return lambda *params: self._async(name, params)
elif '.' in name:
impl = getattr(self, name.replace('.', '_'))
if impl is not None:
def callit(*params):
LOG.debug('Calling %(name)s %(impl)s',
{'name': name, 'impl': impl})
self._check_session(params)
return impl(*params)
return callit
if self._is_gettersetter(name, True):
LOG.debug('Calling getter %s', name)
return lambda *params: self._getter(name, params)
elif self._is_gettersetter(name, False):
LOG.debug('Calling setter %s', name)
return lambda *params: self._setter(name, params)
elif self._is_create(name):
return lambda *params: self._create(name, params)
elif self._is_destroy(name):
return lambda *params: self._destroy(name, params)
elif name == 'XenAPI':
return FakeXenAPI()
else:
return None
def _is_gettersetter(self, name, getter):
bits = name.split('.')
return (len(bits) == 2 and
bits[0] in _CLASSES and
bits[1].startswith(getter and 'get_' or 'set_'))
def _is_create(self, name):
return self._is_method(name, 'create')
def _is_destroy(self, name):
return self._is_method(name, 'destroy')
def _is_method(self, name, meth):
bits = name.split('.')
return (len(bits) == 2 and
bits[0] in _CLASSES and
bits[1] == meth)
def _getter(self, name, params):
self._check_session(params)
(cls, func) = name.split('.')
if func == 'get_all':
self._check_arg_count(params, 1)
return get_all(cls)
if func == 'get_all_records':
self._check_arg_count(params, 1)
return get_all_records(cls)
if func == 'get_all_records_where':
self._check_arg_count(params, 2)
return get_all_records_where(cls, params[1])
if func == 'get_record':
self._check_arg_count(params, 2)
return get_record(cls, params[1])
if func in ('get_by_name_label', 'get_by_uuid'):
self._check_arg_count(params, 2)
return_singleton = (func == 'get_by_uuid')
return self._get_by_field(
_db_content[cls], func[len('get_by_'):], params[1],
return_singleton=return_singleton)
if len(params) == 2:
field = func[len('get_'):]
ref = params[1]
if (ref in _db_content[cls]):
if (field in _db_content[cls][ref]):
return _db_content[cls][ref][field]
else:
raise Failure(['HANDLE_INVALID', cls, ref])
LOG.debug('Raising NotImplemented')
raise NotImplementedError(
_('xenapi.fake does not have an implementation for %s or it has '
'been called with the wrong number of arguments') % name)
def _setter(self, name, params):
self._check_session(params)
(cls, func) = name.split('.')
if len(params) == 3:
field = func[len('set_'):]
ref = params[1]
val = params[2]
if (ref in _db_content[cls] and
field in _db_content[cls][ref]):
_db_content[cls][ref][field] = val
return
LOG.debug('Raising NotImplemented')
raise NotImplementedError(
'xenapi.fake does not have an implementation for %s or it has '
'been called with the wrong number of arguments or the database '
'is missing that field' % name)
def _create(self, name, params):
self._check_session(params)
is_sr_create = name == 'SR.create'
is_vlan_create = name == 'VLAN.create'
# Storage Repositories have a different API
expected = is_sr_create and 10 or is_vlan_create and 4 or 2
self._check_arg_count(params, expected)
(cls, _) = name.split('.')
ref = (is_sr_create and
_create_sr(cls, params) or
is_vlan_create and
_create_vlan(params[1], params[2], params[3]) or
_create_object(cls, params[1]))
# Call hook to provide any fixups needed (ex. creating backrefs)
after_hook = 'after_%s_create' % cls
if after_hook in globals():
globals()[after_hook](ref, params[1])
obj = get_record(cls, ref)
# Add RO fields
if cls == 'VM':
obj['power_state'] = 'Halted'
return ref
def _destroy(self, name, params):
self._check_session(params)
self._check_arg_count(params, 2)
table = name.split('.')[0]
ref = params[1]
if ref not in _db_content[table]:
raise Failure(['HANDLE_INVALID', table, ref])
# Call destroy function (if exists)
destroy_func = globals().get('destroy_%s' % table.lower())
if destroy_func:
destroy_func(ref)
else:
del _db_content[table][ref]
def _async(self, name, params):
task_ref = create_task(name)
task = _db_content['task'][task_ref]
func = name[len('Async.'):]
try:
result = self.xenapi_request(func, params[1:])
if result:
result = as_value(result)
task['result'] = result
task['status'] = 'success'
except Failure as exc:
task['error_info'] = exc.details
task['status'] = 'failed'
task['finished'] = timeutils.utcnow()
return task_ref
def _check_session(self, params):
if (self._session is None or
self._session not in _db_content['session']):
raise Failure(['HANDLE_INVALID', 'session', self._session])
if len(params) == 0 or params[0] != self._session:
LOG.debug('Raising NotImplemented')
raise NotImplementedError('Call to XenAPI without using .xenapi')
def _check_arg_count(self, params, expected):
actual = len(params)
if actual != expected:
raise Failure(['MESSAGE_PARAMETER_COUNT_MISMATCH',
expected, actual])
def _get_by_field(self, recs, k, v, return_singleton):
result = []
for ref, rec in recs.iteritems():
if rec.get(k) == v:
result.append(ref)
if return_singleton:
try:
return result[0]
except IndexError:
raise Failure(['UUID_INVALID', v, result, recs, k])
return result
class FakeXenAPI(object):
def __init__(self):
self.Failure = Failure
# Based upon _Method from xmlrpclib.
class _Dispatcher:
def __init__(self, send, name):
self.__send = send
self.__name = name
def __repr__(self):
if self.__name:
return '<xenapi.fake._Dispatcher for %s>' % self.__name
else:
return '<xenapi.fake._Dispatcher>'
def __getattr__(self, name):
if self.__name is None:
return _Dispatcher(self.__send, name)
else:
return _Dispatcher(self.__send, "%s.%s" % (self.__name, name))
def __call__(self, *args):
return self.__send(self.__name, args)
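# Illustrative sketch (editor's addition, never called): how the fake session
# pieces above fit together. nova normally drives this through its XenAPI
# session wrapper; this standalone walk-through only shows the dispatch path.
def _example_fake_session_usage():
    session = SessionBase('http://localhost')
    # Any 'login*' attribute is routed to _login(), which records the session.
    session.login_with_password('user', 'password')
    # Attribute access on .xenapi builds _Dispatcher objects; calling one goes
    # through xenapi_request(), which hands 'VM.get_all' to the _getter helper.
    vm_refs = session.xenapi.VM.get_all()
    return [session.xenapi.VM.get_record(ref) for ref in vm_refs]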
|
{
"content_hash": "09e23c7dabec25911dcf22ad8f36c476",
"timestamp": "",
"source": "github",
"line_count": 1021,
"max_line_length": 79,
"avg_line_length": 34.47404505386876,
"alnum_prop": 0.5418489686914029,
"repo_name": "sajeeshcs/nested_quota_latest",
"id": "b6787da7573284ab2ab89075ebd4ca4d97efd1b5",
"size": "37298",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "nova/virt/xenapi/fake.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15283878"
},
{
"name": "Shell",
"bytes": "18626"
}
],
"symlink_target": ""
}
|
import os
import sys
import pandas
import datetime
import bigtempo.core as core
import bigtempo.auditor as auditor
dt = datetime.datetime
cities = ['CITY_A', 'CITY_B']
engine = core.DatasourceEngine()
def _get_test_data_dir():
return os.path.abspath(os.path.join('tests', 'acceptance_tests_data'))
def _get_test_data_filename(reference, symbol=None):
symbol_part = '' if not symbol else '{%s}' % symbol
return '%s%s.csv' % (reference, symbol_part)
def _get_test_data_filepath(reference, symbol=None):
return os.path.join(_get_test_data_dir(), _get_test_data_filename(reference, symbol))
@engine.datasource('SAMPLE',
tags=['SAMPLE_IN', 'DAILY'],
frequency='B')
class Sample(object):
def evaluate(self, context, symbol, start=None, end=None):
return pandas.DataFrame.from_csv(_get_test_data_filepath('SAMPLE', symbol))
@engine.datasource('WEEKLY_SAMPLE',
dependencies=['SAMPLE'],
tags=['SAMPLE_IN', 'WEEKLY'],
frequency='W-FRI')
class Weekly(object):
def evaluate(self, context, symbol, start=None, end=None):
return context.dependencies('SAMPLE').resample('W-FRI', how=lambda x: x[-1])
@engine.for_each(engine.select('SAMPLE_IN'))
def _rolling_mean_factory(source_reference):
@engine.datasource('ROLLING_MEAN:%s' % source_reference,
dependencies=[source_reference],
lookback=7,
tags=['ROLLING_MEAN'])
class RollingMean(object):
def evaluate(self, context, symbol, start=None, end=None):
input_ds = context.dependencies(source_reference)
return pandas.rolling_mean(input_ds, 7)
@engine.datasource('MONTHLY_SAMPLE',
dependencies=['SAMPLE'],
tags=['SAMPLE_IN', 'MONTHLY'],
frequency='M')
class Monthly(object):
def evaluate(self, context, symbol, start=None, end=None):
return context.dependencies('SAMPLE').resample('M', how=lambda x: x[-1])
@engine.for_each(engine.select('SAMPLE_IN').union('ROLLING_MEAN'))
def _percentual_change_factory(source_reference):
@engine.datasource('PERCENTUALT_CHANGE:%s' % source_reference,
dependencies=[source_reference],
lookback=1,
tags=['PERCENTUALT_CHANGE'])
class PctChange(object):
def evaluate(self, context, symbol, start=None, end=None):
return context.dependencies(source_reference).pct_change()
for city in cities:
auditor.generate_multiple(engine,
engine.select().all().difference('SAMPLE'),
city,
None,
None,
_get_test_data_filepath,
sys.modules[__name__])
auditor.generate_multiple(engine,
engine.select().all().difference('SAMPLE'),
city,
dt(2001, 1, 1),
dt(2002, 1, 1),
_get_test_data_filepath,
sys.modules[__name__])
def test_all_test_methods_are_being_generated():
result = []
for city in cities:
result.extend(list(auditor.generate_multiple(engine,
engine.select().all().difference('SAMPLE'),
city,
None,
None,
_get_test_data_filepath)))
assert len(result) == 22
|
{
"content_hash": "dbe118ab77442df15910d5ca3179b8c1",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 96,
"avg_line_length": 34.89908256880734,
"alnum_prop": 0.5291798107255521,
"repo_name": "rhlobo/bigtempo",
"id": "cc4bce5a5ab16d005f55470db354b472cb4adfda",
"size": "3830",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bigtempo/tests/acceptance_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "97880"
},
{
"name": "Shell",
"bytes": "4896"
}
],
"symlink_target": ""
}
|
import io
from glossarize.glossarize import Glossary, DctFromTxtFile
class TestDctFromTxtFile(object):
_txt = u"""
cat
a domestic feline
of questionable intent
turtle
a lovely little reptile
%robot:
your plastic pal
who's fun to be with
"""
def test_read(self):
dct = DctFromTxtFile(io.StringIO(self._txt))
assert dct['%robot:'] == "your plastic pal\nwho's fun to be with"
class TestGlossary(object):
_template = r'\1 means {defn}; \1 is always {defn}'
_translator = r'\1 ({defn})'
_glossary = {'no': 'NO!'}
_glossary_yaml = u"""
hajimemashite: Nice to meet you
neko: cat
nekobaasu: cat bus
"""
def test_str(self):
glossary = Glossary(glossary = self._glossary, template = self._template)
result = glossary.annotate('just say no!')
assert result == 'just say no means NO!; no is always NO!!'
def test_buried_str(self):
glossary = Glossary(glossary = self._glossary, template = self._template)
result = glossary.annotate('Arnold knows no nose')
assert result == 'Arnold knows no means NO!; no is always NO! nose'
def test_yml(self):
glossary_file = io.StringIO(self._glossary_yaml)
glossary = Glossary(glossary = glossary_file, template = self._translator)
result = glossary.annotate('Hajimemashite! Yoroshiko onegai nekobaasu!')
assert result == 'Hajimemashite (Nice to meet you)! Yoroshiko onegai nekobaasu (cat bus)!'
|
{
"content_hash": "14037d8d60587eb40e159bc790e2f315",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 99,
"avg_line_length": 35.41304347826087,
"alnum_prop": 0.5948434622467772,
"repo_name": "catherinedevlin/py-glossarize",
"id": "4d770447925856850a666699949385caa17dcca5",
"size": "1629",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/tests/test_glossary.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11458"
}
],
"symlink_target": ""
}
|
import functools
import inspect
from oslo_concurrency import lockutils
from oslo_log import log as logging
from oslo_utils import excutils
from nova.db import base
from nova import hooks
from nova.i18n import _, _LE
from nova.network import model as network_model
from nova import objects
LOG = logging.getLogger(__name__)
@hooks.add_hook('instance_network_info')
def update_instance_cache_with_nw_info(impl, context, instance,
nw_info=None, update_cells=True):
try:
if not isinstance(nw_info, network_model.NetworkInfo):
nw_info = None
if nw_info is None:
nw_info = impl._get_instance_nw_info(context, instance)
LOG.debug('Updating instance_info_cache with network_info: %s',
nw_info, instance=instance)
# NOTE(comstud): The save() method actually handles updating or
# creating the instance. We don't need to retrieve the object
# from the DB first.
ic = objects.InstanceInfoCache.new(context, instance.uuid)
ic.network_info = nw_info
ic.save(update_cells=update_cells)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Failed storing info cache'), instance=instance)
def refresh_cache(f):
"""Decorator to update the instance_info_cache
Requires context and instance as function args
"""
argspec = inspect.getargspec(f)
@functools.wraps(f)
def wrapper(self, context, *args, **kwargs):
try:
# get the instance from arguments (or raise ValueError)
instance = kwargs.get('instance')
if not instance:
instance = args[argspec.args.index('instance') - 2]
except ValueError:
msg = _('instance is a required argument to use @refresh_cache')
raise Exception(msg)
with lockutils.lock('refresh_cache-%s' % instance.uuid):
# We need to call the wrapped function with the lock held to ensure
# that it can call _get_instance_nw_info safely.
res = f(self, context, *args, **kwargs)
update_instance_cache_with_nw_info(self, context, instance,
nw_info=res)
# return the original function's return value
return res
return wrapper
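# Illustrative sketch (editor's addition, not part of nova): how @refresh_cache
# is meant to be applied. The class and method below are hypothetical; the
# decorator only needs 'context' and 'instance' among the arguments so it can
# lock on instance.uuid and store the returned network info in the cache.
class _ExampleCachedAPI(object):
    @refresh_cache
    def add_fixed_ip_to_instance(self, context, instance, network_id):
        # A real implementation would update the backend, then return the
        # refreshed network info for the decorator to write into the cache.
        return network_model.NetworkInfo()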
SENTINEL = object()
class NetworkAPI(base.Base):
"""Base Network API for doing networking operations.
New operations available on specific clients must be added here as well.
"""
def get_all(self, context):
"""Get all the networks for client."""
raise NotImplementedError()
def get(self, context, network_uuid):
"""Get specific network for client."""
raise NotImplementedError()
def create(self, context, **kwargs):
"""Create a network."""
raise NotImplementedError()
def delete(self, context, network_uuid):
"""Delete a specific network."""
raise NotImplementedError()
def disassociate(self, context, network_uuid):
"""Disassociate a network for client."""
raise NotImplementedError()
def get_fixed_ip(self, context, id):
"""Get fixed IP by id."""
raise NotImplementedError()
def get_fixed_ip_by_address(self, context, address):
"""Get fixed IP by address."""
raise NotImplementedError()
def get_floating_ip(self, context, id):
"""Get floating IP by id."""
raise NotImplementedError()
def get_floating_ip_pools(self, context):
"""Get floating IP pools."""
raise NotImplementedError()
def get_floating_ip_by_address(self, context, address):
"""Get floating IP by address."""
raise NotImplementedError()
def get_floating_ips_by_project(self, context):
"""Get floating IPs by project."""
raise NotImplementedError()
def get_instance_id_by_floating_address(self, context, address):
"""Get instance id by floating address."""
raise NotImplementedError()
def get_vifs_by_instance(self, context, instance):
"""Get vifs by instance.
:param context: nova.context.RequestContext
:param instance: nova.objects.instance.Instance
:returns: nova.objects.virtual_interface.VirtualInterfaceList; the
fields address, uuid and net_uuid should be set for each VIF object
in the returned list.
"""
raise NotImplementedError()
def get_vif_by_mac_address(self, context, mac_address):
"""Get vif mac address."""
raise NotImplementedError()
def allocate_floating_ip(self, context, pool=None):
"""Adds (allocate) floating IP to a project from a pool."""
raise NotImplementedError()
def release_floating_ip(self, context, address,
affect_auto_assigned=False):
"""Removes (deallocates) a floating IP with address from a project."""
raise NotImplementedError()
def disassociate_and_release_floating_ip(self, context, instance,
floating_ip):
"""Removes (deallocates) and deletes the floating IP."""
raise NotImplementedError()
def associate_floating_ip(self, context, instance,
floating_address, fixed_address,
affect_auto_assigned=False):
"""Associates a floating IP with a fixed IP."""
raise NotImplementedError()
def disassociate_floating_ip(self, context, instance, address,
affect_auto_assigned=False):
"""Disassociates a floating IP from fixed IP it is associated with."""
raise NotImplementedError()
def allocate_for_instance(self, context, instance, vpn,
requested_networks, macs=None,
security_groups=None,
dhcp_options=None, bind_host_id=None):
"""Allocates all network structures for an instance.
:param context: The request context.
:param instance: nova.objects.instance.Instance object.
:param vpn: A boolean, if True, indicate a vpn to access the instance.
:param requested_networks: A list of requested_networks,
Optional value containing network_id, fixed_ip, and port_id.
:param macs: None or a set of MAC addresses that the instance
should use. macs is supplied by the hypervisor driver (contrast
with requested_networks which is user supplied).
:param security_groups: None or security groups to allocate for
instance.
:param dhcp_options: None or a set of key/value pairs that should
            determine the DHCP BOOTP response, e.g. for PXE booting an instance
configured with the baremetal hypervisor. It is expected that these
are already formatted for the neutron v2 api.
See nova/virt/driver.py:dhcp_options_for_instance for an example.
:param bind_host_id: the host ID to attach to the ports being created.
:returns: network info as from get_instance_nw_info() below
"""
raise NotImplementedError()
def deallocate_for_instance(self, context, instance,
requested_networks=None):
"""Deallocates all network structures related to instance."""
raise NotImplementedError()
def allocate_port_for_instance(self, context, instance, port_id,
network_id=None, requested_ip=None,
bind_host_id=None):
"""Allocate port for instance."""
raise NotImplementedError()
def deallocate_port_for_instance(self, context, instance, port_id):
"""Deallocate port for instance."""
raise NotImplementedError()
def list_ports(self, *args, **kwargs):
"""List ports."""
raise NotImplementedError()
def show_port(self, *args, **kwargs):
"""Show specific port."""
raise NotImplementedError()
def add_fixed_ip_to_instance(self, context, instance, network_id):
"""Adds a fixed IP to instance from specified network."""
raise NotImplementedError()
def remove_fixed_ip_from_instance(self, context, instance, address):
"""Removes a fixed IP from instance from specified network."""
raise NotImplementedError()
def add_network_to_project(self, context, project_id, network_uuid=None):
"""Force adds another network to a project."""
raise NotImplementedError()
def associate(self, context, network_uuid, host=SENTINEL,
project=SENTINEL):
"""Associate or disassociate host or project to network."""
raise NotImplementedError()
def get_instance_nw_info(self, context, instance, **kwargs):
"""Returns all network info related to an instance."""
with lockutils.lock('refresh_cache-%s' % instance.uuid):
result = self._get_instance_nw_info(context, instance, **kwargs)
# NOTE(comstud): Don't update API cell with new info_cache every
# time we pull network info for an instance. The periodic healing
# of info_cache causes too many cells messages. Healing the API
# will happen separately.
update_cells = kwargs.get('update_cells', False)
update_instance_cache_with_nw_info(self, context, instance,
nw_info=result,
update_cells=update_cells)
return result
def _get_instance_nw_info(self, context, instance, **kwargs):
"""Template method, so a subclass can implement for neutron/network."""
raise NotImplementedError()
def create_pci_requests_for_sriov_ports(self, context,
pci_requests,
requested_networks):
"""Check requested networks for any SR-IOV port request.
Create a PCI request object for each SR-IOV port, and add it to the
pci_requests object that contains a list of PCI request object.
"""
raise NotImplementedError()
def validate_networks(self, context, requested_networks, num_instances):
"""validate the networks passed at the time of creating
the server.
Return the number of instances that can be successfully allocated
with the requested network configuration.
"""
raise NotImplementedError()
def get_dns_domains(self, context):
"""Returns a list of available dns domains.
These can be used to create DNS entries for floating IPs.
"""
raise NotImplementedError()
def add_dns_entry(self, context, address, name, dns_type, domain):
"""Create specified DNS entry for address."""
raise NotImplementedError()
def modify_dns_entry(self, context, name, address, domain):
"""Create specified DNS entry for address."""
raise NotImplementedError()
def delete_dns_entry(self, context, name, domain):
"""Delete the specified dns entry."""
raise NotImplementedError()
def delete_dns_domain(self, context, domain):
"""Delete the specified dns domain."""
raise NotImplementedError()
def get_dns_entries_by_address(self, context, address, domain):
"""Get entries for address and domain."""
raise NotImplementedError()
def get_dns_entries_by_name(self, context, name, domain):
"""Get entries for name and domain."""
raise NotImplementedError()
def create_private_dns_domain(self, context, domain, availability_zone):
"""Create a private DNS domain with nova availability zone."""
raise NotImplementedError()
def create_public_dns_domain(self, context, domain, project=None):
"""Create a public DNS domain with optional nova project."""
raise NotImplementedError()
def setup_networks_on_host(self, context, instance, host=None,
teardown=False):
"""Setup or teardown the network structures on hosts related to
instance.
"""
raise NotImplementedError()
def migrate_instance_start(self, context, instance, migration):
"""Start to migrate the network of an instance."""
raise NotImplementedError()
def migrate_instance_finish(self, context, instance, migration):
"""Finish migrating the network of an instance."""
raise NotImplementedError()
def setup_instance_network_on_host(self, context, instance, host):
"""Setup network for specified instance on host.
:param context: The request context.
:param instance: nova.objects.instance.Instance object.
:param host: The host which network should be setup for instance.
"""
raise NotImplementedError()
def cleanup_instance_network_on_host(self, context, instance, host):
"""Cleanup network for specified instance on host.
:param context: The request context.
:param instance: nova.objects.instance.Instance object.
:param host: The host which network should be cleanup for instance.
"""
raise NotImplementedError()
def update_instance_vnic_index(self, context, instance, vif, index):
"""Update instance vnic index.
When the 'VNIC index' extension is supported this method will update
        the vnic index of the instance on the port. An instance may have more
than one vnic.
:param context: The request context.
:param instance: nova.objects.instance.Instance object.
:param vif: The VIF in question.
:param index: The index on the instance for the VIF.
"""
pass
|
{
"content_hash": "c66a598c0c1255bd3d2cacbb48217173",
"timestamp": "",
"source": "github",
"line_count": 351,
"max_line_length": 79,
"avg_line_length": 39.89173789173789,
"alnum_prop": 0.6279102985287816,
"repo_name": "hanlind/nova",
"id": "010cfcd63ee54d5220866a1379b5e2ce2fd74707",
"size": "14637",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "nova/network/base_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PHP",
"bytes": "3325"
},
{
"name": "Python",
"bytes": "18681206"
},
{
"name": "Shell",
"bytes": "32127"
},
{
"name": "Smarty",
"bytes": "306159"
}
],
"symlink_target": ""
}
|
"""Index prefixing tests."""
import json
from conftest import IndexFlusher
from helpers import assert_hits_len, get_json, record_url
from invenio_search import current_search
def test_index_creation(app, prefixed_search):
"""Sanity check for index creation."""
suffix = current_search.current_suffix
es_aliases = prefixed_search.indices.get_alias()
# Keys are the indices
assert set(es_aliases.keys()) == {
"test-invenio-records-rest-testrecord{}".format(suffix),
}
aliases = set()
for index_info in es_aliases.values():
aliases |= set(index_info.get("aliases", {}).keys())
assert aliases == {
"test-invenio-records-rest",
"test-invenio-records-rest-testrecord",
}
def test_api_views(app, prefixed_search, db, test_data, search_url, search_class):
"""Test REST API views behavior."""
suffix = current_search.current_suffix
with app.test_client() as client:
HEADERS = [
("Accept", "application/json"),
("Content-Type", "application/json"),
]
# Create record
res = client.post(search_url, data=json.dumps(test_data[0]), headers=HEADERS)
recid = get_json(res)["id"]
assert res.status_code == 201
# Flush and check indices
IndexFlusher(search_class).flush_and_wait()
result = prefixed_search.search(index="test-invenio-records-rest")
assert len(result["hits"]["hits"]) == 1
record_doc = result["hits"]["hits"][0]
assert record_doc["_index"] == "test-invenio-records-rest-testrecord" + suffix
# Fetch the record
assert client.get(record_url(recid)).status_code == 200
# Record shows up in search
res = client.get(search_url)
assert_hits_len(res, 1)
# Delete the record
res = client.delete(record_url(recid))
IndexFlusher(search_class).flush_and_wait()
result = prefixed_search.search(index="test-invenio-records-rest")
assert len(result["hits"]["hits"]) == 0
# Deleted record should return 410
assert client.get(record_url(recid)).status_code == 410
# Record doesn't show up in search
res = client.get(search_url)
assert_hits_len(res, 0)
|
{
"content_hash": "c0fb7f16b56eea62661b72732d249a64",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 86,
"avg_line_length": 34.39393939393939,
"alnum_prop": 0.626431718061674,
"repo_name": "inveniosoftware/invenio-records-rest",
"id": "9189ab8a290c92a1237443dc31bd9586ce45e8b2",
"size": "2505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_index_prefixing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "297117"
},
{
"name": "Shell",
"bytes": "778"
}
],
"symlink_target": ""
}
|
'''
Created on Mar 17, 2013
__author__ = "Elizabeth 'pidge' Flanagan"
__copyright__ = "Copyright 2012-2013, Intel Corp."
__credits__ = ["Elizabeth Flanagan"]
__license__ = "GPL"
__version__ = "2.0"
__maintainer__ = "Elizabeth Flanagan"
__email__ = "elizabeth.flanagan@intel.com"
'''
from buildbot.steps.shell import ShellCommand
from twisted.python import log
import os, datetime
from autobuilder.config import *
class CreateIntelBSPPackage(ShellCommand):
haltOnFailure = False
flunkOnFailure = True
name = "Creating BSP Packages"
def __init__(self, factory, argdict=None, **kwargs):
self.factory = factory
self.machine = ""
        self.workerdir = os.path.join(YOCTO_ABBASE, "yocto-worker")
for k, v in argdict.iteritems():
if type(v) is bool:
setattr(self, k, str(v))
else:
setattr(self, k, v)
self.description = "Creating BSP packages for " + self.machine + " tarball"
self.timeout = 100000
kwargs['timeout']=self.timeout
ShellCommand.__init__(self, **kwargs)
def start(self):
DEST=self.getProperty("DEST")
buildername=self.getProperty("buildername")
revision=self.getProperty("got_revision_poky")
snapshot = ""
if str(self.getProperty("custom_release_me")) == "True":
is_milestone = self.getProperty("custom_is_milestone")
if is_milestone == "True":
snapshot = "+snapshot"
poky_name = self.getProperty("custom_poky_name")
poky_number = self.getProperty("custom_poky_number")
yocto_number = self.getProperty("custom_yocto_number")
revision = self.machine+'-'+ yocto_number
self.basedir=os.path.join(self.workerdir, buildername)
bsptardir = revision
cmd = "mkdir -p " + bsptardir + "/meta-intel; cd " + bsptardir + "; rm -rf meta-intel;"
cmd = cmd + "git clone " + self.getProperty("repourl_meta-intel") + " meta-intel;"
cmd = cmd + "cd meta-intel; git checkout " + self.getProperty("got_revision_meta-intel")+";"
# issue with buildbot choking on a find exec. figure this out later.
cmd = cmd + "for x in `find . -name .git\*`; do rm -rf $x; done;"
cmd = cmd + "for x in `/bin/ls|egrep -v '(common|tlk|conf|README|MAINT|meta-" + self.machine.replace("-noemgd", "").replace("-lsb", "") + ")'`; do rm -rf $x; done;"
cmd = cmd + "mkdir -p ./meta-" + self.machine.replace("-noemgd", "").replace("-lsb", "") + "/binary;"
cmd = cmd + "cp -RL " + DEST + "/machines/" + self.machine + "/core-image-sato-" + self.machine + ".hddimage ./meta-" + self.machine.replace("-noemgd", "").replace("-lsb", "") + "/binary;"
cmd = cmd + "cp -RL " + DEST + "/machines/" + self.machine + "/core-image-minimal-" + self.machine + ".hddimage ./meta-" + self.machine.replace("-noemgd", "").replace("-lsb", "") + "/binary;"
cmd = cmd + "echo '' >> ./README.tlk;"
cmd = cmd + "echo 'The following text is autogenerated during the autobuilder build process.' >> ./README.tlk;"
cmd = cmd + "echo 'It is not a part of the repositories used to create this BSP package.' >> ./README.tlk;"
cmd = cmd + "echo '------------------------------------------------------------------------' >> ./README.tlk;"
cmd = cmd + "echo 'Please note that the provided images were built using the meta-tlk layer' >> ./README.tlk;"
cmd = cmd + "echo '(time limited kernel). Build instructions to build non-tlk images are' >> ./README.tlk;"
cmd = cmd + "echo 'provided in ./meta-" + self.machine.replace("-noemgd", "").replace("-lsb", "") + "/README' >> ./README.tlk;"
cmd = cmd + "echo '' >> ./README.tlk; cd ..;"
cmd = cmd + "cd ..; tar cjvf " + self.machine + ".tar.bz2 meta-intel;"
cmd = cmd + "mkdir -p " + DEST + "/machines/" + self.machine +";"
cmd = cmd + "cp -RL " + self.machine + ".tar.bz2 " + DEST + "/machines/" + self.machine +";"
self.command=cmd
ShellCommand.start(self)
|
{
"content_hash": "7db25247bb6290f9a6730c9f1cdae5e0",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 199,
"avg_line_length": 57,
"alnum_prop": 0.5784600389863548,
"repo_name": "denny820909/builder",
"id": "42b6d3e1b7b2ea22235501e3a85c6126bab90411",
"size": "4104",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/autobuilder/buildsteps/CreateIntelBSPPackage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "C",
"bytes": "68706"
},
{
"name": "CSS",
"bytes": "18630"
},
{
"name": "D",
"bytes": "532"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "HTML",
"bytes": "69377"
},
{
"name": "Makefile",
"bytes": "1220"
},
{
"name": "Objective-C",
"bytes": "1291"
},
{
"name": "Python",
"bytes": "21088388"
},
{
"name": "Shell",
"bytes": "2766"
},
{
"name": "Smarty",
"bytes": "393"
}
],
"symlink_target": ""
}
|
import nose
import angr
import subprocess
import logging
l = logging.getLogger('angr.tests.sscanf')
import os
test_location = str(os.path.dirname(os.path.realpath(__file__)))
def run_sscanf(threads):
test_bin = os.path.join(test_location, "../../binaries/tests/x86_64/sscanf_test")
b = angr.Project(test_bin)
pg = b.factory.path_group(immutable=False, threads=threads)
# find the end of main
expected_outputs = {
"0x worked\n", "+0x worked\n", "base +16 worked\n", "base 16 worked\n",
"-0x worked\n", "base -16 worked\n", "Nope x\n",
"base 8 worked\n", "base +8 worked\n", "base +10 worked\n", "base 10 worked\n",
"base -8 worked\n", "base -10 worked\n", "Nope u\n",
"No switch\n",
}
pg.explore(find=0x400939, num_find=len(expected_outputs))
nose.tools.assert_equal(len(pg.found), len(expected_outputs))
# check the outputs
pipe = subprocess.PIPE
for f in pg.found:
test_input = f.state.posix.dumps(0)
test_output = f.state.posix.dumps(1)
expected_outputs.remove(test_output)
# check the output works as expected
p = subprocess.Popen(test_bin, stdout=pipe, stderr=pipe, stdin=pipe)
ret = p.communicate(test_input)[0]
nose.tools.assert_equal(ret, test_output)
# check that all of the outputs were seen
nose.tools.assert_equal(len(expected_outputs), 0)
def test_sscanf():
yield run_sscanf, None
yield run_sscanf, 8
if __name__ == "__main__":
run_sscanf(4)
|
{
"content_hash": "6b198e155b3ef0883c4deb833fc5f39c",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 87,
"avg_line_length": 31.122448979591837,
"alnum_prop": 0.6360655737704918,
"repo_name": "haylesr/angr",
"id": "368d22592bd26d4e4f6a48731445a44229e883f7",
"size": "1525",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_sscanf.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "824"
},
{
"name": "Makefile",
"bytes": "291"
},
{
"name": "Python",
"bytes": "799030"
}
],
"symlink_target": ""
}
|
"""Ce package contient les backens de jeu.
Ce fichier contient la classe BaseJeu dont doit être hérité chaque jeu.
Cette classe est détaillée plus bas.
"""
from abstraits.obase import BaseObj
class BaseJeu(BaseObj):
"""Classe définissant un jeu.
Ce jeu est indépendant du plateau. En effet, un plateau peut être
utilisé pour différents jeux, par exemple on trouve beaucoup de jeux
différents utilisant 52 cartes.
Le jeu tel que défini dans cette classe est l'ensemble des règles
du jeu indépendemment du plateau. Le plateau lui va contenir les
cases, les différents pions et d'autres détails. Les informations
ponctuelles sur le jeu (la position des joueurs, les tours de chacun)
va être défini dans la partie.
"""
nom = "" # nom du jeu
def __init__(self):
"""Constructeur d'un jeu.
Il ne doit pas être redéfini. Pour ajouter des attributs,
redéfinir plutôt la méthode init qui est appelée par le constructeur.
"""
BaseObj.__init__(self)
self.partie = None
self.plateau = None
self.nb_joueurs_min = 1
self.nb_joueurs_max = 1
self._construire()
def __getnewargs__(self):
return ()
def peut_commencer(self):
"""La partie peut-elle commencer ?"""
return True
def peut_jouer(self, personnage):
"""Le joueur peut-il jouer ?
Cette méthode retourne True si le joueur peut jouer ou False sinon.
En outre, si une explication doit être donnée sur le fait que
ce joueur ne eput pas jouer, ce doit être ici.
"""
return True
def jouer(self, personnage, msg):
"""Joue au jeu.
La variable msg contient le message entré par le joueur voulant jouer.
Pour rappel, le contexte de jeu interprète les messages commençant
par un slash (/) comme des options. Tous les autres sont des
ordres adressés au jeu pour jouer et sont transmis à cette méthode.
Chaque jeu peut avoir une différente syntaxe pour jouer
(par exemple, le solitaire demandra qu'on entre deux coordonnées
séparés par un espace, ce sera aussi le cas des échecs, mais ce
ne sera pas le cas d'un jeu de carte où il faudra choisir le numéro
de la carte que l'on souhaite jouer par exemple). Cela est à
l'initiative de cette méthode de comprendre le message et de
l'interpréter.
Chaque jeu doit donc redéfinir cette méthode.
"""
self.partie.en_cours = True
# Options
    # The method names are opt_ followed by the shortcut. Thus, to
    # quit the game, entering /q redirects to the opt_q method
    # (an illustrative dispatch sketch follows this class).
def opt_q(self, personnage, message):
"""Quitte le jeu."""
personnage.contextes.retirer()
personnage << "Vous quittez la partie."
self.partie.retirer_joueur(personnage)
if personnage in self.partie.observateurs:
self.partie.observateurs.remove(personnage)
if len(self.partie.joueurs + self.partie.observateurs) == 0:
self.partie.detruire()
def opt_reset(self, personnage, message):
"""Force tout le monde à quitter le jeu."""
if personnage.est_immortel():
for joueur in self.partie.joueurs + self.partie.observateurs:
joueur.contextes.retirer()
joueur << "Vous quittez la partie."
self.partie.retirer_joueur(joueur)
if joueur in self.partie.observateurs:
self.partie.observateurs.remove(joueur)
self.partie.detruire()
self.partie = None
else:
personnage << "|err|Vous ne pouvez faire cela.|ff|"
def opt_v(self, personnage, message):
"""Affiche le plateau."""
personnage << self.partie.afficher(personnage)
def opt_o(self, personnage, message):
"""Bascule entre le mode joueur et le mode observateur."""
if self.partie.en_cours:
personnage << "|err|La partie a déjà commencé.|ff|"
return
if personnage in self.partie.joueurs:
self.partie.retirer_joueur(personnage)
self.partie.observateurs.append(personnage)
personnage << "Vous êtes à présent observateur " \
"du plateau."
return
if personnage in self.partie.observateurs:
self.partie.observateurs.remove(personnage)
if self.partie.ajouter_joueur(personnage):
personnage << "Vous n'êtes plus observateur et pouvez " \
"jouer à la partie."
else:
personnage << "Vous ne pouvez rejoindre les joueurs actuels " \
"de la partie."
self.partie.observateurs.append(personnage)
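# Illustrative sketch (editor's addition, not part of the original package):
# how a game context presumably routes an option such as "/q" to opt_q,
# following the "opt_" + shortcut convention described in the class above.
# The dispatch below is hypothetical; the real context lives elsewhere.
def _example_option_dispatch(jeu, personnage, message):
    shortcut = message.lstrip("/").split(" ")[0]
    handler = getattr(jeu, "opt_" + shortcut, None)
    if handler is None:
        personnage << "|err|Option inconnue.|ff|"
        return
    handler(personnage, message)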
|
{
"content_hash": "e3362c2ded898fde537304385a5c57d4",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 79,
"avg_line_length": 36.03703703703704,
"alnum_prop": 0.6265159301130524,
"repo_name": "vlegoff/tsunami",
"id": "54a77c3ec3af9d0e21cfa3a952a6f596fc972056",
"size": "6488",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/secondaires/jeux/jeux/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7930908"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
}
|
import json
import os
from unittest import mock
import pytest
from airflow.models import Connection
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
from tests.providers.google.cloud.utils.gcp_authenticator import GCP_AWS_KEY
from tests.test_utils.gcp_system_helpers import GoogleSystemTest, provide_gcp_context
ROLE_ANR = os.environ.get('GCP_AWS_ROLE_ANR', "arn:aws:iam::123456:role/role_arn")
AUDIENCE = os.environ.get('GCP_AWS_AUDIENCE', 'aws-federation.airflow.apache.org')
@pytest.mark.system("google.cloud")
@pytest.mark.credential_file(GCP_AWS_KEY)
class AwsBaseHookSystemTest(GoogleSystemTest):
@provide_gcp_context(GCP_AWS_KEY)
def test_run_example_gcp_vision_autogenerated_id_dag(self):
mock_connection = Connection(
conn_type="aws",
extra=json.dumps(
{
"role_arn": ROLE_ANR,
"assume_role_method": "assume_role_with_web_identity",
"assume_role_with_web_identity_federation": 'google',
"assume_role_with_web_identity_federation_audience": AUDIENCE,
}
),
)
with mock.patch.dict('os.environ', AIRFLOW_CONN_AWS_DEFAULT=mock_connection.get_uri()):
hook = AwsBaseHook(client_type='s3')
client = hook.get_conn()
response = client.list_buckets()
assert 'Buckets' in response
|
{
"content_hash": "4bef9868c95fe2d5a0eaa2b92416812f",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 95,
"avg_line_length": 37.6578947368421,
"alnum_prop": 0.6519916142557652,
"repo_name": "danielvdende/incubator-airflow",
"id": "cb9b674eee4f76af7dc361372dddb1f453c7b9ae",
"size": "2217",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "tests/providers/amazon/aws/hooks/test_base_aws_system.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25785"
},
{
"name": "Dockerfile",
"bytes": "76693"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "164512"
},
{
"name": "JavaScript",
"bytes": "236992"
},
{
"name": "Jinja",
"bytes": "37155"
},
{
"name": "Jupyter Notebook",
"bytes": "2929"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "21824455"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "495567"
},
{
"name": "TypeScript",
"bytes": "326556"
}
],
"symlink_target": ""
}
|
'''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
Test.Summary = '''
Test log filter.
'''
ts = Test.MakeATSProcess("ts", enable_cache=False)
replay_file = "log-filter.replays.yaml"
server = Test.MakeVerifierServerProcess("server", replay_file)
ts.Disk.records_config.update({
'proxy.config.diags.debug.enabled': 2,
'proxy.config.diags.debug.tags': 'http',
'proxy.config.diags.debug.client_ip': '127.0.0.1',
})
ts.Disk.remap_config.AddLine(
'map / http://localhost:{}/'.format(server.Variables.http_port)
)
# Verify that the various aspects of the expected debug output for the
# transaction are logged.
ts.Streams.stderr = Testers.ContainsExpression(
r"\+ Incoming Request \+",
"Make sure the client request information is present.")
ts.Streams.stderr += Testers.ContainsExpression(
r"\+ Proxy's Request after hooks \+",
"Make sure the proxy request information is present.")
ts.Streams.stderr += Testers.ContainsExpression(
r"\+ Incoming O.S. Response \+",
"Make sure the server's response information is present.")
ts.Streams.stderr += Testers.ContainsExpression(
r"\+ Proxy's Response 2 \+",
"Make sure the proxy response information is present.")
tr = Test.AddTestRun()
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(ts)
tr.AddVerifierClientProcess("client-1", replay_file, http_ports=[ts.Variables.port], other_args="--keys test-1")
|
{
"content_hash": "a022f3c6b3dd41097e8f5fb05c973bc9",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 112,
"avg_line_length": 39,
"alnum_prop": 0.733058608058608,
"repo_name": "pbchou/trafficserver",
"id": "705ed4920cef6e1241b61899b017bf274d7d2a60",
"size": "2184",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/gold_tests/logging/log-debug-client-ip.test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1478100"
},
{
"name": "C++",
"bytes": "16547456"
},
{
"name": "CMake",
"bytes": "13151"
},
{
"name": "Dockerfile",
"bytes": "6693"
},
{
"name": "Java",
"bytes": "9881"
},
{
"name": "Lua",
"bytes": "64412"
},
{
"name": "M4",
"bytes": "216500"
},
{
"name": "Makefile",
"bytes": "250518"
},
{
"name": "Objective-C",
"bytes": "12972"
},
{
"name": "Perl",
"bytes": "128436"
},
{
"name": "Python",
"bytes": "1509938"
},
{
"name": "SWIG",
"bytes": "25777"
},
{
"name": "Shell",
"bytes": "175893"
},
{
"name": "Starlark",
"bytes": "987"
},
{
"name": "Vim script",
"bytes": "192"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from .T_S_I_V_ import table_T_S_I_V_
class table_T_S_I_J_(table_T_S_I_V_):
pass
|
{
"content_hash": "0291543ecffd2ea4c724eda49ab19d1e",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 64,
"avg_line_length": 30.166666666666668,
"alnum_prop": 0.7071823204419889,
"repo_name": "Pal3love/otRebuilder",
"id": "ca538ba9a60ffae9d88099b10a2942f312e845da",
"size": "181",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Package/otRebuilder/Dep/fontTools/ttLib/tables/T_S_I_J_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2756220"
}
],
"symlink_target": ""
}
|
"""IPython Module"""
from __future__ import (absolute_import, print_function,
division)
from ..persistence.models import * # pylint: disable=wildcard-import
from ..persistence import persistence_config, relational, content
def init(path=None, ipython=None):
"""Initiate noWorkflow extension.
Load D3, IPython magics, and connect to database
Keyword Arguments:
path -- database path (default=current directory)
ipython -- IPython object (default=None)
"""
import os
from .magics import register_magics
try:
from .hierarchymagic import load_ipython_extension as load_hierarchy
load_hierarchy(ipython)
except ImportError:
print("Warning: Sphinx is not installed. Dot "
"graphs won't work")
register_magics(ipython)
if path is None:
path = os.getcwd()
persistence_config.connect(path)
return u"ok"
|
{
"content_hash": "d83a929bbf50560b84d312ed41784826",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 114,
"avg_line_length": 27.885714285714286,
"alnum_prop": 0.6342213114754098,
"repo_name": "gems-uff/noworkflow",
"id": "d802e83cc9a5c6de1c15f1599b18a0d35ce4dda6",
"size": "1194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "capture/noworkflow/now/ipython/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "176047"
},
{
"name": "HTML",
"bytes": "238"
},
{
"name": "JavaScript",
"bytes": "787748"
},
{
"name": "Jupyter Notebook",
"bytes": "5241520"
},
{
"name": "Prolog",
"bytes": "18527"
},
{
"name": "Python",
"bytes": "656680"
},
{
"name": "TypeScript",
"bytes": "122003"
}
],
"symlink_target": ""
}
|
DEBUG = True
TEMPLATE_DEBUG = True
DATABASE_ENGINE = ''
DATABASE_NAME = ''
DATABASE_USER = ''
|
{
"content_hash": "712f0caa909231dc536cfa17e8bc3a19",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 21,
"avg_line_length": 13.714285714285714,
"alnum_prop": 0.6666666666666666,
"repo_name": "fgirault/smeuhsocial",
"id": "f9c7a4ee0153b2cc5e5a7dd78b0f48eaf5c3773f",
"size": "191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/django_openid/docs/fake_project/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "72383"
},
{
"name": "HTML",
"bytes": "224460"
},
{
"name": "JavaScript",
"bytes": "18844"
},
{
"name": "Makefile",
"bytes": "3102"
},
{
"name": "Python",
"bytes": "690632"
}
],
"symlink_target": ""
}
|
import deployDataCenter
import TestCaseExecuteEngine
from optparse import OptionParser
import os
if __name__ == "__main__":
parser = OptionParser() #TODO: deprecate and use the argparse module
parser.add_option("-c", "--config", action="store", default="./datacenterCfg", dest="config", help="the path where the json config file generated, by default is ./datacenterCfg")
parser.add_option("-d", "--directory", dest="testCaseFolder", help="the test case directory")
parser.add_option("-r", "--result", dest="result", help="test result log file")
parser.add_option("-t", "--client", dest="testcaselog", help="test case log file")
parser.add_option("-l", "--load", dest="load", action="store_true", help="only load config, do not deploy, it will only run testcase")
parser.add_option("-f", "--file", dest="module", help="run tests in the given file")
parser.add_option("-x", "--xml", dest="xmlrunner", help="use the xml runner to generate xml reports and path to store xml files")
(options, args) = parser.parse_args()
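    # Illustrative invocations only; the paths and file names below are
    # hypothetical and not taken from this repository:
    #   python deployAndRun.py -c ./datacenterCfg -d /path/to/testcases -r /tmp/result.log
    #   python deployAndRun.py -l -f /path/to/test_module.py -x /tmp/xml-reports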
testResultLogFile = None
if options.result is not None:
testResultLogFile = options.result
testCaseLogFile = None
if options.testcaselog is not None:
testCaseLogFile = options.testcaselog
deploy = deployDataCenter.deployDataCenters(options.config)
if options.load:
deploy.loadCfg()
else:
deploy.deploy()
format = "text"
xmlDir = None
if options.xmlrunner is not None:
xmlDir = options.xmlrunner
format = "xml"
if options.testCaseFolder is None:
if options.module is None:
parser.print_usage()
exit(1)
else:
engine = TestCaseExecuteEngine.TestCaseExecuteEngine(deploy.testClient, testCaseLogFile, testResultLogFile, format, xmlDir)
engine.loadTestsFromFile(options.module)
engine.run()
else:
engine = TestCaseExecuteEngine.TestCaseExecuteEngine(deploy.testClient, testCaseLogFile, testResultLogFile, format, xmlDir)
engine.loadTestsFromDir(options.testCaseFolder)
engine.run()
|
{
"content_hash": "ca49dea42d878809073e1b47ec3418c2",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 182,
"avg_line_length": 43.97959183673469,
"alnum_prop": 0.6700696055684455,
"repo_name": "argv0/cloudstack",
"id": "1c82d9fcc2e7d9515f70eb527b9fe0b037d9e707",
"size": "2943",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/marvin/marvin/deployAndRun.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "52484672"
},
{
"name": "JavaScript",
"bytes": "1913801"
},
{
"name": "Perl",
"bytes": "824212"
},
{
"name": "Python",
"bytes": "2076246"
},
{
"name": "Ruby",
"bytes": "2166"
},
{
"name": "Shell",
"bytes": "445096"
}
],
"symlink_target": ""
}
|
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ExtensionsV1beta1Scale(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'ExtensionsV1beta1ScaleSpec',
'status': 'ExtensionsV1beta1ScaleStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None):
"""
ExtensionsV1beta1Scale - a model defined in Swagger
"""
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
if status is not None:
self.status = status
@property
def api_version(self):
"""
Gets the api_version of this ExtensionsV1beta1Scale.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:return: The api_version of this ExtensionsV1beta1Scale.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this ExtensionsV1beta1Scale.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:param api_version: The api_version of this ExtensionsV1beta1Scale.
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""
Gets the kind of this ExtensionsV1beta1Scale.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The kind of this ExtensionsV1beta1Scale.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this ExtensionsV1beta1Scale.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param kind: The kind of this ExtensionsV1beta1Scale.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this ExtensionsV1beta1Scale.
Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
:return: The metadata of this ExtensionsV1beta1Scale.
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this ExtensionsV1beta1Scale.
Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
:param metadata: The metadata of this ExtensionsV1beta1Scale.
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""
Gets the spec of this ExtensionsV1beta1Scale.
defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
:return: The spec of this ExtensionsV1beta1Scale.
:rtype: ExtensionsV1beta1ScaleSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""
Sets the spec of this ExtensionsV1beta1Scale.
defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
:param spec: The spec of this ExtensionsV1beta1Scale.
:type: ExtensionsV1beta1ScaleSpec
"""
self._spec = spec
@property
def status(self):
"""
Gets the status of this ExtensionsV1beta1Scale.
current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.
:return: The status of this ExtensionsV1beta1Scale.
:rtype: ExtensionsV1beta1ScaleStatus
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this ExtensionsV1beta1Scale.
current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.
:param status: The status of this ExtensionsV1beta1Scale.
:type: ExtensionsV1beta1ScaleStatus
"""
self._status = status
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, ExtensionsV1beta1Scale):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
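# Illustrative usage (not part of the generated client; the field values are
# hypothetical):
#   scale = ExtensionsV1beta1Scale(api_version="extensions/v1beta1", kind="Scale")
#   print(scale.to_dict())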
|
{
"content_hash": "98ede56a8b1b99dc6f63671b94bc0b67",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 281,
"avg_line_length": 32.686440677966104,
"alnum_prop": 0.6121337827326938,
"repo_name": "mbohlool/client-python",
"id": "4f888655d3bedf91dc6a20e24f2d300653addae3",
"size": "7731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubernetes/client/models/extensions_v1beta1_scale.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8417639"
},
{
"name": "Shell",
"bytes": "16830"
}
],
"symlink_target": ""
}
|
from bottle import run, route, post, request, default_app
import auth
import chats
from urls import *
users = auth.Users()
users.add('adm', 'asm')
cht = chats.Chats()
# TODO: a chat should be readable only by one of its authorized users
# TODO: persist users and chats
br = '<br>'
br = '\n'
@route(urlHist, method='POST')
def home():
chat = request.forms.get('chat')
return getHist(chat)
@route(urlPost, method='POST')
def chatPost():
msg = request.forms.get('msg')
user = request.forms.get('name')
chat = request.forms.get('chat')
r = addMsg(msg, user, chat)
return r
@route('/chatclear') # TODO
def chatClear():
with open(chatFile, 'wt') as f:
pass
return ''
@route(urlUserAdd, method='POST')
def webAddUser():
name = request.forms.get('name')
pswd = request.forms.get('pswd')
res = addUser(name, pswd)
return res
@route(urlAuth, method='POST')
def webAuthUser():
name = request.forms.get('name')
pswd = request.forms.get('pswd')
res = authUser(name, pswd)
return res
@route(urlChatAdd, method='POST')
def webAddChat():
name = request.forms.get('name')
users = request.forms.get('users')
users = users.split()
res = addChat(name, users)
return res
@route(urlGetUsers, method='POST')
def webGetUsers():
usrs = users.getusers()
print(usrs)
return str(usrs)
@route(urlGetChats, method='POST')
def webGetChats():
chats = cht.getchats()
print(chats)
return str(chats)
def addMsg(msg, userId, chatId):
if users.authed(userId):
cht.post(chatId, userId, msg)
return 'ok add msg'
else:
return 'no auth'
def getHist(chat):
h = cht.hist(chat)
return h
def addUser(name, pswd):
r = users.add(name, pswd)
return 'User added' if r else 'User exist'
def authUser(name, pswd):
r = users.auth(name, pswd)
print(name, pswd, r)
return r
def addChat(name, users):
r = cht.add(name, users)
return 'OK' if r else 'not'
if __name__=='__main__':
run(host='localhost', port=7000, reloader=True)
pass
applicationChat = default_app()
|
{
"content_hash": "407ec96e1ae89e2ba0fcefdd7ddbe5bf",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 71,
"avg_line_length": 22.15625,
"alnum_prop": 0.6370474847202633,
"repo_name": "anokata/pythonPetProjects",
"id": "eeb12ea62e9fe91f2efe28b7a8b27a5b498bd796",
"size": "2207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webChat/chat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6467"
},
{
"name": "HTML",
"bytes": "56632"
},
{
"name": "JavaScript",
"bytes": "603"
},
{
"name": "Makefile",
"bytes": "889"
},
{
"name": "Python",
"bytes": "840906"
},
{
"name": "Shell",
"bytes": "2407"
},
{
"name": "TSQL",
"bytes": "1299"
}
],
"symlink_target": ""
}
|
from bookmarksdb import *
from utility import *
log = None
gnosis_db = BookmarksDb()
def print_instructions():
"""
Print possible command-line arguments
"""
print("-a, --search-all")
print("-c, --clean: remove stale bookmarks")
print("-g, --get \"file\": return bookmarks for file")
print("-i, --import: import bookmarks")
print("-s, --stats: DB stats")
def db_stats():
"""
"""
print("DB stats:\n")
print("ePub files:")
print("PDF files:")
print("Bookmarks:")
def bookmarks_for_file(file_path):
"""
:param file_path:
"""
if os.path.isfile(file_path):
if file_path.endswith('.epub'):
bookmarks_for_epub()
elif file_path.endswith('.pdf'):
bookmarks_for_pdf(file_path)
def bookmarks_for_epub():
"""Retrieves last opened file (current) from calibre viewer preferences and
retrieves bookmarks
:return: all bookmarks for one ePub to Alfred
"""
epub_file = epub_file_from_calibre()
bookmarks = gnosis_db.bookmarks_for_file(epub_file)
if bookmarks is not None:
return_epub_bookmarks(epub_file, bookmarks)
else:
import_files(epub_file)
bookmarks = gnosis_db.bookmarks_for_file(epub_file)
if bookmarks:
return_epub_bookmarks(epub_file, bookmarks)
else:
return_no_bookmarks()
def bookmarks_for_epub_filtered(search_terms):
"""Filters bookmarks for last open ePub in calibre by search terms
:param search_terms: from Alfred script filter
:return: filtered bookmarks
"""
file_path = epub_file_from_calibre()
bookmarks = gnosis_db.bookmarks_for_file_filtered(file_path,
search_terms)
if bookmarks is not None:
for bookmark in bookmarks:
section = bookmark[2] if bookmark[2] else ''
wf.add_item(uid=bookmark[0],
title=bookmark[1],
subtitle=section,
icon=file_path,
icontype='fileicon',
quicklookurl=file_path,
arg='"%s"' % bookmark[1],
valid=True)
wf.send_feedback()
return 0
def bookmarks_for_pdf(file_path):
"""Retrieves bookmarks for PDF, imports first if needed, then calls
return_pdf_bookmarks or return_no_bookmarks to return Alfred results
:param file_path: path to PDF
"""
bookmarks = gnosis_db.bookmarks_for_file(file_path)
if bookmarks is not None:
return_pdf_bookmarks(file_path, bookmarks)
else:
import_files(file_path)
bookmarks = gnosis_db.bookmarks_for_file(file_path)
if bookmarks:
return_pdf_bookmarks(file_path, bookmarks)
else:
return_no_bookmarks()
def bookmarks_for_pdf_filtered(file_path, search_terms):
"""Filter bookmark results for open PDF
:param file_path: path to PDF
:param search_terms: Alfred filter string
:return: relevant bookmarks
"""
filtered_bookmarks = gnosis_db.bookmarks_for_file_filtered(file_path,
search_terms)
if not filtered_bookmarks:
wf.send_feedback()
return 0
for bookmark in filtered_bookmarks:
if bookmark[2]:
subtitle_suffix = ''.join(['. ', bookmark[2]])
else:
subtitle_suffix = ''
wf.add_item(uid=bookmark[0],
title=bookmark[1],
subtitle=''.join(['Page ',
bookmark[3],
subtitle_suffix]),
icon=file_path,
icontype='fileicon',
quicklookurl=file_path,
arg=bookmark[3],
valid=True)
wf.send_feedback()
return 0
def clean_bookmarks():
"""Remove file and bookmarks for any deleted files"""
print("Looking for orphaned files...\n")
all_files = gnosis_db.all_files()
number_of_files = 0
missing_files = list()
for the_file in all_files:
if not os.path.isfile(the_file.path):
folder, file_name = os.path.split(the_file.path)
file_path_icloud = os.path.join(folder, '.' + file_name +
'.icloud')
if not os.path.isfile(file_path_icloud.encode("utf-8")):
print('%s' % the_file.path)
missing_files.append(the_file.path)
number_of_files += 1
if number_of_files == 0:
print('Found 0 files.')
else:
if yes_or_no("\nRemove %s file(s)?" % number_of_files):
for the_file in missing_files:
gnosis_db.remove_file(the_file)
print("\rRemoved %s of %s files." % (number_of_files,
all_files.count()))
def edit_epub():
"""Get current ePub from calibre viewer for editing
:return: path to ePub
"""
epub_file = epub_file_from_calibre()
wf.add_item(title='Edit %s' % epub_file,
arg=epub_file,
valid=True)
wf.send_feedback()
return
def epub_file_from_calibre():
"""Reads calibre viewer settings json and returns last open file, assumes
currently open
:return: path to ePub
"""
import json
calibre_json_file = "~/Library/Preferences/calibre/viewer.json"
json_path = os.path.expanduser(calibre_json_file)
json_data = open(json_path)
data = json.load(json_data)
return data['viewer_open_history'][0]
def icon_for_file(cover_folder, result):
"""Set icon for search results, either book cover if exists, or file
icon
:param cover_folder:
:param result:
:return:
"""
cover_path = os.path.join(cover_folder, str(result['file_id']) + '.png')
if os.path.isfile(cover_path):
cover_icon = cover_path
icon_type = 'None'
else:
cover_icon = result['path']
icon_type = 'fileicon'
return cover_icon, icon_type
def import_files(path):
"""Import ePub and PDF bookmarks, file or recursively by folder path
:param path: full path to file or folder
"""
# if file
if os.path.isfile(path):
if path.endswith('.epub') or path.endswith('.pdf'):
bookmarks = gnosis_db.bookmarks_for_file(path)
if bookmarks is None:
gnosis_db.import_bookmarks_from_file(
path)
elif os.path.isdir(path):
# if folder, recursive
for root, __, files in os.walk(path):
for name in files:
if name.endswith('.epub') or name.endswith('.pdf'):
file_path = os.path.join(root, name)
bookmarks = gnosis_db.bookmarks_for_file(file_path)
if bookmarks is None or len(bookmarks) == 0:
print("Importing: %s" % file_path)
gnosis_db.import_bookmarks_from_file(
file_path)
def open_epub_bookmark(bookmark):
"""
:param bookmark:
"""
epub_file = epub_file_from_calibre()
command = "/Applications/calibre.app/Contents/MacOS/ebook-viewer"
parameter = "--open-at=toc:%s" % bookmark
from subprocess import call
call([command, parameter, "--continue", epub_file])
def open_epub_to_bookmark(epub_file, bookmark):
"""
:param epub_file:
:param bookmark:
"""
calibre_viewer = '/Applications/calibre.app/Contents/MacOS/ebook-viewer'
bookmark_parameter = ''.join([u' --open-at=toc:',
u'"',
bookmark.encode('utf-8'),
u'"'])
quoted_file = ''.join([u' "',
epub_file,
u'"'])
cmd = ''.join([calibre_viewer, bookmark_parameter, quoted_file])
from subprocess import call
call([cmd], shell=True)
def return_epub_bookmarks(file_path, bookmarks):
"""
:param file_path:
:param bookmarks:
:return:
"""
for bookmark in bookmarks:
section = bookmark[2] if bookmark[2] else ''
wf.add_item(uid=bookmark[0],
title=bookmark[1],
subtitle=section,
icon=file_path,
icontype='fileicon',
quicklookurl=file_path,
arg='"%s"' % bookmark[1],
valid=True)
wf.send_feedback()
return 0
def return_no_bookmarks():
"""
:return:
"""
wf.add_item(title="No bookmarks found.",
valid=True)
wf.send_feedback()
return 0
def return_pdf_bookmarks(file_path, bookmarks):
"""Create Alfred items from returned bookmarks for current PDF
:param file_path: path to file, used for results icon
:param bookmarks: bookmarks from DB
:return: Alfred items
"""
for bookmark in bookmarks:
subtitle_suffix = ''.join(['. ', bookmark[2]]) \
if bookmark[2] else ''
wf.add_item(uid=bookmark[0],
title=bookmark[1],
subtitle=''.join(['Page ', bookmark[3], subtitle_suffix]),
icon=file_path,
icontype='fileicon',
quicklookurl=file_path,
arg=bookmark[3],
valid=True)
wf.send_feedback()
return 0
def search_all(query, file_type=None):
"""Takes query string and searches all bookmarks
:param query: search terms from Alfred
:param file_type: optional file type `epub` or `pdf`
:return: FTS matches for bookmark title, section & file name
"""
last_query = wf.cached_data('last_query', max_age=0)
if last_query != query:
wf.cache_data('last_query', query)
results = gnosis_db.search_all(query, file_type)
add_item = wf.add_item
cover_folder = os.path.join(wf.datadir, 'covers')
if results is None:
return_no_bookmarks()
return 0
for result in results:
# if NULL title
if result['title'] is None:
continue
# append section to subtitle
title_suffix = ''.join([' | ', result['section']]) \
if result['section'] else ''
# file icon
cover_icon, icon_type = icon_for_file(cover_folder, result)
item = add_item(uid=str(result['id']),
title=''.join([result['title'], title_suffix]),
icon=cover_icon,
icontype=icon_type,
quicklookurl=result['path'],
valid=True)
item.add_modifier('cmd', subtitle=result['section'])
if result['path'].endswith('epub'):
item.arg = ''.join(
[result['path'], '/Page:"', result['title'], '"'])
item.subtitle = result['file_name']
elif result['path'].endswith('pdf'):
item.arg = ''.join([result['path'], '/Page:', result[
'destination']])
item.subtitle = ''.join(
['Page ', result['destination'], '. ', result[
'file_name']])
item.add_modifier('cmd', subtitle=result['section'])
wf.send_feedback()
return 0
def testing_new_stuff(file_path=None, query=None):
"""
For testing new things
:param file_path:
"""
from gnosis_pdf import GnosisPdf
def test_file(the_file):
gnosis_pdf = GnosisPdf(the_file)
new_bookmarks = gnosis_pdf.bookmarks()
old_bookmarks = gnosis_db.bookmarks_for_file(the_file)
old_bookmarks = [x[1:] for x in old_bookmarks]
if new_bookmarks == old_bookmarks:
print("Bookmarks the same.")
else:
print("not the same: %s" % the_file)
print("Old bookmarks %s: %s\r" % (
len(old_bookmarks), old_bookmarks))
print("New bookmarks %s: %s" % (
len(new_bookmarks), new_bookmarks))
file_path = file_path.replace('\\ ', ' ')
if os.path.isdir(file_path):
for the_file in os.listdir(file_path):
if the_file.endswith('.pdf') and not the_file.startswith('.'):
full_path = os.path.join(file_path, the_file)
test_file(full_path)
if os.path.isfile(file_path):
test_file(file_path)
def cover_for_file(db_file):
cover_folder = os.path.join(wf.datadir, 'covers')
cover_file_path = os.path.join(cover_folder, str(db_file.id) + '.png')
file_path = db_file.path
# Create path for covers in ~/Application Support/...
# TODO make folder in bookmarks_db
if not os.path.exists(cover_folder):
os.makedirs(cover_folder)
# Try to get cover if not exists
if not os.path.isfile(cover_file_path):
cover_file = ''
if file_path.endswith('.epub'):
cover_file = gnosis_db.cover_for_epub(db_file)
elif file_path.endswith('.pdf'):
cover_file = gnosis_db.cover_for_pdf(db_file)
if cover_file is None:
print('Generated thumbnail: %s' % db_file.path)
def update_files():
"""Update files, at the moment covers"""
print('Updating files...')
all_files = gnosis_db.all_files()
for db_file in all_files:
file_path = db_file.path
if os.path.isfile(file_path):
cover_for_file(db_file)
if db_file.hash is None:
print('Hashing: %s' % db_file.path)
gnosis_db.db_record_for_file(db_file.path)
else:
# delete db entry only if not in iCloud
            folder, file_name = os.path.split(db_file.path)
            file_path_icloud = os.path.join(folder, '.' + file_name + '.icloud')
if os.path.isfile(file_path_icloud):
print('Removing: %s' % file_path)
gnosis_db.remove_file(file_path)
def main(wf):
# Parse arguments
args = wf.args
number_of_args = len(args)
if number_of_args == 0:
print_instructions()
elif number_of_args == 1:
command = args[0]
if command in ("-e", "--epub"):
bookmarks_for_epub()
elif command in ("-c", "--clean"):
clean_bookmarks()
elif command in "--edit-epub":
return edit_epub()
elif command in ("-s", "--stats"):
db_stats()
elif command in ("-t", "--test"):
testing_new_stuff()
elif command in "--update-files":
update_files()
elif number_of_args == 2:
command = args[0]
query = args[1]
query = query.replace('\\', '').strip()
if command in ("-a", "--search-all"):
search_all(query)
elif command in ("-g", "--get"):
bookmarks_for_file(query)
elif command in ("-e", "--epub"):
search_terms = query
bookmarks_for_epub_filtered(search_terms)
elif command in ("-b", "--epub-bookmark"):
bookmark = query
open_epub_bookmark(bookmark)
elif command in "--search-all-epub":
search_all(query, 'epub')
elif command in ("-p", "--search-all-pdf"):
search_all(query, 'pdf')
elif command in ("-i", "--import"):
import_files(query)
elif command in ("-t", "--test"):
testing_new_stuff(query)
elif number_of_args == 3:
command = args[0]
if command in "--epub-to-bookmark":
# open epub file to bookmark
open_epub_to_bookmark(args[1].encode('utf-8'), args[2].encode(
'utf-8'))
else:
query = args[1]
query = query.replace('\\', '')
search_terms = args[2]
bookmarks_for_pdf_filtered(query, search_terms)
return 0
if __name__ == '__main__':
wf = Workflow3()
sys.exit(wf.run(main))
|
{
"content_hash": "6ea52a2ff02ec23595de1099680e6a91",
"timestamp": "",
"source": "github",
"line_count": 543,
"max_line_length": 79,
"avg_line_length": 29.535911602209946,
"alnum_prop": 0.5405287442324479,
"repo_name": "gennaios/alfred-gnosis",
"id": "893603465a1cd7b93ec4bae66d95110bbb4bca4e",
"size": "16057",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/gnosis.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "71130"
},
{
"name": "C++",
"bytes": "19568"
},
{
"name": "Python",
"bytes": "1575529"
},
{
"name": "XSLT",
"bytes": "152693"
}
],
"symlink_target": ""
}
|
"""Tests for certbot_nginx.parser."""
import glob
import os
import re
import shutil
import unittest
from certbot import errors
from certbot_nginx import nginxparser
from certbot_nginx import obj
from certbot_nginx import parser
from certbot_nginx.tests import util
class NginxParserTest(util.NginxTest):
"""Nginx Parser Test."""
def setUp(self):
super(NginxParserTest, self).setUp()
def tearDown(self):
shutil.rmtree(self.temp_dir)
shutil.rmtree(self.config_dir)
shutil.rmtree(self.work_dir)
def test_root_normalized(self):
path = os.path.join(self.temp_dir, "etc_nginx/////"
"ubuntu_nginx/../../etc_nginx")
nparser = parser.NginxParser(path, None)
self.assertEqual(nparser.root, self.config_path)
def test_root_absolute(self):
nparser = parser.NginxParser(os.path.relpath(self.config_path), None)
self.assertEqual(nparser.root, self.config_path)
def test_root_no_trailing_slash(self):
nparser = parser.NginxParser(self.config_path + os.path.sep, None)
self.assertEqual(nparser.root, self.config_path)
def test_load(self):
"""Test recursive conf file parsing.
"""
nparser = parser.NginxParser(self.config_path, self.ssl_options)
nparser.load()
self.assertEqual(set([nparser.abs_path(x) for x in
['foo.conf', 'nginx.conf', 'server.conf',
'sites-enabled/default',
'sites-enabled/example.com']]),
set(nparser.parsed.keys()))
self.assertEqual([['server_name', 'somename alias another.alias']],
nparser.parsed[nparser.abs_path('server.conf')])
self.assertEqual([[['server'], [['listen', '69.50.225.155:9000'],
['listen', '127.0.0.1'],
['server_name', '.example.com'],
['server_name', 'example.*']]]],
nparser.parsed[nparser.abs_path(
'sites-enabled/example.com')])
def test_abs_path(self):
nparser = parser.NginxParser(self.config_path, self.ssl_options)
self.assertEqual('/etc/nginx/*', nparser.abs_path('/etc/nginx/*'))
self.assertEqual(os.path.join(self.config_path, 'foo/bar/'),
nparser.abs_path('foo/bar/'))
def test_filedump(self):
nparser = parser.NginxParser(self.config_path, self.ssl_options)
nparser.filedump('test')
# pylint: disable=protected-access
parsed = nparser._parse_files(nparser.abs_path(
'sites-enabled/example.com.test'))
self.assertEqual(3, len(glob.glob(nparser.abs_path('*.test'))))
self.assertEqual(2, len(
glob.glob(nparser.abs_path('sites-enabled/*.test'))))
self.assertEqual([[['server'], [['listen', '69.50.225.155:9000'],
['listen', '127.0.0.1'],
['server_name', '.example.com'],
['server_name', 'example.*']]]],
parsed[0])
def test_get_vhosts(self):
nparser = parser.NginxParser(self.config_path, self.ssl_options)
vhosts = nparser.get_vhosts()
vhost1 = obj.VirtualHost(nparser.abs_path('nginx.conf'),
[obj.Addr('', '8080', False, False)],
False, True,
set(['localhost',
r'~^(www\.)?(example|bar)\.']),
[])
vhost2 = obj.VirtualHost(nparser.abs_path('nginx.conf'),
[obj.Addr('somename', '8080', False, False),
obj.Addr('', '8000', False, False)],
False, True,
set(['somename', 'another.alias', 'alias']),
[])
vhost3 = obj.VirtualHost(nparser.abs_path('sites-enabled/example.com'),
[obj.Addr('69.50.225.155', '9000',
False, False),
obj.Addr('127.0.0.1', '', False, False)],
False, True,
set(['.example.com', 'example.*']), [])
vhost4 = obj.VirtualHost(nparser.abs_path('sites-enabled/default'),
[obj.Addr('myhost', '', False, True)],
False, True, set(['www.example.org']), [])
vhost5 = obj.VirtualHost(nparser.abs_path('foo.conf'),
[obj.Addr('*', '80', True, True)],
True, True, set(['*.www.foo.com',
'*.www.example.com']), [])
self.assertEqual(5, len(vhosts))
example_com = [x for x in vhosts if 'example.com' in x.filep][0]
self.assertEqual(vhost3, example_com)
default = [x for x in vhosts if 'default' in x.filep][0]
self.assertEqual(vhost4, default)
fooconf = [x for x in vhosts if 'foo.conf' in x.filep][0]
self.assertEqual(vhost5, fooconf)
localhost = [x for x in vhosts if 'localhost' in x.names][0]
self.assertEquals(vhost1, localhost)
somename = [x for x in vhosts if 'somename' in x.names][0]
self.assertEquals(vhost2, somename)
def test_add_server_directives(self):
nparser = parser.NginxParser(self.config_path, self.ssl_options)
nparser.add_server_directives(nparser.abs_path('nginx.conf'),
set(['localhost',
r'~^(www\.)?(example|bar)\.']),
[['foo', 'bar'], ['ssl_certificate',
'/etc/ssl/cert.pem']],
replace=False)
ssl_re = re.compile(r'\n\s+ssl_certificate /etc/ssl/cert.pem')
dump = nginxparser.dumps(nparser.parsed[nparser.abs_path('nginx.conf')])
self.assertEqual(1, len(re.findall(ssl_re, dump)))
server_conf = nparser.abs_path('server.conf')
names = set(['alias', 'another.alias', 'somename'])
nparser.add_server_directives(server_conf, names,
[['foo', 'bar'], ['ssl_certificate',
'/etc/ssl/cert2.pem']],
replace=False)
nparser.add_server_directives(server_conf, names, [['foo', 'bar']],
replace=False)
self.assertEqual(nparser.parsed[server_conf],
[['server_name', 'somename alias another.alias'],
['foo', 'bar'],
['ssl_certificate', '/etc/ssl/cert2.pem']
])
def test_add_http_directives(self):
nparser = parser.NginxParser(self.config_path, self.ssl_options)
filep = nparser.abs_path('nginx.conf')
block = [['server'],
[['listen', '80'],
['server_name', 'localhost']]]
nparser.add_http_directives(filep, block)
root = nparser.parsed[filep]
self.assertTrue(util.contains_at_depth(root, ['http'], 1))
self.assertTrue(util.contains_at_depth(root, block, 2))
# Check that our server block got inserted first among all server
# blocks.
http_block = [x for x in root if x[0] == ['http']][0][1]
server_blocks = [x for x in http_block if x[0] == ['server']]
self.assertEqual(server_blocks[0], block)
def test_replace_server_directives(self):
nparser = parser.NginxParser(self.config_path, self.ssl_options)
target = set(['.example.com', 'example.*'])
filep = nparser.abs_path('sites-enabled/example.com')
nparser.add_server_directives(
filep, target, [['server_name', 'foobar.com']], replace=True)
self.assertEqual(
nparser.parsed[filep],
[[['server'], [['listen', '69.50.225.155:9000'],
['listen', '127.0.0.1'],
['server_name', 'foobar.com'],
['server_name', 'example.*'],
]]])
self.assertRaises(errors.MisconfigurationError,
nparser.add_server_directives,
filep, set(['foobar.com', 'example.*']),
[['ssl_certificate', 'cert.pem']],
replace=True)
def test_get_best_match(self):
target_name = 'www.eff.org'
names = [set(['www.eff.org', 'irrelevant.long.name.eff.org', '*.org']),
set(['eff.org', 'ww2.eff.org', 'test.www.eff.org']),
set(['*.eff.org', '.www.eff.org']),
set(['.eff.org', '*.org']),
set(['www.eff.', 'www.eff.*', '*.www.eff.org']),
set(['example.com', r'~^(www\.)?(eff.+)', '*.eff.*']),
set(['*', r'~^(www\.)?(eff.+)']),
set(['www.*', r'~^(www\.)?(eff.+)', '.test.eff.org']),
set(['*.org', r'*.eff.org', 'www.eff.*']),
set(['*.www.eff.org', 'www.*']),
set(['*.org']),
set([]),
set(['example.com'])]
winners = [('exact', 'www.eff.org'),
(None, None),
('exact', '.www.eff.org'),
('wildcard_start', '.eff.org'),
('wildcard_end', 'www.eff.*'),
('regex', r'~^(www\.)?(eff.+)'),
('wildcard_start', '*'),
('wildcard_end', 'www.*'),
('wildcard_start', '*.eff.org'),
('wildcard_end', 'www.*'),
('wildcard_start', '*.org'),
(None, None),
(None, None)]
for i, winner in enumerate(winners):
self.assertEqual(winner,
parser.get_best_match(target_name, names[i]))
def test_get_all_certs_keys(self):
nparser = parser.NginxParser(self.config_path, self.ssl_options)
filep = nparser.abs_path('sites-enabled/example.com')
nparser.add_server_directives(filep,
set(['.example.com', 'example.*']),
[['ssl_certificate', 'foo.pem'],
['ssl_certificate_key', 'bar.key'],
['listen', '443 ssl']],
replace=False)
c_k = nparser.get_all_certs_keys()
self.assertEqual(set([('foo.pem', 'bar.key', filep)]), c_k)
def test_parse_server_ssl(self):
server = parser.parse_server([
['listen', '443']
])
self.assertFalse(server['ssl'])
server = parser.parse_server([
['listen', '443 ssl']
])
self.assertTrue(server['ssl'])
server = parser.parse_server([
['listen', '443'], ['ssl', 'off']
])
self.assertFalse(server['ssl'])
server = parser.parse_server([
['listen', '443'], ['ssl', 'on']
])
self.assertTrue(server['ssl'])
if __name__ == "__main__":
unittest.main() # pragma: no cover
|
{
"content_hash": "50561404fc27bd5780158fe82079b235",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 80,
"avg_line_length": 46.14624505928854,
"alnum_prop": 0.47349036402569594,
"repo_name": "dietsche/letsencrypt",
"id": "8ac995dfc4a3477f7f6ffeb3fae9a8a95e454240",
"size": "11675",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "certbot-nginx/certbot_nginx/tests/parser_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "48402"
},
{
"name": "Augeas",
"bytes": "5076"
},
{
"name": "Batchfile",
"bytes": "35005"
},
{
"name": "DIGITAL Command Language",
"bytes": "133"
},
{
"name": "Groff",
"bytes": "222"
},
{
"name": "Makefile",
"bytes": "37245"
},
{
"name": "Nginx",
"bytes": "4274"
},
{
"name": "Python",
"bytes": "1386757"
},
{
"name": "Shell",
"bytes": "123310"
}
],
"symlink_target": ""
}
|
import arcpy
import os
import sys
import traceback
import TestUtilities
def RunTest():
try:
arcpy.AddMessage("Starting Test: TestModelRadialLineOfSight")
if arcpy.CheckExtension("Spatial") == "Available":
arcpy.CheckOutExtension("Spatial")
else:
# Raise a custom exception
raise Exception("LicenseError")
# WORKAROUND
print("Creating New Scratch Workspace (Workaround)")
TestUtilities.createScratch()
# Verify the expected configuration exists
inputPointsFC = os.path.join(TestUtilities.inputGDB, "RlosObserverWebMerc")
inputSurface = os.path.join(TestUtilities.inputGDB, "Jbad_SRTM_USGS_EROS")
outputVizFC = os.path.join(TestUtilities.outputGDB, "RlosVizOutput")
toolbox = TestUtilities.toolbox
# Check For Valid Input
objects2Check = []
objects2Check.extend([inputPointsFC, inputSurface, toolbox])
for object2Check in objects2Check :
desc = arcpy.Describe(object2Check)
if desc == None :
raise Exception("Bad Input")
else :
print("Valid Object: " + desc.Name)
# Set environment settings
print("Running from: " + str(TestUtilities.currentPath))
print("Geodatabase path: " + str(TestUtilities.geodatabasePath))
arcpy.env.overwriteOutput = True
arcpy.env.scratchWorkspace = TestUtilities.scratchGDB
arcpy.ImportToolbox(toolbox, "VandR")
inputFeatureCount = int(arcpy.GetCount_management(inputPointsFC).getOutput(0))
print("Input FeatureClass: " + str(inputPointsFC))
print("Input Feature Count: " + str(inputFeatureCount))
if (inputFeatureCount < 1) :
print("Invalid Input Feature Count: " + str(inputFeatureCount))
forceVisibilityToInfinity = False
        ########################################################
# Execute the Model under test:
arcpy.RadialLineOfSight_VandR(inputPointsFC, inputSurface, outputVizFC, forceVisibilityToInfinity)
        ########################################################
# Verify the results
outputFeatureCountViz = int(arcpy.GetCount_management(outputVizFC).getOutput(0))
print("Output FeatureClass: " + str(outputVizFC))
print("Output Feature Count: " + str(outputFeatureCountViz))
if (outputFeatureCountViz < 1) :
print("Invalid Output Feature Count: " + str(outputFeatureCountViz))
raise Exception("Test Failed")
# WORKAROUND: delete scratch db
print("Deleting Scratch Workspace (Workaround)")
TestUtilities.deleteScratch()
print("Test Successful")
except arcpy.ExecuteError:
# Get the tool error messages
msgs = arcpy.GetMessages()
arcpy.AddError(msgs)
# return a system error code
sys.exit(-1)
except Exception as e:
# Get the traceback object
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
# Concatenate information together concerning the error into a message string
pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"
# Return python error messages for use in script tool or Python Window
arcpy.AddError(pymsg)
arcpy.AddError(msgs)
# return a system error code
sys.exit(-1)
finally:
        # Check in the Spatial Analyst extension
arcpy.CheckInExtension("Spatial")
RunTest()
|
{
"content_hash": "eb5d81a36cc983dffa7190ee52f17643",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 106,
"avg_line_length": 37.35576923076923,
"alnum_prop": 0.5837837837837838,
"repo_name": "JudTown17/solutions-geoprocessing-toolbox",
"id": "220acf5c28ebb8299da522248a0cf90421c3850f",
"size": "4859",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "visibility/test/test_viz_and_range/TestModelRadialLineOfSight.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "964652"
}
],
"symlink_target": ""
}
|
import tensorflow as tf
class Autoencoder(object):
def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus, optimizer = tf.train.AdamOptimizer()):
self.n_input = n_input
self.n_hidden = n_hidden
self.transfer = transfer_function
network_weights = self._initialize_weights()
self.weights = network_weights
# model
self.x = tf.placeholder(tf.float32, [None, self.n_input])
self.hidden = self.transfer(tf.add(tf.matmul(self.x, self.weights['w1']), self.weights['b1']))
self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])
# cost
self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
self.optimizer = optimizer.minimize(self.cost)
init = tf.global_variables_initializer()
self.sess = tf.Session()
self.sess.run(init)
def _initialize_weights(self):
all_weights = dict()
all_weights['w1'] = tf.get_variable("w1", shape=[self.n_input, self.n_hidden],
initializer=tf.contrib.layers.xavier_initializer())
all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32))
all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float32))
return all_weights
def partial_fit(self, X):
cost, opt = self.sess.run((self.cost, self.optimizer), feed_dict={self.x: X})
return cost
def calc_total_cost(self, X):
return self.sess.run(self.cost, feed_dict = {self.x: X})
def transform(self, X):
return self.sess.run(self.hidden, feed_dict={self.x: X})
def generate(self, hidden = None):
if hidden is None:
hidden = self.sess.run(tf.random_normal([1, self.n_hidden]))
return self.sess.run(self.reconstruction, feed_dict={self.hidden: hidden})
def reconstruct(self, X):
return self.sess.run(self.reconstruction, feed_dict={self.x: X})
def getWeights(self):
return self.sess.run(self.weights['w1'])
def getBiases(self):
return self.sess.run(self.weights['b1'])
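# Illustrative usage (the array `batch` is hypothetical and would be a NumPy
# array of shape [batch_size, n_input]; it is not defined in this file):
#   ae = Autoencoder(n_input=784, n_hidden=200)
#   for _ in range(10):
#       cost = ae.partial_fit(batch)
#   codes = ae.transform(batch)
#   reconstructions = ae.reconstruct(batch)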
|
{
"content_hash": "9b189ae0a42af6708be22955274c5299",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 114,
"avg_line_length": 38.186440677966104,
"alnum_prop": 0.6271637816245007,
"repo_name": "wangyang59/tf_models",
"id": "cde14aa4a993cb6997eeb99e8af31fb3d438cd28",
"size": "2253",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "autoencoder/autoencoder_models/Autoencoder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1353"
},
{
"name": "C++",
"bytes": "1216671"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33208"
},
{
"name": "Jupyter Notebook",
"bytes": "61098"
},
{
"name": "Makefile",
"bytes": "4763"
},
{
"name": "Protocol Buffer",
"bytes": "24249"
},
{
"name": "Python",
"bytes": "3406328"
},
{
"name": "Shell",
"bytes": "48362"
}
],
"symlink_target": ""
}
|
import re
from mcp21.package import MCPPackageBase
from window.outputpane import EVT_ROW_COL_CHANGED
class MCPPackage(MCPPackageBase):
def __init__(self, mcp):
MCPPackageBase.__init__(self, mcp)
self.package = 'dns-com-vmoo-client'
self.min = '1.0'
self.max = '1.0'
mcp.register(self, ['dns-com-vmoo-client-disconnect'])
def dispatch(self, msg):
if msg.message == 'dns-com-vmoo-client-disconnect': self.do_disconnect(msg)
def Initialize(self):
self.mcp.connection.output_pane.Bind(EVT_ROW_COL_CHANGED, self.send_screensize)
def do_disconnect(self, msg):
self.mcp.debug("Got a disconnect from dns-com-vmoo-client")
def mcp_negotiate_end(self):
self.send_info()
self.send_screensize()
def send_info(self):
# TODO - actual versioning please
self.mcp.server_notify(
'dns-com-vmoo-client-info', {
'name' : 'wxpymoo',
'text-version' : '0.2',
'internal-version' : '0.2',
}
)
def send_screensize(self, msg = None):
self.mcp.server_notify(
'dns-com-vmoo-client-screensize', {
'cols' : str(self.mcp.connection.output_pane.cols),
'rows' : str(self.mcp.connection.output_pane.rows),
}
)
|
{
"content_hash": "d89a06f8edec8414dd25d0e46533722a",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 87,
"avg_line_length": 31.454545454545453,
"alnum_prop": 0.5686416184971098,
"repo_name": "emersonrp/wxpymoo",
"id": "6ed770a260013af34aba3666df9d9920469e4234",
"size": "1384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mcp21/package/dns_com_vmoo_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "201427"
}
],
"symlink_target": ""
}
|
"""Core database logic for HaaS
This module defines a number of built-in database objects used by HaaS.
In addition, it provides some general infrastructure for dealing with the
database.
Extensions are permitted to create new database objects by subclassing from
`db.Model`.
"""
# from sqlalchemy import *
# from sqlalchemy.ext.declarative import declarative_base, declared_attr
# from sqlalchemy.orm import relationship, sessionmaker,backref
from flask.ext.sqlalchemy import SQLAlchemy
from subprocess import call, check_call, Popen, PIPE
from haas.flaskapp import app
from haas.config import cfg
from haas.dev_support import no_dry_run
import uuid
import xml.etree.ElementTree
db = SQLAlchemy(app)
# without setting this explicitly, we get a warning that this option
# will default to disabled in future versions (due to incurring a lot
# of overhead). We aren't using the relevant functionality, so let's
# just opt-in to the change now:
app.config.update(SQLALCHEMY_TRACK_MODIFICATIONS=False)
def init_db(uri=None):
"""Start up the DB connection.
`uri` is the uri to use for the databse. If it is None, the uri from the
config file will be used.
"""
if uri is None:
uri = cfg.get('database', 'uri')
app.config.update(SQLALCHEMY_DATABASE_URI=uri)
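# Illustrative only: the module docstring above says extensions may create new
# database objects by subclassing `db.Model`.  The class below is a
# hypothetical sketch of that pattern and is not used anywhere in HaaS.
class _ExampleExtensionRecord(db.Model):
    """A minimal extension-defined table (illustration only)."""
    id = db.Column(db.Integer, primary_key=True)
    label = db.Column(db.String, nullable=False)
    def __init__(self, label):
        self.label = label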
# A joining table for project's access to networks, which have a many to many
# relationship:
network_projects = db.Table(
'network_projects',
db.Column('project_id', db.ForeignKey('project.id')),
db.Column('network_id', db.ForeignKey('network.id')))
class Nic(db.Model):
"""a nic belonging to a Node"""
id = db.Column(db.Integer, primary_key=True)
label = db.Column(db.String, nullable=False)
# The Node to which the nic belongs:
owner_id = db.Column(db.ForeignKey('node.id'), nullable=False)
owner = db.relationship("Node", backref=db.backref('nics'))
# The mac address of the nic:
mac_addr = db.Column(db.String)
# The switch port to which the nic is attached:
port_id = db.Column(db.ForeignKey('port.id'))
port = db.relationship("Port",
backref=db.backref('nic', uselist=False))
def __init__(self, node, label, mac_addr):
self.owner = node
self.label = label
self.mac_addr = mac_addr
class Node(db.Model):
"""a (physical) machine"""
id = db.Column(db.Integer, primary_key=True)
label = db.Column(db.String, nullable=False)
# The project to which this node is allocated. If the project is null, the
# node is unallocated:
project_id = db.Column(db.ForeignKey('project.id'))
project = db.relationship("Project", backref=db.backref('nodes'))
# The Obm info is fetched from the obm class and its respective subclass
# pertaining to the node
obm_id = db.Column(db.Integer, db.ForeignKey('obm.id'), nullable=False)
obm = db.relationship("Obm",
uselist=False,
backref="node",
single_parent=True,
cascade='all, delete-orphan')
class Project(db.Model):
"""a collection of resources
A project may contain allocated nodes, networks, and headnodes.
"""
id = db.Column(db.Integer, primary_key=True)
label = db.Column(db.String, nullable=False)
def __init__(self, label):
"""Create a project with the given label."""
self.label = label
class Network(db.Model):
"""A link-layer network.
See docs/networks.md for more information on the parameters.
"""
id = db.Column(db.Integer, primary_key=True)
label = db.Column(db.String, nullable=False)
# The project to which the network belongs, or None if the network was
# created by the administrator. This field determines who can delete a
# network.
owner_id = db.Column(db.ForeignKey('project.id'))
owner = db.relationship("Project",
backref=db.backref('networks_created'),
foreign_keys=[owner_id])
# The project that has access to the network, or None if the network is
# public. This field determines who can connect a node or headnode to a
# network.
access = db.relationship("Project",
backref=db.backref('networks_access'),
secondary='network_projects')
# True if network_id was allocated by the driver; False if it was
# assigned by an administrator.
allocated = db.Column(db.Boolean)
# An identifier meaningful to the networking driver:
network_id = db.Column(db.String, nullable=False)
def __init__(self, owner, access, allocated, network_id, label):
"""Create a network.
        The network will belong to `owner`, be accessible to `access`, and
        have a symbolic name of `label`. `network_id`, as expected, is the
        identifier meaningful to the driver.
"""
self.network_id = network_id
self.owner = owner
self.access = access
self.allocated = allocated
self.label = label
class Port(db.Model):
"""a port on a switch
The port's label is an identifier that is meaningful only to the
corresponding switch's driver.
"""
id = db.Column(db.Integer, primary_key=True)
label = db.Column(db.String, nullable=False)
owner_id = db.Column(db.ForeignKey('switch.id'), nullable=False)
owner = db.relationship('Switch', backref=db.backref('ports'))
def __init__(self, label, switch):
"""Register a port on a switch."""
self.label = label
self.owner = switch
class Switch(db.Model):
"""A network switch.
This is meant to be subclassed by switch-specific drivers, implemented
by extensions.
Subclasses MUST override both ``validate`` and ``session``.
"""
id = db.Column(db.Integer, primary_key=True)
label = db.Column(db.String, nullable=False)
type = db.Column(db.String, nullable=False)
__mapper_args__ = {
'polymorphic_identity': 'switch',
'polymorphic_on': type,
}
@staticmethod
def validate(kwargs):
"""Verify that ``kwargs`` is a legal set of keyword args to ``__init__``
Raise a ``schema.SchemaError`` if the ``kwargs`` is invalid.
Note well: this is a *static* method; it will be invoked on the class.
"""
assert False, "Subclasses MUST override the validate method"
def session(self):
"""Return a session object for the switch.
Conceptually, a session is an active connection to the switch; it lets
HaaS avoid connecting and disconnecting for each change. the session
object must have the methods:
def apply_networking(self, action):
'''Apply the NetworkingAction ``action`` to the switch.
Action is guaranteed to reference a port object that is
attached to the correct switch.
'''
def disconnect(self):
'''Disconnect from the switch.
This will be called when HaaS is done with the session.
'''
def get_port_networks(self, ports):
'''Return a mapping from port objects to (channel, network ID)
pairs.
``ports`` is a list of port objects to collect information on.
The return value will be a dictionary of the form:
{
Port<"port-3">: [("vlan/native", "23"),
("vlan/52", "52")],
Port<"port-7">: [("vlan/23", "23")],
Port<"port-8">: [("vlan/native", "52")],
...
}
With one key for each element in the ``ports`` argument.
This method is only for use by the test suite.
'''
        Some drivers may do things that are not connection-oriented; if so,
they can just return a dummy object here. The recommended way to
handle this is to define the two methods above on the switch object,
and have ``session`` just return ``self``.
"""
class Obm(db.Model):
"""Obm superclass supporting various drivers
related to out of band management of servers.
"""
id = db.Column(db.Integer, primary_key=True)
type = db.Column(db.String, nullable=False)
__mapper_args__ = {
'polymorphic_on': type
}
@staticmethod
def validate(kwargs):
"""Verify that ``kwargs`` is a legal set of keywords args to ``__init__``
Raise a ``schema.SchemaError`` if the ``kwargs`` is invalid.
Note well: This is a *static* method; it will be invoked on the class.
"""
assert False, "Subclasses MUST override the validate method "
def power_cycle(self):
"""Power cycles the node.
Exact implementation is left to the subclasses.
"""
assert False, "Subclasses MUST override the power_cycle method "
def power_off(self):
""" Shuts off the node.
Exact implementation is left to the subclasses.
"""
assert False, "Subclasses MUST override the power_off method "
def start_console(self):
"""Starts logging to the console. """
assert False, "Subclasses MUST override the start_console method"
def stop_console(self):
"""Stops console logging. """
assert False, "Subclasses MUST override the stop_console method"
def delete_console(self):
assert False, "Subclasses MUST override the delete_console method"
def get_console(self):
assert False, "Subclasses MUST override the get_console method"
def get_console_log_filename(self):
assert False, "Subclasses MUST override the get_console_log_filename" \
"method"
def _on_virt_uri(args_list):
"""Make an argument list to libvirt tools use right URI.
This will work for virt-clone and virsh, at least. It gets the
appropriate endpoint URI from the config file.
"""
libvirt_endpoint = cfg.get('headnode', 'libvirt_endpoint')
return [args_list[0], '--connect', libvirt_endpoint] + args_list[1:]
class Headnode(db.Model):
"""A virtual machine used to administer a project."""
id = db.Column(db.Integer, primary_key=True)
label = db.Column(db.String, nullable=False)
# The project to which this Headnode belongs:
project_id = db.Column(db.ForeignKey('project.id'), nullable=False)
project = db.relationship(
"Project", backref=db.backref('headnodes', uselist=True))
# True iff there are unapplied changes to the Headnode:
dirty = db.Column(db.Boolean, nullable=False)
base_img = db.Column(db.String, nullable=False)
# We need a guaranteed unique name to generate the libvirt machine name;
# The name is therefore a function of a uuid:
uuid = db.Column(db.String, nullable=False, unique=True)
def __init__(self, project, label, base_img):
"""Create a headnode belonging to `project` with the given label."""
self.project = project
self.label = label
self.dirty = True
self.uuid = str(uuid.uuid1())
self.base_img = base_img
@no_dry_run
def create(self):
"""Creates the vm within libvirt, by cloning the base image.
The vm is not started at this time.
"""
check_call(_on_virt_uri(['virt-clone',
'-o', self.base_img,
'-n', self._vmname(),
'--auto-clone']))
for hnic in self.hnics:
hnic.create()
@no_dry_run
def delete(self):
"""Delete the vm, including associated storage"""
# Don't check return value. If the headnode was powered off, this
# will fail, and we don't care. If it fails for some other reason,
# then the following line will also fail, and we'll catch that error.
call(_on_virt_uri(['virsh', 'destroy', self._vmname()]))
check_call(_on_virt_uri(['virsh',
'undefine', self._vmname(),
'--remove-all-storage']))
@no_dry_run
def start(self):
"""Powers on the vm, which must have been previously created.
Once the headnode has been started once it is "frozen," and no changes
may be made to it, other than starting, stopping or deleting it.
"""
check_call(_on_virt_uri(['virsh', 'start', self._vmname()]))
check_call(_on_virt_uri(['virsh', 'autostart', self._vmname()]))
self.dirty = False
@no_dry_run
def stop(self):
"""Stop the vm.
This does a hard poweroff; the OS is not given a chance to react.
"""
check_call(_on_virt_uri(['virsh', 'destroy', self._vmname()]))
check_call(_on_virt_uri(
['virsh', 'autostart', '--disable', self._vmname()]))
def _vmname(self):
"""Returns the name (as recognized by libvirt) of this vm."""
return 'headnode-%s' % self.uuid
# This function returns a meaningful value, but also uses actual hardware.
# It has no_dry_run because the unit test for 'show_headnode' will call
# it. None is a fine return value there, because it will just put it into
# a JSON object.
@no_dry_run
def get_vncport(self):
"""Return the port that VNC is listening on, as an int.
If the VM is powered off, the return value may be None -- this is
        dependent on the configuration of libvirt. A powered-on VM will always
have a vnc port.
If the VM has not been created yet (and is therefore dirty) the return
value will be None.
"""
if self.dirty:
return None
p = Popen(_on_virt_uri(['virsh', 'dumpxml', self._vmname()]),
stdout=PIPE)
xmldump, _ = p.communicate()
root = xml.etree.ElementTree.fromstring(xmldump)
port = root.findall("./devices/graphics")[0].get('port')
        if port == '-1':
# No port allocated (yet)
return None
else:
return port
# No VNC service found, so no port available
return None
class Hnic(db.Model):
"""a network interface for a Headnode"""
id = db.Column(db.Integer, primary_key=True)
label = db.Column(db.String, nullable=False)
# The Headnode to which this Hnic belongs:
owner_id = db.Column(db.ForeignKey('headnode.id'), nullable=False)
owner = db.relationship("Headnode", backref=db.backref('hnics'))
# The network to which this Hnic is attached.
network_id = db.Column(db.ForeignKey('network.id'))
network = db.relationship("Network", backref=db.backref('hnics'))
def __init__(self, headnode, label):
"""Create an Hnic attached to the given headnode. with the given
label.
"""
self.owner = headnode
self.label = label
@no_dry_run
def create(self):
"""Create the hnic within livbirt.
XXX: This is a noop if the Hnic isn't connected to a network. This
means that the headnode won't have a corresponding nic, even a
disconnected one.
"""
if not self.network:
# It is non-trivial to make a NIC not connected to a network, so
# do nothing at all instead.
return
vlan_no = str(self.network.network_id)
bridge = 'br-vlan%s' % vlan_no
check_call(_on_virt_uri(['virsh',
'attach-interface', self.owner._vmname(),
'bridge', bridge,
'--config']))
class NetworkingAction(db.Model):
"""A journal entry representing a pending networking change."""
id = db.Column(db.Integer, primary_key=True)
nic_id = db.Column(db.ForeignKey('nic.id'), nullable=False)
new_network_id = db.Column(db.ForeignKey('network.id'), nullable=True)
channel = db.Column(db.String, nullable=False)
nic = db.relationship("Nic",
backref=db.backref('current_action', uselist=False))
new_network = db.relationship("Network",
backref=db.backref('scheduled_nics',
uselist=True))
class NetworkAttachment(db.Model):
"""An attachment of a network to a particular nic on a channel"""
id = db.Column(db.Integer, primary_key=True)
# TODO: it would be nice to place explicit unique constraints on some
# things:
#
# * (nic_id, network_id)
# * (nic_id, channel)
nic_id = db.Column(db.ForeignKey('nic.id'), nullable=False)
network_id = db.Column(db.ForeignKey('network.id'), nullable=False)
channel = db.Column(db.String, nullable=False)
nic = db.relationship('Nic', backref=db.backref('attachments'))
network = db.relationship('Network', backref=db.backref('attachments'))
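# Illustrative sketch (an assumption, not part of the schema above) of how the
# unique constraints mentioned in the TODO could be expressed with SQLAlchemy:
#
#   class NetworkAttachment(db.Model):
#       __table_args__ = (
#           db.UniqueConstraint('nic_id', 'network_id'),
#           db.UniqueConstraint('nic_id', 'channel'),
#       )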
|
{
"content_hash": "7fb54398883ee6e8e7fd9bcb86120acc",
"timestamp": "",
"source": "github",
"line_count": 482,
"max_line_length": 81,
"avg_line_length": 35.446058091286304,
"alnum_prop": 0.6112379280070237,
"repo_name": "henn/hil_sahil",
"id": "fc853e985e445b81c2fafbd9c9ff6ca9cf639a4f",
"size": "17693",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "haas/model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "452"
},
{
"name": "Python",
"bytes": "415731"
},
{
"name": "Shell",
"bytes": "5355"
}
],
"symlink_target": ""
}
|
import json
import os
import re
import subprocess
import sys
import tempfile
import urllib2
import getpass
try:
import jira.client
JIRA_IMPORTED = True
except ImportError:
JIRA_IMPORTED = False
# Location of your Parquet git development area
PARQUET_HOME = os.path.abspath(__file__).rsplit("/", 2)[0]
PROJECT_NAME = PARQUET_HOME.rsplit("/", 1)[1]
print "PARQUET_HOME = " + PARQUET_HOME
print "PROJECT_NAME = " + PROJECT_NAME
def lines_from_cmd(cmd):
return subprocess.check_output(cmd.split(" ")).strip().split("\n")
# Remote name which points to the GitHub site
PR_REMOTE_NAME = os.environ.get("PR_REMOTE_NAME")
available_remotes = lines_from_cmd("git remote")
if PR_REMOTE_NAME is not None:
if PR_REMOTE_NAME not in available_remotes:
print "ERROR: git remote '%s' is not defined." % PR_REMOTE_NAME
sys.exit(-1)
else:
remote_candidates = ["github-apache", "apache-github"]
# Get first available remote from the list of candidates
PR_REMOTE_NAME = next((remote for remote in available_remotes if remote in remote_candidates), None)
# Remote name which points to Apache git
PUSH_REMOTE_NAME = os.environ.get("PUSH_REMOTE_NAME", "apache")
# ASF JIRA username
JIRA_USERNAME = os.environ.get("JIRA_USERNAME")
# ASF JIRA password
JIRA_PASSWORD = os.environ.get("JIRA_PASSWORD")
GITHUB_BASE = "https://github.com/apache/" + PROJECT_NAME + "/pull"
GITHUB_API_BASE = "https://api.github.com/repos/apache/" + PROJECT_NAME
JIRA_BASE = "https://issues.apache.org/jira/browse"
JIRA_API_BASE = "https://issues.apache.org/jira"
# Prefix added to temporary branches
BRANCH_PREFIX = "PR_TOOL"
os.chdir(PARQUET_HOME)
def get_json(url):
try:
return json.load(urllib2.urlopen(url))
except urllib2.HTTPError as e:
print "Unable to fetch URL, exiting: %s" % url
sys.exit(-1)
def fail(msg):
print msg
clean_up()
sys.exit(-1)
def run_cmd(cmd):
try:
if isinstance(cmd, list):
return subprocess.check_output(cmd)
else:
return subprocess.check_output(cmd.split(" "))
except subprocess.CalledProcessError as e:
# this avoids hiding the stdout / stderr of failed processes
print 'Command failed: %s' % cmd
print 'With output:'
print '--------------'
print e.output
print '--------------'
raise e
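# Illustrative note on the helper above: a string command is split on single
# spaces, so pass a list when an argument itself contains spaces, e.g.
#   run_cmd("git rev-parse HEAD")
#   run_cmd(["git", "log", "--pretty=format:%an <%ae>"])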
def continue_maybe(prompt):
result = raw_input("\n%s (y/n): " % prompt)
if result.lower() != "y":
fail("Okay, exiting")
original_head = run_cmd("git rev-parse HEAD")[:8]
def clean_up():
print "Restoring head pointer to %s" % original_head
run_cmd("git checkout %s" % original_head)
branches = run_cmd("git branch").replace(" ", "").split("\n")
for branch in filter(lambda x: x.startswith(BRANCH_PREFIX), branches):
print "Deleting local branch %s" % branch
run_cmd("git branch -D %s" % branch)
# merge the requested PR and return the merge hash
def merge_pr(pr_num, target_ref):
pr_branch_name = "%s_MERGE_PR_%s" % (BRANCH_PREFIX, pr_num)
target_branch_name = "%s_MERGE_PR_%s_%s" % (BRANCH_PREFIX, pr_num, target_ref.upper())
run_cmd("git fetch %s pull/%s/head:%s" % (PR_REMOTE_NAME, pr_num, pr_branch_name))
run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, target_ref, target_branch_name))
run_cmd("git checkout %s" % target_branch_name)
had_conflicts = False
try:
run_cmd(['git', 'merge', pr_branch_name, '--squash'])
except Exception as e:
msg = "Error merging: %s\nWould you like to manually fix-up this merge?" % e
continue_maybe(msg)
msg = "Okay, please fix any conflicts and 'git add' conflicting files... Finished?"
continue_maybe(msg)
had_conflicts = True
commit_authors = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
'--pretty=format:%an <%ae>']).split("\n")
distinct_authors = sorted(set(commit_authors),
key=lambda x: commit_authors.count(x), reverse=True)
primary_author = distinct_authors[0]
commits = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
'--pretty=format:%h [%an] %s']).split("\n\n")
merge_message_flags = []
merge_message_flags += ["-m", title]
if body != None:
merge_message_flags += ["-m", body]
authors = "\n".join(["Author: %s" % a for a in distinct_authors])
merge_message_flags += ["-m", authors]
if had_conflicts:
committer_name = run_cmd("git config --get user.name").strip()
committer_email = run_cmd("git config --get user.email").strip()
message = "This patch had conflicts when merged, resolved by\nCommitter: %s <%s>" % (
committer_name, committer_email)
merge_message_flags += ["-m", message]
# The string "Closes #%s" string is required for GitHub to correctly close the PR
merge_message_flags += [
"-m",
"Closes #%s from %s and squashes the following commits:" % (pr_num, pr_repo_desc)]
for c in commits:
merge_message_flags += ["-m", c]
run_cmd(['git', 'commit', '--author="%s"' % primary_author] + merge_message_flags)
continue_maybe("Merge complete (local ref %s). Push to %s?" % (
target_branch_name, PUSH_REMOTE_NAME))
try:
run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, target_branch_name, target_ref))
except Exception as e:
clean_up()
fail("Exception while pushing: %s" % e)
merge_hash = run_cmd("git rev-parse %s" % target_branch_name)[:8]
clean_up()
print("Pull request #%s merged!" % pr_num)
print("Merge hash: %s" % merge_hash)
return merge_hash
def cherry_pick(pr_num, merge_hash, default_branch):
pick_ref = raw_input("Enter a branch name [%s]: " % default_branch)
if pick_ref == "":
pick_ref = default_branch
pick_branch_name = "%s_PICK_PR_%s_%s" % (BRANCH_PREFIX, pr_num, pick_ref.upper())
run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, pick_ref, pick_branch_name))
run_cmd("git checkout %s" % pick_branch_name)
run_cmd("git cherry-pick -sx %s" % merge_hash)
continue_maybe("Pick complete (local ref %s). Push to %s?" % (
pick_branch_name, PUSH_REMOTE_NAME))
try:
run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, pick_branch_name, pick_ref))
except Exception as e:
clean_up()
fail("Exception while pushing: %s" % e)
pick_hash = run_cmd("git rev-parse %s" % pick_branch_name)[:8]
clean_up()
print("Pull request #%s picked into %s!" % (pr_num, pick_ref))
print("Pick hash: %s" % pick_hash)
return pick_ref
def fix_version_from_branch(branch, versions):
# Note: Assumes this is a sorted (newest->oldest) list of un-released versions
if branch == "master":
return versions[0]
else:
branch_ver = branch.replace("branch-", "")
return filter(lambda x: x.name.startswith(branch_ver), versions)[-1]
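# Worked example (illustrative version numbers): with un-released versions
# sorted newest->oldest as ["2.0.0", "1.9.1", "1.9.0"], branch "master" maps to
# "2.0.0", while branch "branch-1.9" maps to the oldest matching entry, "1.9.0".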
def extract_jira_id(title):
    m = re.search(r'^(PARQUET-[0-9]+)\b.*$', title, re.IGNORECASE)
    if m:
        return m.group(1).upper()
    else:
        fail("PR title should be prefixed by a jira id \"PARQUET-XXX: ...\", found: \"%s\"" % title)
def check_jira(title):
    jira_id = extract_jira_id(title)
asf_jira = jira.client.JIRA({'server': JIRA_API_BASE},
basic_auth=(JIRA_USERNAME, JIRA_PASSWORD))
try:
issue = asf_jira.issue(jira_id)
except Exception as e:
fail("ASF JIRA could not find %s\n%s" % (jira_id, e))
def resolve_jira(title, merge_branches, comment):
asf_jira = jira.client.JIRA({'server': JIRA_API_BASE},
basic_auth=(JIRA_USERNAME, JIRA_PASSWORD))
    default_jira_id = extract_jira_id(title)
jira_id = raw_input("Enter a JIRA id [%s]: " % default_jira_id)
if jira_id == "":
jira_id = default_jira_id
try:
issue = asf_jira.issue(jira_id)
except Exception as e:
fail("ASF JIRA could not find %s\n%s" % (jira_id, e))
cur_status = issue.fields.status.name
cur_summary = issue.fields.summary
cur_assignee = issue.fields.assignee
if cur_assignee is None:
cur_assignee = "NOT ASSIGNED!!!"
else:
cur_assignee = cur_assignee.displayName
if cur_status == "Resolved" or cur_status == "Closed":
fail("JIRA issue %s already has status '%s'" % (jira_id, cur_status))
print ("=== JIRA %s ===" % jira_id)
print ("summary\t\t%s\nassignee\t%s\nstatus\t\t%s\nurl\t\t%s/%s\n" % (
cur_summary, cur_assignee, cur_status, JIRA_BASE, jira_id))
versions = asf_jira.project_versions("PARQUET")
versions = sorted(versions, key=lambda x: x.name, reverse=True)
versions = filter(lambda x: x.raw['released'] is False, versions)
default_fix_versions = map(lambda x: fix_version_from_branch(x, versions).name, merge_branches)
for v in default_fix_versions:
# Handles the case where we have forked a release branch but not yet made the release.
# In this case, if the PR is committed to the master branch and the release branch, we
# only consider the release branch to be the fix version. E.g. it is not valid to have
# both 1.1.0 and 1.0.0 as fix versions.
(major, minor, patch) = v.split(".")
if patch == "0":
previous = "%s.%s.%s" % (major, int(minor) - 1, 0)
if previous in default_fix_versions:
default_fix_versions = filter(lambda x: x != v, default_fix_versions)
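    # Worked example (illustrative): if the defaults were ["1.1.0", "1.0.0"],
    # the previous release of 1.1.0 is 1.0.0, which is already in the list, so
    # 1.1.0 is dropped and only 1.0.0 remains as the default fix version.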
default_fix_versions = ",".join(default_fix_versions)
fix_versions = raw_input("Enter comma-separated fix version(s) [%s]: " % default_fix_versions)
if fix_versions == "":
fix_versions = default_fix_versions
fix_versions = fix_versions.replace(" ", "").split(",")
def get_version_json(version_str):
return filter(lambda v: v.name == version_str, versions)[0].raw
jira_fix_versions = map(lambda v: get_version_json(v), fix_versions)
resolve = filter(lambda a: a['name'] == "Resolve Issue", asf_jira.transitions(jira_id))[0]
asf_jira.transition_issue(
jira_id, resolve["id"], fixVersions=jira_fix_versions, comment=comment)
print "Succesfully resolved %s with fixVersions=%s!" % (jira_id, fix_versions)
if JIRA_IMPORTED:
jira_login_accepted = False
while not jira_login_accepted:
if JIRA_USERNAME:
print "JIRA username: %s" % JIRA_USERNAME
else:
JIRA_USERNAME = raw_input("Enter JIRA username: ")
if not JIRA_PASSWORD:
JIRA_PASSWORD = getpass.getpass("Enter JIRA password: ")
try:
asf_jira = jira.client.JIRA({'server': JIRA_API_BASE},
basic_auth=(JIRA_USERNAME, JIRA_PASSWORD))
jira_login_accepted = True
except Exception as e:
print "\nJIRA login failed, try again\n"
JIRA_USERNAME = None
JIRA_PASSWORD = None
else:
print "WARNING: Could not find jira python library. Run 'sudo pip install jira' to install."
print "The tool will continue to run but won't handle the JIRA."
print
branches = get_json("%s/branches" % GITHUB_API_BASE)
branch_names = filter(lambda x: x.startswith("branch-"), [x['name'] for x in branches])
# Assumes branch names can be sorted lexicographically
# Julien: I commented this out as we don't have any "branch-*" branch yet
#latest_branch = sorted(branch_names, reverse=True)[0]
pr_num = raw_input("Which pull request would you like to merge? (e.g. 34): ")
pr = get_json("%s/pulls/%s" % (GITHUB_API_BASE, pr_num))
url = pr["url"]
title = pr["title"]
if JIRA_IMPORTED:
check_jira(title)
body = pr["body"]
target_ref = pr["base"]["ref"]
user_login = pr["user"]["login"]
base_ref = pr["head"]["ref"]
pr_repo_desc = "%s/%s" % (user_login, base_ref)
if pr["merged"] is True:
print "Pull request %s has already been merged, assuming you want to backport" % pr_num
merge_commit_desc = run_cmd([
'git', 'log', '--merges', '--first-parent',
'--grep=pull request #%s' % pr_num, '--oneline']).split("\n")[0]
if merge_commit_desc == "":
fail("Couldn't find any merge commit for #%s, you may need to update HEAD." % pr_num)
merge_hash = merge_commit_desc[:7]
message = merge_commit_desc[8:]
print "Found: %s" % message
maybe_cherry_pick(pr_num, merge_hash, latest_branch)
sys.exit(0)
if not bool(pr["mergeable"]):
msg = "Pull request %s is not mergeable in its current form.\n" % pr_num + \
"Continue? (experts only!)"
continue_maybe(msg)
print ("\n=== Pull Request #%s ===" % pr_num)
print ("title\t%s\nsource\t%s\ntarget\t%s\nurl\t%s" % (
title, pr_repo_desc, target_ref, url))
continue_maybe("Proceed with merging pull request #%s?" % pr_num)
merged_refs = [target_ref]
merge_hash = merge_pr(pr_num, target_ref)
pick_prompt = "Would you like to pick %s into another branch?" % merge_hash
while raw_input("\n%s (y/n): " % pick_prompt).lower() == "y":
merged_refs = merged_refs + [cherry_pick(pr_num, merge_hash, latest_branch)]
if JIRA_IMPORTED:
continue_maybe("Would you like to update the associated JIRA?")
jira_comment = "Issue resolved by pull request %s\n[%s/%s]" % (pr_num, GITHUB_BASE, pr_num)
resolve_jira(title, merged_refs, jira_comment)
else:
print "WARNING: Could not find jira python library. Run 'sudo pip install jira' to install."
print "Exiting without trying to close the associated JIRA."
|
{
"content_hash": "1481ff49114b4e2d4201651f47d73d50",
"timestamp": "",
"source": "github",
"line_count": 367,
"max_line_length": 104,
"avg_line_length": 37.11716621253406,
"alnum_prop": 0.6219351049772427,
"repo_name": "apache/parquet-mr",
"id": "c67092472125e60c15e8a0263a1efb852a0b6634",
"size": "14771",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "dev/merge_parquet_pr.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "5572665"
},
{
"name": "Python",
"bytes": "14771"
},
{
"name": "Scala",
"bytes": "8436"
},
{
"name": "Shell",
"bytes": "14860"
},
{
"name": "Thrift",
"bytes": "10354"
}
],
"symlink_target": ""
}
|
import unittest
from Src.BioAnalyzer.DataAccess.Entities.GenePrioritization.DifferentialDnaMethylationLevelValue import \
DifferentialDnaMethylationLevelValue
from Src.BioAnalyzer.DataAccess.Entities.GenePrioritization.LocalDifferentialDnaMethylationSample import \
LocalDifferentialDnaMethylationSample
class LocalDifferentialDnaMethylationSampleTest(unittest.TestCase):
def test_instance(self):
diff_dna_methylation_sample = LocalDifferentialDnaMethylationSample(**{
'patient_id':'TCGA-A7-A0DB',
'values':[{'id_entrez':1, 'value':0.1, 'is_significant':True, 'status':1},
{'id_entrez': 12, 'value': 0.1, 'is_significant': True, 'status': 1},
{'id_entrez': 1, 'value': 0.1, 'is_significant': True, 'status': 1},
{'id_entrez': 13, 'value': 0.1, 'is_significant': True, 'status': 1}]})
self.assertEqual(diff_dna_methylation_sample.patient_id, 'TCGA-A7-A0DB')
self.assertTrue(len(diff_dna_methylation_sample.values) == 3)
def test_properties(self):
diff_dna_methylation_sample = LocalDifferentialDnaMethylationSample()
diff_dna_methylation_sample.patient_id='TCGA-A7-A0DB'
diff_dna_methylation_sample.values=[DifferentialDnaMethylationLevelValue(**{'id_entrez':1,
'value':0.1,
'is_significant':True,
'status':1}),
DifferentialDnaMethylationLevelValue(**{'id_entrez':3,
'value':0.1,
'is_significant':True,
'status':1}),
DifferentialDnaMethylationLevelValue(**{'id_entrez':1,
'value':0.1,
'is_significant':True,
'status':1}),
DifferentialDnaMethylationLevelValue(**{'id_entrez':5,
'value':0.1,
'is_significant':True,
'status':1})]
self.assertEqual(diff_dna_methylation_sample.patient_id, 'TCGA-A7-A0DB')
self.assertTrue(len(diff_dna_methylation_sample.values) == 3)
def test_equal(self):
diff_dna_methylation_sample = [LocalDifferentialDnaMethylationSample(**{
'patient_id':'TCGA-A7-A0DB',
'values':[{'id_entrez':1, 'value':0.1, 'is_significant':True, 'status':1},
{'id_entrez': 12, 'value': 0.1, 'is_significant': True, 'status': 1},
{'id_entrez': 1, 'value': 0.1, 'is_significant': True, 'status': 1},
{'id_entrez': 13, 'value': 0.1, 'is_significant': True, 'status': 1}]}),
LocalDifferentialDnaMethylationSample(**{
'patient_id':'TCGA-A7-A1DB',
'values':[{'id_entrez':1, 'value':0.1, 'is_significant':True, 'status':1},
{'id_entrez': 12, 'value': 0.1, 'is_significant': True, 'status': 1},
{'id_entrez': 1, 'value': 0.1, 'is_significant': True, 'status': 1},
{'id_entrez': 13, 'value': 0.1, 'is_significant': True, 'status': 1}]}),
LocalDifferentialDnaMethylationSample(**{
'patient_id':'TCGA-A7-A0DB',
'values':[{'id_entrez':1, 'value':0.1, 'is_significant':True, 'status':1},
{'id_entrez': 12, 'value': 0.1, 'is_significant': True, 'status': 1},
{'id_entrez': 1, 'value': 0.1, 'is_significant': True, 'status': 1},
{'id_entrez': 13, 'value': 0.1, 'is_significant': True, 'status': 1}]}),
LocalDifferentialDnaMethylationSample(**{
'patient_id':'TCGA-A7-A0CB',
'values':[{'id_entrez':1, 'value':0.1, 'is_significant':True, 'status':1},
{'id_entrez': 12, 'value': 0.1, 'is_significant': True, 'status': 1},
{'id_entrez': 1, 'value': 0.1, 'is_significant': True, 'status': 1},
{'id_entrez': 13, 'value': 0.1, 'is_significant': True, 'status': 1}]})]
self.assertTrue(len(list(set(diff_dna_methylation_sample))) == 3)
def test_validation(self):
diff_dna_methylation_sample = LocalDifferentialDnaMethylationSample()
diff_dna_methylation_sample.patient_id = 'TCGA-A7-A0DB'
diff_dna_methylation_sample.values=[DifferentialDnaMethylationLevelValue(**{'id_entrez':1, 'value':0.1, 'is_significant':True, 'status':1})]
try:
diff_dna_methylation_sample.validate()
except ValueError:
self.fail('Encountered an unexpected exception.')
def test_validation_fail(self):
diff_dna_methylation_sample = LocalDifferentialDnaMethylationSample(**{
'values':[{'id_entrez':1, 'value':0.1, 'is_significant':True, 'status':1}]})
self.assertRaises(ValueError, diff_dna_methylation_sample.validate)
def test_as_dict(self):
diff_dna_methylation_sample = LocalDifferentialDnaMethylationSample(**{
'patient_id':'TCGA-A7-A0DB',
'values':[{'id_entrez':1, 'value':0.1, 'is_significant':True, 'status':1},
{'id_entrez': 12, 'value': 0.1, 'is_significant': True, 'status': 1},
{'id_entrez': 1, 'value': 0.1, 'is_significant': True, 'status': 1},
{'id_entrez': 13, 'value': 0.1, 'is_significant': True, 'status': 1}]})
diff_dna_methylation_sample_dict = diff_dna_methylation_sample.as_dict()
self.assertTrue('patient_id' in diff_dna_methylation_sample_dict)
self.assertEqual(diff_dna_methylation_sample_dict['patient_id'], 'TCGA-A7-A0DB')
self.assertTrue('values' in diff_dna_methylation_sample_dict)
        self.assertEqual(len(diff_dna_methylation_sample_dict['values']), 3)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "62fe24a39fd93d6d8241d20cd77989fb",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 148,
"avg_line_length": 72.08490566037736,
"alnum_prop": 0.43672294202329537,
"repo_name": "cemarchi/biosphere",
"id": "dcee2e0bf7195cf3524f511d93b20332e597cd67",
"size": "7641",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Tests/BioAnalyzer/DataAccess/Entities/GenePrioritization/LocalDifferentialDnaMethylationSampleTest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "822707"
}
],
"symlink_target": ""
}
|
import os
import struct
import numpy as np
import scipy.misc
import skimage.exposure
def read_gnt_in_directory(gnt_dirpath):
def samples(f):
header_size = 10
# read samples from f until no bytes remaining
while True:
header = np.fromfile(f, dtype='uint8', count=header_size)
if not header.size: break
sample_size = header[0] + (header[1]<<8) + (header[2]<<16) + (header[3]<<24)
tagcode = header[5] + (header[4]<<8)
width = header[6] + (header[7]<<8)
height = header[8] + (header[9]<<8)
assert header_size + width*height == sample_size
bitmap = np.fromfile(f, dtype='uint8', count=width*height).reshape((height, width))
yield bitmap, tagcode
for file_name in os.listdir(gnt_dirpath):
if file_name.endswith('.gnt'):
file_path = os.path.join(gnt_dirpath, file_name)
with open(file_path, 'rb') as f:
for bitmap, tagcode in samples(f):
yield bitmap, tagcode
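# Worked example of the 10-byte GNT header decoded above (byte values chosen
# purely for illustration): header = [0x0A, 0x10, 0, 0, 0xB0, 0xA1, 0x40, 0,
# 0x40, 0] gives sample_size = 0x100A = 4106, tagcode = 0xB0A1 and
# width = height = 64, so 10 + 64*64 == 4106 satisfies the assertion;
# tagcode_to_unicode(0xB0A1) decodes the first GB2312 hanzi, '啊'.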
def normalize_bitmap(bitmap):
# pad the bitmap to make it squared
pad_size = abs(bitmap.shape[0]-bitmap.shape[1]) // 2
if bitmap.shape[0] < bitmap.shape[1]:
pad_dims = ((pad_size, pad_size), (0, 0))
else:
pad_dims = ((0, 0), (pad_size, pad_size))
bitmap = np.lib.pad(bitmap, pad_dims, mode='constant', constant_values=255)
# rescale and add empty border
bitmap = scipy.misc.imresize(bitmap, (64 - 4*2, 64 - 4*2))
bitmap = np.lib.pad(bitmap, ((4, 4), (4, 4)), mode='constant', constant_values=255)
assert bitmap.shape == (64, 64)
bitmap = np.expand_dims(bitmap, axis=0)
assert bitmap.shape == (1, 64, 64)
return bitmap
def preprocess_bitmap(bitmap):
# contrast stretching
p2, p98 = np.percentile(bitmap, (2, 98))
assert abs(p2-p98) > 10
bitmap = skimage.exposure.rescale_intensity(bitmap, in_range=(p2, p98))
# from skimage.filters import threshold_otsu
# thresh = threshold_otsu(bitmap)
# bitmap = bitmap > thresh
return bitmap
def tagcode_to_unicode(tagcode):
return struct.pack('>H', tagcode).decode('gb2312')
def unicode_to_tagcode(tagcode_unicode):
return struct.unpack('>H', tagcode_unicode.encode('gb2312'))[0]
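# Usage sketch (illustrative; the directory name and the ordering of the two
# preprocessing helpers are assumptions):
#
#   for bitmap, tagcode in read_gnt_in_directory('HWDB1.1trn_gnt'):
#       label = tagcode_to_unicode(tagcode)
#       sample = normalize_bitmap(preprocess_bitmap(bitmap))  # shape (1, 64, 64)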
|
{
"content_hash": "45c1b6dcc240bd17d59624e3d5d34e3a",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 95,
"avg_line_length": 33.30434782608695,
"alnum_prop": 0.608355091383812,
"repo_name": "integeruser/CASIA-HWDB1.1-cnn",
"id": "0062db4dcf85f9343a2141f33aa67f5bc9a44f21",
"size": "2298",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2517"
},
{
"name": "Python",
"bytes": "128373"
}
],
"symlink_target": ""
}
|
from string import Template
from datetime import date
bitcoinDir = "./";
inFile = bitcoinDir+"/share/qt/Info.plist"
outFile = "GeoCoin-Qt.app/Contents/Info.plist"
version = "unknown";
fileForGrabbingVersion = bitcoinDir+"bitcoin-qt.pro"
for line in open(fileForGrabbingVersion):
lineArr = line.replace(" ", "").split("=");
if lineArr[0].startswith("VERSION"):
version = lineArr[1].replace("\n", "");
fIn = open(inFile, "r")
fileContent = fIn.read()
s = Template(fileContent)
newFileContent = s.substitute(VERSION=version,YEAR=date.today().year)
fOut = open(outFile, "w");
fOut.write(newFileContent);
print "Info.plist fresh created"
|
{
"content_hash": "20eab88f4fcdd3a0e847031ff7dd6e8d",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 69,
"avg_line_length": 27.208333333333332,
"alnum_prop": 0.7090352220520674,
"repo_name": "cryptokoin/geocoinq",
"id": "02d4f6fe0fee7ed2355b9d77de6aa2399cfe66c1",
"size": "895",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "share/qt/clean_mac_info_plist.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "336845"
},
{
"name": "C++",
"bytes": "2647210"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Makefile",
"bytes": "7786"
},
{
"name": "Objective-C",
"bytes": "1052"
},
{
"name": "Objective-C++",
"bytes": "5864"
},
{
"name": "Python",
"bytes": "69383"
},
{
"name": "Shell",
"bytes": "8319"
}
],
"symlink_target": ""
}
|
from matplotlib import pyplot as plt
import numpy as np
import scipy.io as sio # Submodule to load a GNU Octave/MATLAB file
image = plt.imread('input.png')
bin_mask = sio.loadmat('mask.mat')['bin_mask'].astype('bool')
mask = np.dstack((bin_mask, bin_mask, bin_mask))
image[~mask] = 0
plt.imsave('output.png', image)
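# Shape note (illustrative, assuming an HxWx3 input image): bin_mask is HxW, so
# np.dstack stacks it into an HxWx3 boolean mask and image[~mask] = 0 zeroes
# all three channels outside the masked region before saving.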
|
{
"content_hash": "f5b9bc1c739587e1f0af1cfeaa05bcec",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 67,
"avg_line_length": 26.666666666666668,
"alnum_prop": 0.7125,
"repo_name": "milq/computer-vision-resources",
"id": "2872db656bac34cb6132fb84f05b4d61e48741fe",
"size": "320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/scripts/python/apply_bin_mask_to_image.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "552"
},
{
"name": "CMake",
"bytes": "203"
},
{
"name": "Python",
"bytes": "20744"
}
],
"symlink_target": ""
}
|
import py
from rpython.jit.metainterp.test import test_string
from rpython.jit.backend.ppc.test.support import JitPPCMixin
class TestString(JitPPCMixin, test_string.TestLLtype):
# for the individual tests see
# ====> ../../../metainterp/test/test_string.py
pass
class TestUnicode(JitPPCMixin, test_string.TestLLtypeUnicode):
# for the individual tests see
# ====> ../../../metainterp/test/test_string.py
pass
|
{
"content_hash": "e34e824d6653f5669c6c722bdcda4f51",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 62,
"avg_line_length": 33.46153846153846,
"alnum_prop": 0.7172413793103448,
"repo_name": "oblique-labs/pyVM",
"id": "8f34e4595f7331f7688e3ade12f480aa7722f20d",
"size": "435",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rpython/jit/backend/ppc/test/test_string.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "161293"
},
{
"name": "Awk",
"bytes": "271"
},
{
"name": "Batchfile",
"bytes": "5289"
},
{
"name": "C",
"bytes": "771638"
},
{
"name": "C++",
"bytes": "12850"
},
{
"name": "Emacs Lisp",
"bytes": "3149"
},
{
"name": "HCL",
"bytes": "155"
},
{
"name": "M4",
"bytes": "12737"
},
{
"name": "Makefile",
"bytes": "35222"
},
{
"name": "Objective-C",
"bytes": "2224"
},
{
"name": "Python",
"bytes": "18329219"
},
{
"name": "Shell",
"bytes": "15396"
},
{
"name": "Vim script",
"bytes": "1107"
}
],
"symlink_target": ""
}
|
from io import StringIO
from antlr4.Recognizer import Recognizer
from antlr4.RuleContext import RuleContext
class SemanticContext(object):
#
# The default {@link SemanticContext}, which is semantically equivalent to
# a predicate of the form {@code {true}?}.
#
NONE = None
#
# For context independent predicates, we evaluate them without a local
# context (i.e., null context). That way, we can evaluate them without
# having to create proper rule-specific context during prediction (as
# opposed to the parser, which creates them naturally). In a practical
# sense, this avoids a cast exception from RuleContext to myruleContext.
#
# <p>For context dependent predicates, we must pass in a local context so that
# references such as $arg evaluate properly as _localctx.arg. We only
# capture context dependent predicates in the context in which we begin
# prediction, so we passed in the outer context here in case of context
# dependent predicate evaluation.</p>
#
def eval(self, parser:Recognizer , outerContext:RuleContext ):
pass
#
# Evaluate the precedence predicates for the context and reduce the result.
#
# @param parser The parser instance.
# @param outerContext The current parser context object.
# @return The simplified semantic context after precedence predicates are
# evaluated, which will be one of the following values.
# <ul>
# <li>{@link #NONE}: if the predicate simplifies to {@code true} after
# precedence predicates are evaluated.</li>
# <li>{@code null}: if the predicate simplifies to {@code false} after
# precedence predicates are evaluated.</li>
# <li>{@code this}: if the semantic context is not changed as a result of
# precedence predicate evaluation.</li>
# <li>A non-{@code null} {@link SemanticContext}: the new simplified
# semantic context after precedence predicates are evaluated.</li>
# </ul>
#
def evalPrecedence(self, parser:Recognizer, outerContext:RuleContext):
return self
# need forward declaration
AND = None
def andContext(a:SemanticContext, b:SemanticContext):
if a is None or a is SemanticContext.NONE:
return b
if b is None or b is SemanticContext.NONE:
return a
result = AND(a, b)
if len(result.opnds) == 1:
return result.opnds[0]
else:
return result
# need forward declaration
OR = None
def orContext(a:SemanticContext, b:SemanticContext):
if a is None:
return b
if b is None:
return a
if a is SemanticContext.NONE or b is SemanticContext.NONE:
return SemanticContext.NONE
result = OR(a, b)
if len(result.opnds) == 1:
return result.opnds[0]
else:
return result
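# Illustrative behaviour of the combinators above (p is an arbitrary
# SemanticContext instance): andContext(SemanticContext.NONE, p) returns p,
# because NONE acts as "true" and can be dropped from a conjunction, while
# orContext(SemanticContext.NONE, p) returns NONE, because a disjunction with
# "true" is always true.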
def filterPrecedencePredicates(collection:list):
    result = []
    for context in collection:
        if isinstance(context, PrecedencePredicate):
            result.append(context)
    return result
class Predicate(SemanticContext):
def __init__(self, ruleIndex:int=-1, predIndex:int=-1, isCtxDependent:bool=False):
self.ruleIndex = ruleIndex
self.predIndex = predIndex
self.isCtxDependent = isCtxDependent # e.g., $i ref in pred
def eval(self, parser:Recognizer , outerContext:RuleContext ):
localctx = outerContext if self.isCtxDependent else None
return parser.sempred(localctx, self.ruleIndex, self.predIndex)
def __hash__(self):
with StringIO() as buf:
buf.write(str(self.ruleIndex))
buf.write("/")
buf.write(str(self.predIndex))
buf.write("/")
buf.write(str(self.isCtxDependent))
return hash(buf.getvalue())
def __eq__(self, other):
if self is other:
return True
elif not isinstance(other, Predicate):
return False
return self.ruleIndex == other.ruleIndex and \
self.predIndex == other.predIndex and \
self.isCtxDependent == other.isCtxDependent
def __str__(self):
return "{" + str(self.ruleIndex) + ":" + str(self.predIndex) + "}?"
class PrecedencePredicate(SemanticContext):
def __init__(self, precedence:int=0):
self.precedence = precedence
def eval(self, parser:Recognizer , outerContext:RuleContext ):
return parser.precpred(outerContext, self.precedence)
def evalPrecedence(self, parser:Recognizer, outerContext:RuleContext):
if parser.precpred(outerContext, self.precedence):
return SemanticContext.NONE
else:
return None
def __cmp__(self, other):
return self.precedence - other.precedence
def __hash__(self):
return 31
def __eq__(self, other):
if self is other:
return True
elif not isinstance(other, PrecedencePredicate):
return False
else:
return self.precedence == other.precedence
# A semantic context which is true whenever none of the contained contexts
# is false.
del AND
class AND(SemanticContext):
def __init__(self, a:SemanticContext, b:SemanticContext):
operands = set()
if isinstance( a, AND ):
for o in a.opnds:
operands.add(o)
else:
operands.add(a)
if isinstance( b, AND ):
for o in b.opnds:
operands.add(o)
else:
operands.add(b)
precedencePredicates = filterPrecedencePredicates(operands)
if len(precedencePredicates)>0:
# interested in the transition with the lowest precedence
reduced = min(precedencePredicates)
operands.add(reduced)
self.opnds = [ o for o in operands ]
def __eq__(self, other):
if self is other:
return True
elif not isinstance(other, AND):
return False
else:
return self.opnds == other.opnds
def __hash__(self):
return hash(str(self.opnds)+ "/AND")
#
# {@inheritDoc}
#
# <p>
# The evaluation of predicates by this context is short-circuiting, but
# unordered.</p>
#
def eval(self, parser:Recognizer , outerContext:RuleContext ):
for opnd in self.opnds:
if not opnd.eval(parser, outerContext):
return False
return True
def evalPrecedence(self, parser:Recognizer, outerContext:RuleContext):
differs = False
operands = []
for context in self.opnds:
evaluated = context.evalPrecedence(parser, outerContext)
differs |= evaluated is not context
if evaluated is None:
# The AND context is false if any element is false
return None
elif evaluated is not SemanticContext.NONE:
# Reduce the result by skipping true elements
operands.append(evaluated)
if not differs:
return self
if len(operands)==0:
# all elements were true, so the AND context is true
return SemanticContext.NONE
result = None
for o in operands:
result = o if result is None else andContext(result, o)
return result
def __str__(self):
with StringIO() as buf:
first = True
for o in self.opnds:
if not first:
buf.write("&&")
buf.write(str(o))
first = False
return buf.getvalue()
#
# A semantic context which is true whenever at least one of the contained
# contexts is true.
del OR
class OR (SemanticContext):
def __init__(self, a:SemanticContext, b:SemanticContext):
operands = set()
if isinstance( a, OR ):
for o in a.opnds:
operands.add(o)
else:
operands.add(a)
if isinstance( b, OR ):
for o in b.opnds:
operands.add(o)
else:
operands.add(b)
precedencePredicates = filterPrecedencePredicates(operands)
if len(precedencePredicates)>0:
# interested in the transition with the highest precedence
s = sorted(precedencePredicates)
reduced = s[len(s)-1]
operands.add(reduced)
self.opnds = [ o for o in operands ]
def __eq__(self, other):
if self is other:
return True
elif not isinstance(other, OR):
return False
else:
return self.opnds == other.opnds
def __hash__(self):
return hash(str(self.opnds)+"/OR")
# <p>
# The evaluation of predicates by this context is short-circuiting, but
# unordered.</p>
#
def eval(self, parser:Recognizer, outerContext:RuleContext):
for opnd in self.opnds:
if opnd.eval(parser, outerContext):
return True
return False
def evalPrecedence(self, parser:Recognizer, outerContext:RuleContext):
differs = False
operands = []
for context in self.opnds:
evaluated = context.evalPrecedence(parser, outerContext)
differs |= evaluated is not context
            if evaluated is SemanticContext.NONE:
# The OR context is true if any element is true
return SemanticContext.NONE
elif evaluated is not None:
# Reduce the result by skipping false elements
operands.append(evaluated)
if not differs:
return self
if len(operands)==0:
# all elements were false, so the OR context is false
return None
result = None
for o in operands:
result = o if result is None else orContext(result, o)
return result
def __str__(self):
with StringIO() as buf:
first = True
for o in self.opnds:
if not first:
buf.write("||")
buf.write(str(o))
first = False
return buf.getvalue()
SemanticContext.NONE = Predicate()
|
{
"content_hash": "eb4715602f3347bf704449059232252e",
"timestamp": "",
"source": "github",
"line_count": 323,
"max_line_length": 86,
"avg_line_length": 31.75541795665635,
"alnum_prop": 0.6033928049137175,
"repo_name": "hce/antlr4",
"id": "d4a50915dd4ba645e4e908ebe5ebb4d92a4f0984",
"size": "12174",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "runtime/Python3/src/antlr4/atn/SemanticContext.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ANTLR",
"bytes": "72606"
},
{
"name": "Batchfile",
"bytes": "139"
},
{
"name": "C#",
"bytes": "1460309"
},
{
"name": "GAP",
"bytes": "110837"
},
{
"name": "Java",
"bytes": "5768297"
},
{
"name": "JavaScript",
"bytes": "443990"
},
{
"name": "PowerShell",
"bytes": "6138"
},
{
"name": "Python",
"bytes": "1005486"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Person.suffix'
db.add_column('contacts_people', 'suffix', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Person.suffix'
db.delete_column('contacts_people', 'suffix')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'comments.comment': {
'Meta': {'ordering': "('submit_date',)", 'object_name': 'Comment', 'db_table': "'django_comments'"},
'comment': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_comment'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_removed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_pk': ('django.db.models.fields.TextField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'submit_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comment_comments'", 'null': 'True', 'to': "orm['auth.User']"}),
'user_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'user_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contacts.company': {
'Meta': {'ordering': "('name',)", 'object_name': 'Company', 'db_table': "'contacts_companies'"},
'about': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'nickname': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'contacts.emailaddress': {
'Meta': {'object_name': 'EmailAddress', 'db_table': "'contacts_email_addresses'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'email_address': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'default': "'work'", 'max_length': '6'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'})
},
'contacts.group': {
'Meta': {'ordering': "('name',)", 'object_name': 'Group', 'db_table': "'contacts_groups'"},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'companies': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['contacts.Company']", 'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'people': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['contacts.Person']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'contacts.instantmessenger': {
'Meta': {'object_name': 'InstantMessenger', 'db_table': "'contacts_instant_messengers'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'im_account': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'location': ('django.db.models.fields.CharField', [], {'default': "'work'", 'max_length': '6'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'service': ('django.db.models.fields.CharField', [], {'default': "'jabber'", 'max_length': '11'})
},
'contacts.person': {
'Meta': {'ordering': "('last_name', 'first_name')", 'object_name': 'Person', 'db_table': "'contacts_people'"},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'company': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contacts.Company']", 'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'middle_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'nickname': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'suffix': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'contacts.phonenumber': {
'Meta': {'object_name': 'PhoneNumber', 'db_table': "'contacts_phone_numbers'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'default': "'work'", 'max_length': '6'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'contacts.specialdate': {
'Meta': {'object_name': 'SpecialDate', 'db_table': "'contacts_special_dates'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'date': ('django.db.models.fields.DateField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'every_year': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'occasion': ('django.db.models.fields.TextField', [], {'max_length': '200'})
},
'contacts.streetaddress': {
'Meta': {'object_name': 'StreetAddress', 'db_table': "'contacts_street_addresses'"},
'city': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'default': "'work'", 'max_length': '6'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'province': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'street': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'contacts.website': {
'Meta': {'object_name': 'WebSite', 'db_table': "'contacts_web_sites'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'default': "'work'", 'max_length': '6'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['contacts']
|
{
"content_hash": "57eafe25943535b6d12165fc5244c96e",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 182,
"avg_line_length": 82,
"alnum_prop": 0.5495017046944664,
"repo_name": "huang4fstudio/django-contacts",
"id": "297ca023f8354570352ec573982bc9985e9eb62f",
"size": "15270",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/contacts/migrations/0006_auto__add_field_person_suffix.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
class LustObject(object):
# executes the command
def handle(self, arguments): pass
def print_help(self): pass
class FactorialCommand(LustObject):
def handle(self, arguments):
try: argument = int(arguments[0])
except (ValueError, IndexError):
print("fact: could not read integer argument.")
return
if argument < 0:
print("fact: argument has to be non-negative!")
return
print(self.__calculate_factorial(argument))
def print_help(self):
print(" fact <integer>")
print(" Calculates the factorial of <integer>.")
    def __calculate_factorial(self, argument):
        # Start from 1, otherwise the running product would always be 0.
        result = 1
        for i in range(1, argument+1):
            result *= i
return result
class CheckPrimeCommand(LustObject):
def handle(self, arguments):
try: argument = int(arguments[0])
except (ValueError, IndexError):
print("checkprime: could not read integer argument.")
return
if not argument > 1:
print("checkprime: argument has to be > 1!")
return
print(self.__check_prime(argument))
def print_help(self):
print(" checkprime <integer>")
print(" Performs a primality test.")
    def __check_prime(self, argument):
        # Checking divisors only up to sqrt(argument) would be enough,
        # but for simplicity we test every candidate below the argument.
        upper_bound = argument
        for i in range(2, upper_bound):
if argument % i == 0:
return False
return True
class QuitCommand(LustObject):
def handle(self, arguments = None):
print("Bye!")
exit()
def print_help(self):
print(" quit")
print(" Quits.")
class HelpCommand(LustObject):
def __init__(self, commands):
self.commands = commands
def handle(self, arguments = None):
print("List of all commands")
print("--------------------")
for command in sorted(self.commands):
self.commands[command].print_help()
def print_help(self):
print(" help")
print(" Prints help for all commands.")
print("Hello! Welcome to the LARICS Universal Shell Terminal (LUST)!")
print("Enter 'help' for a list of commands. Press Ctrl-D or enter 'quit' to quit.")
# dictionary for storing all commands
commands = { }
commands["fact"] = FactorialCommand()
commands["checkprime"] = CheckPrimeCommand()
commands["quit"] = QuitCommand()
# help command needs a reference to the parent dictionary in order to call each
# command's print_help() function
commands["help"] = HelpCommand(commands)
# input from Python 3 is raw_input in Python 2
try: input = raw_input
except NameError: pass
while True:
# read current line and try to extract command name
try:
cmd_line = input(">> ")
except (EOFError):
break
arguments = cmd_line.split()
try: cmd_name = arguments[0].lower()
except IndexError: continue
# look up the appropriate command in commands dictionary
if cmd_name not in commands:
print("lust: no such command '{}'.".format(cmd_name))
continue
else:
# command found, pass its handler the rest of the read arguments
commands[cmd_name].handle(arguments[1:])
print("")
commands["quit"].handle()
|
{
"content_hash": "37ca5a7a14e6568b6d07cd8f8d1fec67",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 83,
"avg_line_length": 27.309734513274336,
"alnum_prop": 0.6652624756966947,
"repo_name": "dmiklic/git-tutorial-code",
"id": "658f115f4e61b9bb62be29ac550c8c7acb61f3ac",
"size": "3109",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/lust.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "4327"
},
{
"name": "CMake",
"bytes": "179"
},
{
"name": "Python",
"bytes": "3611"
}
],
"symlink_target": ""
}
|
"""
pygments.lexers.sieve
~~~~~~~~~~~~~~~~~~~~~
Lexer for Sieve file format.
https://tools.ietf.org/html/rfc5228
https://tools.ietf.org/html/rfc5173
https://tools.ietf.org/html/rfc5229
https://tools.ietf.org/html/rfc5230
https://tools.ietf.org/html/rfc5232
https://tools.ietf.org/html/rfc5235
https://tools.ietf.org/html/rfc5429
https://tools.ietf.org/html/rfc8580
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups
from pygments.token import Comment, Name, Literal, String, Text, Punctuation, \
Keyword
__all__ = ["SieveLexer"]
class SieveLexer(RegexLexer):
"""
Lexer for sieve format.
.. versionadded:: 2.6
"""
name = 'Sieve'
filenames = ['*.siv', '*.sieve']
aliases = ['sieve']
tokens = {
'root': [
(r'\s+', Text),
(r'[();,{}\[\]]', Punctuation),
# import:
(r'(?i)require',
Keyword.Namespace),
# tags:
(r'(?i)(:)(addresses|all|contains|content|create|copy|comparator|'
r'count|days|detail|domain|fcc|flags|from|handle|importance|is|'
r'localpart|length|lowerfirst|lower|matches|message|mime|options|'
r'over|percent|quotewildcard|raw|regex|specialuse|subject|text|'
r'under|upperfirst|upper|value)',
bygroups(Name.Tag, Name.Tag)),
# tokens:
(r'(?i)(address|addflag|allof|anyof|body|discard|elsif|else|envelope|'
r'ereject|exists|false|fileinto|if|hasflag|header|keep|'
r'notify_method_capability|notify|not|redirect|reject|removeflag|'
r'setflag|size|spamtest|stop|string|true|vacation|virustest)',
Name.Builtin),
(r'(?i)set',
Keyword.Declaration),
# number:
(r'([0-9.]+)([kmgKMG])?',
bygroups(Literal.Number, Literal.Number)),
# comment:
(r'#.*$',
Comment.Single),
(r'/\*.*\*/',
Comment.Multiline),
# string:
(r'"[^"]*?"',
String),
# text block:
(r'text:',
Name.Tag, 'text'),
],
'text': [
(r'[^.].*?\n', String),
(r'^\.', Punctuation, "#pop"),
]
}
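# Usage sketch (illustrative), exercising the lexer through the standard
# Pygments Lexer.get_tokens API:
#
#   for token_type, value in SieveLexer().get_tokens('require "fileinto";'):
#       print(token_type, repr(value))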
|
{
"content_hash": "ee8d2cdb191b1545d46c00e2ee3133ed",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 82,
"avg_line_length": 31.294871794871796,
"alnum_prop": 0.528062269561655,
"repo_name": "dscorbett/pygments",
"id": "ab43db8ba38469e9f5011baa91d834354c3ce824",
"size": "2441",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pygments/lexers/sieve.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "APL",
"bytes": "587"
},
{
"name": "ASP.NET",
"bytes": "636"
},
{
"name": "ActionScript",
"bytes": "5686"
},
{
"name": "Ada",
"bytes": "6951"
},
{
"name": "Agda",
"bytes": "3155"
},
{
"name": "Alloy",
"bytes": "6579"
},
{
"name": "AppleScript",
"bytes": "421"
},
{
"name": "Assembly",
"bytes": "3577"
},
{
"name": "Asymptote",
"bytes": "10210"
},
{
"name": "AutoHotkey",
"bytes": "3733"
},
{
"name": "AutoIt",
"bytes": "732"
},
{
"name": "Awk",
"bytes": "4528"
},
{
"name": "Batchfile",
"bytes": "4275"
},
{
"name": "Berry",
"bytes": "8606"
},
{
"name": "BlitzBasic",
"bytes": "1824"
},
{
"name": "BlitzMax",
"bytes": "2387"
},
{
"name": "Boo",
"bytes": "1111"
},
{
"name": "Boogie",
"bytes": "4482"
},
{
"name": "C",
"bytes": "108863"
},
{
"name": "C#",
"bytes": "17784"
},
{
"name": "C++",
"bytes": "85604"
},
{
"name": "CMake",
"bytes": "2150"
},
{
"name": "COBOL",
"bytes": "117432"
},
{
"name": "CSS",
"bytes": "982"
},
{
"name": "Cap'n Proto",
"bytes": "390"
},
{
"name": "Ceylon",
"bytes": "1387"
},
{
"name": "Chapel",
"bytes": "4934"
},
{
"name": "Cirru",
"bytes": "3285"
},
{
"name": "Clean",
"bytes": "8739"
},
{
"name": "Clojure",
"bytes": "25885"
},
{
"name": "CoffeeScript",
"bytes": "20149"
},
{
"name": "ColdFusion",
"bytes": "9263"
},
{
"name": "Common Lisp",
"bytes": "49017"
},
{
"name": "Coq",
"bytes": "66"
},
{
"name": "Crystal",
"bytes": "36037"
},
{
"name": "Csound",
"bytes": "1135"
},
{
"name": "Csound Document",
"bytes": "209"
},
{
"name": "Csound Score",
"bytes": "310"
},
{
"name": "Cuda",
"bytes": "776"
},
{
"name": "Cypher",
"bytes": "3365"
},
{
"name": "D",
"bytes": "5475"
},
{
"name": "Dart",
"bytes": "591"
},
{
"name": "Dylan",
"bytes": "6343"
},
{
"name": "Eiffel",
"bytes": "2145"
},
{
"name": "Elixir",
"bytes": "4340"
},
{
"name": "Elm",
"bytes": "1187"
},
{
"name": "Emacs Lisp",
"bytes": "198742"
},
{
"name": "Erlang",
"bytes": "6048"
},
{
"name": "F#",
"bytes": "19734"
},
{
"name": "F*",
"bytes": "42435"
},
{
"name": "Factor",
"bytes": "10194"
},
{
"name": "Fancy",
"bytes": "2581"
},
{
"name": "Fantom",
"bytes": "25331"
},
{
"name": "Fennel",
"bytes": "6994"
},
{
"name": "Forth",
"bytes": "48"
},
{
"name": "Fortran",
"bytes": "38732"
},
{
"name": "Futhark",
"bytes": "507"
},
{
"name": "G-code",
"bytes": "2791"
},
{
"name": "GAP",
"bytes": "19390"
},
{
"name": "GDScript",
"bytes": "1240"
},
{
"name": "GLSL",
"bytes": "450"
},
{
"name": "Gherkin",
"bytes": "1267"
},
{
"name": "Gnuplot",
"bytes": "10376"
},
{
"name": "Go",
"bytes": "430"
},
{
"name": "Golo",
"bytes": "1649"
},
{
"name": "Gosu",
"bytes": "2853"
},
{
"name": "Groovy",
"bytes": "2663"
},
{
"name": "HLSL",
"bytes": "5123"
},
{
"name": "HTML",
"bytes": "146106"
},
{
"name": "Handlebars",
"bytes": "1738"
},
{
"name": "Haskell",
"bytes": "49856"
},
{
"name": "Haxe",
"bytes": "16812"
},
{
"name": "Hy",
"bytes": "7237"
},
{
"name": "IDL",
"bytes": "57251"
},
{
"name": "Idris",
"bytes": "2771"
},
{
"name": "Inform 7",
"bytes": "2046"
},
{
"name": "Ioke",
"bytes": "469"
},
{
"name": "Isabelle",
"bytes": "62868"
},
{
"name": "J",
"bytes": "25519"
},
{
"name": "Jasmin",
"bytes": "9428"
},
{
"name": "Java",
"bytes": "20381"
},
{
"name": "JavaScript",
"bytes": "2686"
},
{
"name": "Jsonnet",
"bytes": "1217"
},
{
"name": "Julia",
"bytes": "11904"
},
{
"name": "Kotlin",
"bytes": "971"
},
{
"name": "LSL",
"bytes": "160"
},
{
"name": "Lasso",
"bytes": "18650"
},
{
"name": "Lean",
"bytes": "8611"
},
{
"name": "LilyPond",
"bytes": "2804"
},
{
"name": "Limbo",
"bytes": "9891"
},
{
"name": "Liquid",
"bytes": "862"
},
{
"name": "LiveScript",
"bytes": "972"
},
{
"name": "Logos",
"bytes": "306"
},
{
"name": "Logtalk",
"bytes": "7260"
},
{
"name": "Lua",
"bytes": "9184"
},
{
"name": "MAXScript",
"bytes": "8464"
},
{
"name": "MQL4",
"bytes": "6230"
},
{
"name": "MQL5",
"bytes": "4866"
},
{
"name": "Macaulay2",
"bytes": "409"
},
{
"name": "Makefile",
"bytes": "60153"
},
{
"name": "Mako",
"bytes": "1023"
},
{
"name": "Mask",
"bytes": "855"
},
{
"name": "Mathematica",
"bytes": "355"
},
{
"name": "Meson",
"bytes": "965"
},
{
"name": "Modula-2",
"bytes": "23838"
},
{
"name": "Monkey",
"bytes": "2587"
},
{
"name": "Moocode",
"bytes": "3343"
},
{
"name": "MoonScript",
"bytes": "14862"
},
{
"name": "Motoko",
"bytes": "6213"
},
{
"name": "Myghty",
"bytes": "3939"
},
{
"name": "NCL",
"bytes": "473"
},
{
"name": "NSIS",
"bytes": "7663"
},
{
"name": "Nemerle",
"bytes": "1517"
},
{
"name": "NewLisp",
"bytes": "42726"
},
{
"name": "Nim",
"bytes": "37191"
},
{
"name": "Nit",
"bytes": "55581"
},
{
"name": "Nix",
"bytes": "2448"
},
{
"name": "OCaml",
"bytes": "42416"
},
{
"name": "Objective-C",
"bytes": "3385"
},
{
"name": "Objective-J",
"bytes": "15340"
},
{
"name": "Opa",
"bytes": "172"
},
{
"name": "OpenEdge ABL",
"bytes": "318"
},
{
"name": "PHP",
"bytes": "17903"
},
{
"name": "POV-Ray SDL",
"bytes": "1455"
},
{
"name": "Pan",
"bytes": "1241"
},
{
"name": "Pascal",
"bytes": "84520"
},
{
"name": "Pawn",
"bytes": "6555"
},
{
"name": "Perl",
"bytes": "47698"
},
{
"name": "PigLatin",
"bytes": "6657"
},
{
"name": "Pike",
"bytes": "8479"
},
{
"name": "Pony",
"bytes": "247"
},
{
"name": "PostScript",
"bytes": "8648"
},
{
"name": "PowerShell",
"bytes": "6127"
},
{
"name": "Procfile",
"bytes": "128"
},
{
"name": "Prolog",
"bytes": "9661"
},
{
"name": "Puppet",
"bytes": "130"
},
{
"name": "Python",
"bytes": "4303263"
},
{
"name": "QML",
"bytes": "3912"
},
{
"name": "R",
"bytes": "4057"
},
{
"name": "REXX",
"bytes": "862"
},
{
"name": "Racket",
"bytes": "11341"
},
{
"name": "Raku",
"bytes": "4692"
},
{
"name": "Reason",
"bytes": "1316"
},
{
"name": "Rebol",
"bytes": "1887"
},
{
"name": "Red",
"bytes": "10792"
},
{
"name": "Redcode",
"bytes": "830"
},
{
"name": "Roff",
"bytes": "49294"
},
{
"name": "Ruby",
"bytes": "91403"
},
{
"name": "Rust",
"bytes": "23551"
},
{
"name": "SWIG",
"bytes": "6153"
},
{
"name": "SaltStack",
"bytes": "1040"
},
{
"name": "Scala",
"bytes": "11132"
},
{
"name": "Scaml",
"bytes": "166"
},
{
"name": "Scheme",
"bytes": "46229"
},
{
"name": "Scilab",
"bytes": "1639"
},
{
"name": "Shell",
"bytes": "132777"
},
{
"name": "ShellSession",
"bytes": "320"
},
{
"name": "Shen",
"bytes": "3134"
},
{
"name": "Sieve",
"bytes": "1205"
},
{
"name": "Singularity",
"bytes": "1070"
},
{
"name": "Slim",
"bytes": "679"
},
{
"name": "Smali",
"bytes": "832"
},
{
"name": "Smalltalk",
"bytes": "156665"
},
{
"name": "Smithy",
"bytes": "14242"
},
{
"name": "Solidity",
"bytes": "1544"
},
{
"name": "SourcePawn",
"bytes": "130"
},
{
"name": "Stan",
"bytes": "2547"
},
{
"name": "Standard ML",
"bytes": "36869"
},
{
"name": "SuperCollider",
"bytes": "1578"
},
{
"name": "Swift",
"bytes": "2035"
},
{
"name": "SystemVerilog",
"bytes": "265"
},
{
"name": "TSQL",
"bytes": "1576"
},
{
"name": "TeX",
"bytes": "2234"
},
{
"name": "Tea",
"bytes": "391"
},
{
"name": "Thrift",
"bytes": "167"
},
{
"name": "VBA",
"bytes": "846"
},
{
"name": "VBScript",
"bytes": "1199"
},
{
"name": "VCL",
"bytes": "4128"
},
{
"name": "VHDL",
"bytes": "4446"
},
{
"name": "Vim Script",
"bytes": "16922"
},
{
"name": "Visual Basic .NET",
"bytes": "16364"
},
{
"name": "WebAssembly",
"bytes": "916"
},
{
"name": "Whiley",
"bytes": "9029"
},
{
"name": "X10",
"bytes": "198"
},
{
"name": "XQuery",
"bytes": "11179"
},
{
"name": "XSLT",
"bytes": "755"
},
{
"name": "Xtend",
"bytes": "741"
},
{
"name": "Zeek",
"bytes": "10863"
},
{
"name": "Zephir",
"bytes": "485"
},
{
"name": "Zig",
"bytes": "8703"
},
{
"name": "eC",
"bytes": "26388"
},
{
"name": "mcfunction",
"bytes": "6450"
},
{
"name": "mupad",
"bytes": "2443"
},
{
"name": "nesC",
"bytes": "23697"
},
{
"name": "q",
"bytes": "3349"
},
{
"name": "sed",
"bytes": "1614"
},
{
"name": "xBase",
"bytes": "3349"
}
],
"symlink_target": ""
}
|
import sys
from test import support, list_tests
import pickle
class ListTest(list_tests.CommonTest):
type2test = list
def test_basic(self):
self.assertEqual(list([]), [])
l0_3 = [0, 1, 2, 3]
l0_3_bis = list(l0_3)
self.assertEqual(l0_3, l0_3_bis)
self.assertTrue(l0_3 is not l0_3_bis)
self.assertEqual(list(()), [])
self.assertEqual(list((0, 1, 2, 3)), [0, 1, 2, 3])
self.assertEqual(list(''), [])
self.assertEqual(list('spam'), ['s', 'p', 'a', 'm'])
if sys.maxsize == 0x7fffffff:
# This test can currently only work on 32-bit machines.
# XXX If/when PySequence_Length() returns a ssize_t, it should be
# XXX re-enabled.
# Verify clearing of bug #556025.
# This assumes that the max data size (sys.maxint) == the max
# address size; it also assumes that the address size is at
# least 4 bytes. With 8-byte addresses, the bug is not well
# tested.
#
# Note: This test is expected to SEGV under Cygwin 1.3.12 or
# earlier due to a newlib bug. See the following mailing list
# thread for the details:
# http://sources.redhat.com/ml/newlib/2002/msg00369.html
self.assertRaises(MemoryError, list, range(sys.maxsize // 2))
# This code used to segfault in Py2.4a3
x = []
x.extend(-y for y in x)
self.assertEqual(x, [])
def test_truth(self):
super().test_truth()
self.assertTrue(not [])
self.assertTrue([42])
def test_identity(self):
self.assertTrue([] is not [])
def test_len(self):
super().test_len()
self.assertEqual(len([]), 0)
self.assertEqual(len([0]), 1)
self.assertEqual(len([0, 1, 2]), 3)
def test_overflow(self):
lst = [4, 5, 6, 7]
n = int((sys.maxsize*2+2) // len(lst))
def mul(a, b): return a * b
def imul(a, b): a *= b
self.assertRaises((MemoryError, OverflowError), mul, lst, n)
self.assertRaises((MemoryError, OverflowError), imul, lst, n)
def test_repr_large(self):
# Check the repr of large list objects
def check(n):
l = [0] * n
s = repr(l)
self.assertEqual(s,
'[' + ', '.join(['0'] * n) + ']')
check(10) # check our checking code
check(1000000)
def test_iterator_pickle(self):
# Userlist iterators don't support pickling yet since
# they are based on generators.
data = self.type2test([4, 5, 6, 7])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
it = itorg = iter(data)
d = pickle.dumps(it, proto)
it = pickle.loads(d)
self.assertEqual(type(itorg), type(it))
self.assertEqual(self.type2test(it), self.type2test(data))
it = pickle.loads(d)
next(it)
d = pickle.dumps(it, proto)
self.assertEqual(self.type2test(it), self.type2test(data)[1:])
def test_reversed_pickle(self):
data = self.type2test([4, 5, 6, 7])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
it = itorg = reversed(data)
d = pickle.dumps(it, proto)
it = pickle.loads(d)
self.assertEqual(type(itorg), type(it))
self.assertEqual(self.type2test(it), self.type2test(reversed(data)))
it = pickle.loads(d)
next(it)
d = pickle.dumps(it, proto)
self.assertEqual(self.type2test(it), self.type2test(reversed(data))[1:])
def test_no_comdat_folding(self):
# Issue 8847: In the PGO build, the MSVC linker's COMDAT folding
# optimization causes failures in code that relies on distinct
# function addresses.
class L(list): pass
with self.assertRaises(TypeError):
(3,) + L([1,2])
def test_main(verbose=None):
support.run_unittest(ListTest)
# verify reference counting
import sys
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in range(len(counts)):
support.run_unittest(ListTest)
gc.collect()
counts[i] = sys.gettotalrefcount()
print(counts)
if __name__ == "__main__":
test_main(verbose=True)
|
{
"content_hash": "e7b3b5a4bbccdcb5531e7035840c44e4",
"timestamp": "",
"source": "github",
"line_count": 387,
"max_line_length": 84,
"avg_line_length": 34.74935400516796,
"alnum_prop": 0.5562165377751338,
"repo_name": "ArcherSys/ArcherSys",
"id": "6d4c5e6e0eb3b2471821c4ac8f4e2bc08bc4a789",
"size": "13448",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lib/test/test_list.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
"""
This module defines all of the Mixins that provide components of XBlock-family
functionality, such as ScopeStorage, RuntimeServices, and Handlers.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import OrderedDict
import copy
import functools
import inspect
import logging
import warnings
import json
from lxml import etree
import six
from webob import Response
from xblock.exceptions import JsonHandlerError, KeyValueMultiSaveError, XBlockSaveError, FieldDataDeprecationWarning
from xblock.fields import Field, Reference, Scope, ReferenceList
from xblock.internal import class_lazy, NamedAttributesMetaclass
# OrderedDict is used so that namespace attributes are put in predictable order
# This allows for simple string equality assertions in tests and have no other effects
XML_NAMESPACES = OrderedDict([
("option", "http://code.edx.org/xblock/option"),
("block", "http://code.edx.org/xblock/block"),
])
class HandlersMixin(object):
"""
A mixin that provides all of the machinery needed for working with XBlock-style handlers.
"""
@classmethod
def json_handler(cls, func):
"""
Wrap a handler to consume and produce JSON.
Rather than a Request object, the method will now be passed the
JSON-decoded body of the request. The request should be a POST request
in order to use this method. Any data returned by the function
will be JSON-encoded and returned as the response.
The wrapped function can raise JsonHandlerError to return an error
response with a non-200 status code.
This decorator will return a 405 HTTP status code if the method is not
POST.
This decorator will return a 400 status code if the body contains
invalid JSON.
"""
@cls.handler
@functools.wraps(func)
def wrapper(self, request, suffix=''):
"""The wrapper function `json_handler` returns."""
if request.method != "POST":
return JsonHandlerError(405, "Method must be POST").get_response(allow=["POST"])
try:
request_json = json.loads(request.body)
except ValueError:
return JsonHandlerError(400, "Invalid JSON").get_response()
try:
response = func(self, request_json, suffix)
except JsonHandlerError as err:
return err.get_response()
if isinstance(response, Response):
return response
else:
return Response(json.dumps(response), content_type='application/json', charset='utf8')
return wrapper
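# Illustrative usage (not part of the original module): a block method
# decorated with ``json_handler`` receives the JSON-decoded POST body and
# returns a JSON-serializable value. The handler name below is hypothetical.
#
#     @XBlock.json_handler
#     def submit_answer(self, data, suffix=''):
#         return {'echo': data}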
@classmethod
def handler(cls, func):
"""
A decorator to indicate a function is usable as a handler.
The wrapped function must return a `webob.Response` object.
"""
func._is_xblock_handler = True # pylint: disable=protected-access
return func
def handle(self, handler_name, request, suffix=''):
"""Handle `request` with this block's runtime."""
return self.runtime.handle(self, handler_name, request, suffix)
class RuntimeServicesMixin(object):
"""
This mixin provides all of the machinery needed for an XBlock-style object
to declare dependencies on particular runtime services.
"""
@class_lazy
def _services_requested(cls): # pylint: disable=no-self-argument
"""A per-class dictionary to store the services requested by a particular XBlock."""
return {}
@class_lazy
def _combined_services(cls): # pylint: disable=no-self-argument
"""
A dictionary that collects all _services_requested by all ancestors of this XBlock class.
"""
# The class declares what services it desires. To deal with subclasses,
# especially mixins, properly, we have to walk up the inheritance
# hierarchy, and combine all the declared services into one dictionary.
combined = {}
for parent in reversed(cls.mro()):
combined.update(getattr(parent, "_services_requested", {}))
return combined
def __init__(self, runtime, **kwargs):
"""
Arguments:
runtime (:class:`.Runtime`): Use it to access the environment.
It is available in XBlock code as ``self.runtime``.
"""
self.runtime = runtime
super(RuntimeServicesMixin, self).__init__(**kwargs)
@classmethod
def needs(cls, *service_names):
"""A class decorator to indicate that an XBlock class needs particular services."""
def _decorator(cls_): # pylint: disable=missing-docstring
for service_name in service_names:
cls_._services_requested[service_name] = "need" # pylint: disable=protected-access
return cls_
return _decorator
@classmethod
def wants(cls, *service_names):
"""A class decorator to indicate that an XBlock class wants particular services."""
def _decorator(cls_): # pylint: disable=missing-docstring
for service_name in service_names:
cls_._services_requested[service_name] = "want" # pylint: disable=protected-access
return cls_
return _decorator
@classmethod
def service_declaration(cls, service_name):
"""
Find and return a service declaration.
XBlocks declare their service requirements with `@XBlock.needs` and
`@XBlock.wants` decorators. These store information on the class.
This function finds those declarations for a block.
Arguments:
service_name (string): the name of the service requested.
Returns:
One of "need", "want", or None.
"""
return cls._combined_services.get(service_name) # pylint: disable=no-member
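# Illustrative usage (not part of the original module): a block class declares
# the services it requires or merely wants; the class name is hypothetical and
# the service names are examples.
#
#     @XBlock.needs('field-data')
#     @XBlock.wants('user')
#     class MyBlock(XBlock):
#         ...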
@RuntimeServicesMixin.needs('field-data')
class ScopedStorageMixin(six.with_metaclass(NamedAttributesMetaclass, RuntimeServicesMixin)):
"""
This mixin provides scope for Fields and the associated Scoped storage.
"""
@class_lazy
def fields(cls): # pylint: disable=no-self-argument
"""
A dictionary mapping the attribute name to the Field object for all
Field attributes of the class.
"""
fields = {}
# Loop through all of the baseclasses of cls, in
# the order that methods are resolved (Method Resolution Order / mro)
# and find all of their defined fields.
#
# Only save the first such defined field (as expected for method resolution)
bases = cls.mro()
local = bases.pop(0)
# First, descend the MRO from the top down, updating the 'fields' dictionary
# so that the dictionary always has the most specific version of fields in it
for base in reversed(bases):
fields.update(getattr(base, 'fields', {}))
# For this class, loop through all attributes not named 'fields',
# find those of type Field, and save them to the 'fields' dict
for attr_name, attr_value in inspect.getmembers(local, lambda attr: isinstance(attr, Field)):
fields[attr_name] = attr_value
return fields
def __init__(self, scope_ids, field_data=None, **kwargs):
"""
Arguments:
field_data (:class:`.FieldData`): Interface used by the XBlock
fields to access their data from wherever it is persisted.
scope_ids (:class:`.ScopeIds`): Identifiers needed to resolve
scopes.
"""
# This is used to store a directly passed field data
# for backwards compatibility
if field_data:
warnings.warn(
"Setting _field_data via the constructor is deprecated, please use a Runtime service",
FieldDataDeprecationWarning,
stacklevel=2
)
# Storing _field_data instead of _deprecated_per_instance_field_data allows subclasses to
# continue to override this behavior (for instance, the way that edx-platform's XModule does
# in order to proxy to XBlock).
self._field_data = field_data
else:
self._deprecated_per_instance_field_data = None # pylint: disable=invalid-name
self._field_data_cache = {}
self._dirty_fields = {}
self.scope_ids = scope_ids
super(ScopedStorageMixin, self).__init__(**kwargs)
@property
def _field_data(self):
"""
Return the FieldData for this XBlock (either as passed in the constructor
or from retrieving the 'field-data' service).
"""
if self._deprecated_per_instance_field_data:
return self._deprecated_per_instance_field_data
else:
return self.runtime.service(self, 'field-data')
@_field_data.setter
def _field_data(self, field_data):
"""
Set _field_data.
Deprecated.
"""
warnings.warn("Setting _field_data is deprecated", FieldDataDeprecationWarning, stacklevel=2)
self._deprecated_per_instance_field_data = field_data
def save(self):
"""Save all dirty fields attached to this XBlock."""
if not self._dirty_fields:
# nop if _dirty_fields attribute is empty
return
fields_to_save = self._get_fields_to_save()
if fields_to_save:
self.force_save_fields(fields_to_save)
def force_save_fields(self, field_names):
"""
Save all fields that are specified in `field_names`, even
if they are not dirty.
"""
fields = [self.fields[field_name] for field_name in field_names]
fields_to_save_json = {}
for field in fields:
fields_to_save_json[field.name] = field.to_json(self._field_data_cache[field.name])
try:
# Throws KeyValueMultiSaveError if things go wrong
self._field_data.set_many(self, fields_to_save_json)
except KeyValueMultiSaveError as save_error:
saved_fields = [field for field in fields if field.name in save_error.saved_field_names]
for field in saved_fields:
# should only find one corresponding field
fields.remove(field)
# if the field was dirty, delete from dirty fields
self._reset_dirty_field(field)
msg = 'Error saving fields {}'.format(save_error.saved_field_names)
raise XBlockSaveError(saved_fields, fields, msg)
# Remove all dirty fields, since the save was successful
for field in fields:
self._reset_dirty_field(field)
def _get_fields_to_save(self):
"""
Get an xblock's dirty fields.
"""
# If the field value isn't the same as the baseline we recorded
# when it was read, then save it
# pylint: disable=protected-access
return [field.name for field in self._dirty_fields if field._is_dirty(self)]
def _clear_dirty_fields(self):
"""
Remove all dirty fields from an XBlock.
"""
self._dirty_fields.clear()
def _reset_dirty_field(self, field):
"""
Resets dirty field value with the value from the field data cache.
"""
if field in self._dirty_fields:
self._dirty_fields[field] = copy.deepcopy(
self._field_data_cache[field.name]
)
def __repr__(self):
# `ScopedStorageMixin` obtains the `fields` attribute from the `ModelMetaclass`.
# Since this is not understood by static analysis, silence this error.
# pylint: disable=E1101
attrs = []
for field in six.itervalues(self.fields):
try:
value = getattr(self, field.name)
except Exception: # pylint: disable=broad-except
# Ensure we return a string, even if unanticipated exceptions occur.
attrs.append(" %s=???" % (field.name,))
else:
if isinstance(value, six.binary_type):
value = value.decode('utf-8', errors='escape')
if isinstance(value, six.text_type):
value = value.strip()
if len(value) > 40:
value = value[:37] + "..."
attrs.append(" %s=%r" % (field.name, value))
return "<%s @%04X%s>" % (
self.__class__.__name__,
id(self) % 0xFFFF,
','.join(attrs)
)
class ChildrenModelMetaclass(ScopedStorageMixin.__class__):
"""
A metaclass that transforms the attribute `has_children = True` into a List
field with a children scope.
"""
def __new__(mcs, name, bases, attrs):
if (attrs.get('has_children', False) or
any(getattr(base, 'has_children', False) for base in bases)):
attrs['children'] = ReferenceList(
help='The ids of the children of this XBlock',
scope=Scope.children)
else:
attrs['has_children'] = False
return super(ChildrenModelMetaclass, mcs).__new__(mcs, name, bases, attrs)
class HierarchyMixin(six.with_metaclass(ChildrenModelMetaclass, ScopedStorageMixin)):
"""
This adds Fields for parents and children.
"""
parent = Reference(help='The id of the parent of this XBlock', default=None, scope=Scope.parent)
def __init__(self, **kwargs):
# A cache of the parent block, retrieved from .parent
self._parent_block = None
self._parent_block_id = None
self._child_cache = {}
for_parent = kwargs.pop('for_parent', None)
if for_parent is not None:
self._parent_block = for_parent
self._parent_block_id = for_parent.scope_ids.usage_id
super(HierarchyMixin, self).__init__(**kwargs)
def get_parent(self):
"""Return the parent block of this block, or None if there isn't one."""
if not self.has_cached_parent:
if self.parent is not None:
self._parent_block = self.runtime.get_block(self.parent)
else:
self._parent_block = None
self._parent_block_id = self.parent
return self._parent_block
@property
def has_cached_parent(self):
"""Return whether this block has a cached parent block."""
return self.parent is not None and self._parent_block_id == self.parent
def get_child(self, usage_id):
"""Return the child identified by ``usage_id``."""
if usage_id in self._child_cache:
return self._child_cache[usage_id]
child_block = self.runtime.get_block(usage_id, for_parent=self)
self._child_cache[usage_id] = child_block
return child_block
def get_children(self, usage_id_filter=None):
"""
Return instantiated XBlocks for each of this block's ``children``.
"""
if not self.has_children:
return []
return [
self.get_child(usage_id)
for usage_id in self.children
if usage_id_filter is None or usage_id_filter(usage_id)
]
def clear_child_cache(self):
"""
Reset the cache of children stored on this XBlock.
"""
self._child_cache.clear()
def add_children_to_node(self, node):
"""
Add children to etree.Element `node`.
"""
if self.has_children:
for child_id in self.children:
child = self.runtime.get_block(child_id)
self.runtime.add_block_as_child_node(child, node)
class XmlSerializationMixin(ScopedStorageMixin):
"""
A mixin that provides XML serialization and deserialization on top of ScopedStorage.
"""
@classmethod
def parse_xml(cls, node, runtime, keys, id_generator):
"""
Use `node` to construct a new block.
Arguments:
node (etree.Element): The xml node to parse into an xblock.
runtime (:class:`.Runtime`): The runtime to use while parsing.
keys (:class:`.ScopeIds`): The keys identifying where this block
will store its data.
id_generator (:class:`.IdGenerator`): An object that will allow the
runtime to generate correct definition and usage ids for
children of this block.
"""
block = runtime.construct_xblock_from_class(cls, keys)
# The base implementation: child nodes become child blocks.
# Or fields, if they belong to the right namespace.
for child in node:
if child.tag is etree.Comment:
continue
qname = etree.QName(child)
tag = qname.localname
namespace = qname.namespace
if namespace == XML_NAMESPACES["option"]:
cls._set_field_if_present(block, tag, child.text, child.attrib)
else:
block.runtime.add_node_as_child(block, child, id_generator)
# Attributes become fields.
for name, value in node.items(): # lxml has no iteritems
cls._set_field_if_present(block, name, value, {})
# Text content becomes "content", if such a field exists.
if "content" in block.fields and block.fields["content"].scope == Scope.content:
text = node.text
if text:
text = text.strip()
if text:
block.content = text
return block
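# Illustrative sketch (not part of the original module): given the parsing
# rules above, a node such as
#
#     <myblock display_name="Demo">
#       <option:max_attempts>3</option:max_attempts>
#       Some inline text
#     </myblock>
#
# sets ``display_name`` from the attribute, ``max_attempts`` from the
# namespaced child element, and ``content`` (when such a field exists) from
# the text. The block and field names here are hypothetical.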
def add_xml_to_node(self, node):
"""
For exporting, set data on `node` from ourselves.
"""
# pylint: disable=E1101
# Set node.tag based on our class name.
node.tag = self.xml_element_name()
node.set('xblock-family', self.entry_point)
# Set node attributes based on our fields.
for field_name, field in self.fields.items():
if field_name in ('children', 'parent', 'content'):
continue
if field.is_set_on(self) or field.force_export:
self._add_field(node, field_name, field)
# A content field becomes text content.
text = self.xml_text_content()
if text is not None:
node.text = text
def xml_element_name(self):
"""What XML element name should be used for this block?"""
return self.scope_ids.block_type
def xml_text_content(self):
"""What is the text content for this block's XML node?"""
# pylint: disable=E1101
if 'content' in self.fields and self.content:
return self.content
else:
return None
@classmethod
def _set_field_if_present(cls, block, name, value, attrs):
"""Sets the field block.name, if block have such a field."""
if name in block.fields:
value = (block.fields[name]).from_string(value)
if "none" in attrs and attrs["none"] == "true":
setattr(block, name, None)
else:
setattr(block, name, value)
else:
logging.warning("XBlock %s does not contain field %s", type(block), name)
def _add_field(self, node, field_name, field):
"""
Add xml representation of field to node.
Depending on settings, it either stores the value of field
as an xml attribute or creates a separate child node.
"""
value = field.to_string(field.read_from(self))
text_value = "" if value is None else value
# Is the field type supposed to serialize the fact that the value is None to XML?
save_none_as_xml_attr = field.none_to_xml and value is None
field_attrs = {"none": "true"} if save_none_as_xml_attr else {}
if save_none_as_xml_attr or field.xml_node:
# Field will be output to XML as a separate element.
tag = etree.QName(XML_NAMESPACES["option"], field_name)
elem = etree.SubElement(node, tag, field_attrs)
if field.xml_node:
# Only set the value if forced via xml_node;
# in all other cases, the value is None.
# Avoids an unnecessary XML end tag.
elem.text = text_value
else:
# Field will be output to XML as an attribute on the node.
node.set(field_name, text_value)
class IndexInfoMixin(object):
"""
This mixin provides an interface for classes that wish to provide index
information which might be used within a search index.
"""
def index_dictionary(self):
"""
Return key/value fields to feed an index, within a Python dict object.
Values may be numeric, string, or dict.
The default implementation is an empty dict.
"""
return {}
class ViewsMixin(object):
"""
This mixin provides decorators that can be used on xBlock view methods.
"""
@classmethod
def supports(cls, *functionalities):
"""
A view decorator to indicate that an xBlock view has support for the
given functionalities.
Arguments:
functionalities: String identifiers for the functionalities of the view.
For example: "multi_device".
"""
def _decorator(view):
"""
Internal decorator that updates the given view's list of supported
functionalities.
"""
# pylint: disable=protected-access
if not hasattr(view, "_supports"):
view._supports = set()
for functionality in functionalities:
view._supports.add(functionality)
return view
return _decorator
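# Illustrative usage (not part of the original module): a view declares the
# functionalities it supports, which ``has_support`` below can then query.
# The view name is hypothetical; "multi_device" comes from the docstring above.
#
#     @XBlock.supports("multi_device")
#     def student_view(self, context):
#         ...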
def has_support(self, view, functionality):
"""
Returns whether the given view has support for the given functionality.
An XBlock view declares support for a functionality with the
@XBlock.supports decorator. The decorator stores information on the view.
Note: We implement this as an instance method to allow xBlocks to
override it, if necessary.
Arguments:
view (object): The view of the xBlock.
functionality (string): A functionality of the view.
For example: "multi_device".
Returns:
True or False
"""
return hasattr(view, "_supports") and functionality in view._supports # pylint: disable=protected-access
|
{
"content_hash": "07e7cb0fd67a29db85a30967ac439ce4",
"timestamp": "",
"source": "github",
"line_count": 612,
"max_line_length": 116,
"avg_line_length": 37.09803921568628,
"alnum_prop": 0.6037262156448203,
"repo_name": "mitodl/XBlock",
"id": "ac5c5b9a0bfe0a99e641e818a6f5078714b9ce2b",
"size": "22704",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xblock/mixins.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "168"
},
{
"name": "Python",
"bytes": "374921"
},
{
"name": "Shell",
"bytes": "1458"
}
],
"symlink_target": ""
}
|
import vision_configuration as vc
import numpy as np
np.random.seed(10000)
lamina = vc.Lamina(24, 32, 'neuron_types_lamina.csv', 'synapse_lamina.csv', None)
lamina.create_cartridges()
lamina.connect_cartridges()
lamina.create_non_columnar_neurons()
lamina.connect_composition_II()
lamina.connect_composition_I()
lamina.add_selectors()
lamina.export_to_gexf('lamina.gexf.gz')
|
{
"content_hash": "23a1f6695bc48bdc4eee03d890ae116a",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 81,
"avg_line_length": 31.333333333333332,
"alnum_prop": 0.7792553191489362,
"repo_name": "cerrno/neurokernel",
"id": "548852b2ea8f15047359475c9362f90d569427b9",
"size": "376",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/lamina/data/generate_lamina_gexf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Cuda",
"bytes": "16086"
},
{
"name": "Makefile",
"bytes": "298"
},
{
"name": "Python",
"bytes": "1635553"
}
],
"symlink_target": ""
}
|
import dateutil # type: ignore
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import pandas as pd
from bs4 import BeautifulSoup
from sklearn.feature_extraction.text import CountVectorizer
from numpy import dot
from numpy.linalg import norm
from email.utils import parseaddr
import tldextract
from urllib.parse import urlparse
import re
no_fetch_extract = tldextract.TLDExtract(suffix_list_urls=None)
pd.options.mode.chained_assignment = None # default='warn'
SIMILARITY_THRESHOLD = float(demisto.args().get('threshold', 0.97))
CLOSE_TO_SIMILAR_DISTANCE = 0.2
EMAIL_BODY_FIELD = 'emailbody'
EMAIL_SUBJECT_FIELD = 'emailsubject'
EMAIL_HTML_FIELD = 'emailbodyhtml'
FROM_FIELD = 'emailfrom'
FROM_DOMAIN_FIELD = 'fromdomain'
PREPROCESSED_EMAIL_BODY = 'preprocessedemailbody'
PREPROCESSED_EMAIL_SUBJECT = 'preprocessedemailsubject'
MERGED_TEXT_FIELD = 'mereged_text'
MIN_TEXT_LENGTH = 50
DEFAULT_ARGS = {
'limit': '1000',
'incidentTypes': 'Phishing',
'existingIncidentsLookback': '100 days ago',
}
FROM_POLICY_TEXT_ONLY = 'TextOnly'
FROM_POLICY_EXACT = 'Exact'
FROM_POLICY_DOMAIN = 'Domain'
FROM_POLICY = FROM_POLICY_TEXT_ONLY
URL_REGEX = r'(?:(?:https?|ftp|hxxps?):\/\/|www\[?\.\]?|ftp\[?\.\]?)(?:[-\w\d]+\[?\.\]?)+[-\w\d]+(?::\d+)?' \
r'(?:(?:\/|\?)[-\w\d+&@#\/%=~_$?!\-:,.\(\);]*[\w\d+&@#\/%=~_$\(\);])?'
IGNORE_INCIDENT_TYPE_VALUE = 'None'
def get_existing_incidents(input_args, current_incident_type):
global DEFAULT_ARGS
get_incidents_args = {}
get_incidents_args['limit'] = input_args.get('limit', DEFAULT_ARGS['limit'])
if 'existingIncidentsLookback' in input_args:
get_incidents_args['fromDate'] = input_args['existingIncidentsLookback']
elif 'existingIncidentsLookback' in DEFAULT_ARGS:
get_incidents_args['fromDate'] = DEFAULT_ARGS['existingIncidentsLookback']
status_scope = input_args.get('statusScope', 'All')
query_components = []
if 'query' in input_args and input_args['query']:
query_components.append(input_args['query'])
if status_scope == 'ClosedOnly':
query_components.append('status:closed')
elif status_scope == 'NonClosedOnly':
query_components.append('-status:closed')
elif status_scope == 'All':
pass
else:
return_error('Unsupported statusScope: {}'.format(status_scope))
type_values = input_args.get('incidentTypes', current_incident_type)
if type_values != IGNORE_INCIDENT_TYPE_VALUE:
type_field = input_args.get('incidentTypeFieldName', 'type')
type_query = generate_incident_type_query_component(type_field, type_values)
query_components.append(type_query)
if len(query_components) > 0:
get_incidents_args['query'] = ' and '.join('({})'.format(c) for c in query_components)
fields = [EMAIL_BODY_FIELD, EMAIL_SUBJECT_FIELD, EMAIL_HTML_FIELD, FROM_FIELD, FROM_DOMAIN_FIELD, 'created', 'id',
'name', 'status', 'emailto', 'emailcc', 'emailbcc']
if 'populateFields' in input_args and input_args['populateFields'] is not None:
get_incidents_args['populateFields'] = ','.join([','.join(fields), input_args['populateFields']])
else:
get_incidents_args['populateFields'] = ','.join(fields)
incidents_query_res = demisto.executeCommand('GetIncidentsByQuery', get_incidents_args)
if is_error(incidents_query_res):
return_error(get_error(incidents_query_res))
incidents = json.loads(incidents_query_res[-1]['Contents'])
return incidents
def generate_incident_type_query_component(type_field_arg, type_values_arg):
type_field = type_field_arg.strip()
type_values = [x.strip() for x in type_values_arg.split(',')]
types_unions = ' '.join(f'"{t}"' for t in type_values)
return f'{type_field}:({types_unions})'
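# For example (sanity check, not part of the original script):
# generate_incident_type_query_component('type', 'Phishing, Spear Phishing')
# returns 'type:("Phishing" "Spear Phishing")'.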
def extract_domain(address):
global no_fetch_extract
if address == '':
return ''
email_address = parseaddr(address)[1]
ext = no_fetch_extract(email_address)
return '{}.{}'.format(ext.domain, ext.suffix)
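# For example (illustrative, not part of the original script):
# extract_domain('Alice <alice@mail.example.com>') is expected to return
# 'example.com'.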
def get_text_from_html(html):
soup = BeautifulSoup(html)
# kill all script and style elements
for script in soup(["script", "style"]):
script.extract() # rip it out
# get text
text = soup.get_text()
# break into lines and remove leading and trailing space on each
lines = (line.strip() for line in text.splitlines())
# break multi-headlines into a line each
chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
# drop blank lines
text = '\n'.join(chunk for chunk in chunks if chunk)
return text
def eliminate_urls_extensions(text):
urls_list = re.findall(URL_REGEX, text)
if len(urls_list) == 0:
return text
formatted_urls_list_res = demisto.executeCommand('FormatURL', {'input': ','.join(urls_list)})
if is_error(formatted_urls_list_res):
return_error(formatted_urls_list_res)
formatted_urls_list = [entry["Contents"][-1] for entry in formatted_urls_list_res]
for url, formatted_url in zip(urls_list, formatted_urls_list):
parsed_uri = urlparse(formatted_url)
url_with_no_path = '{uri.scheme}://{uri.netloc}/'.format(uri=parsed_uri)
text = text.replace(url, url_with_no_path)
return text
def preprocess_email_body(incident):
email_body = email_html = ''
if EMAIL_BODY_FIELD in incident:
email_body = incident[EMAIL_BODY_FIELD]
if EMAIL_HTML_FIELD in incident:
email_html = incident[EMAIL_HTML_FIELD]
if isinstance(email_html, float):
email_html = ''
if email_body is None or isinstance(email_body, float) or email_body.strip() == '':
email_body = get_text_from_html(email_html)
return eliminate_urls_extensions(email_body)
def preprocess_email_subject(incident):
email_subject = ''
if EMAIL_SUBJECT_FIELD in incident:
email_subject = incident[EMAIL_SUBJECT_FIELD]
if isinstance(email_subject, float):
email_subject = ''
return eliminate_urls_extensions(email_subject)
def concatenate_subject_body(row):
return '{}\n{}'.format(row[PREPROCESSED_EMAIL_SUBJECT], row[PREPROCESSED_EMAIL_BODY])
def preprocess_incidents_df(existing_incidents):
global MERGED_TEXT_FIELD, FROM_FIELD, FROM_DOMAIN_FIELD
incidents_df = pd.DataFrame(existing_incidents)
if 'CustomFields' in incidents_df.columns:
incidents_df['CustomFields'] = incidents_df['CustomFields'].fillna(value={})
custom_fields_df = incidents_df['CustomFields'].apply(pd.Series)
unique_keys = [k for k in custom_fields_df if k not in incidents_df]
custom_fields_df = custom_fields_df[unique_keys]
incidents_df = pd.concat([incidents_df.drop('CustomFields', axis=1),
custom_fields_df], axis=1).reset_index()
incidents_df[PREPROCESSED_EMAIL_SUBJECT] = incidents_df.apply(lambda x: preprocess_email_subject(x), axis=1)
incidents_df[PREPROCESSED_EMAIL_BODY] = incidents_df.apply(lambda x: preprocess_email_body(x), axis=1)
incidents_df[MERGED_TEXT_FIELD] = incidents_df.apply(concatenate_subject_body, axis=1)
incidents_df = incidents_df[incidents_df[MERGED_TEXT_FIELD].str.len() >= MIN_TEXT_LENGTH]
incidents_df.reset_index(inplace=True)
if FROM_FIELD in incidents_df:
incidents_df[FROM_FIELD] = incidents_df[FROM_FIELD].fillna(value='')
else:
incidents_df[FROM_FIELD] = ''
incidents_df[FROM_FIELD] = incidents_df[FROM_FIELD].apply(lambda x: x.strip())
incidents_df[FROM_DOMAIN_FIELD] = incidents_df[FROM_FIELD].apply(lambda address: extract_domain(address))
incidents_df['created'] = incidents_df['created'].apply(lambda x: dateutil.parser.parse(x)) # type: ignore
return incidents_df
def incident_has_text_fields(incident):
text_fields = [EMAIL_SUBJECT_FIELD, EMAIL_HTML_FIELD, EMAIL_BODY_FIELD]
custom_fields = incident.get('CustomFields', []) or []
if any(field in incident for field in text_fields):
return True
elif 'CustomFields' in incident and any(field in custom_fields for field in text_fields):
return True
return False
def filter_out_same_incident(existing_incidents_df, new_incident):
same_id_mask = existing_incidents_df['id'] == new_incident['id']
existing_incidents_df = existing_incidents_df[~same_id_mask]
return existing_incidents_df
def filter_newer_incidents(existing_incidents_df, new_incident):
new_incident_datetime = dateutil.parser.parse(new_incident['created']) # type: ignore
earlier_incidents_mask = existing_incidents_df['created'] < new_incident_datetime
return existing_incidents_df[earlier_incidents_mask]
def vectorize(text, vectorizer):
return vectorizer.transform([text]).toarray()[0]
def cosine_sim(a, b):
return dot(a, b) / (norm(a) * norm(b))
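# Sanity check (not part of the original script): cosine_sim([1, 0], [1, 1])
# equals 1 / sqrt(2), roughly 0.707; identical non-zero vectors give 1.0.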
def find_duplicate_incidents(new_incident, existing_incidents_df, max_incidents_to_return):
global MERGED_TEXT_FIELD, FROM_POLICY
new_incident_text = new_incident[MERGED_TEXT_FIELD]
text = [new_incident_text] + existing_incidents_df[MERGED_TEXT_FIELD].tolist()
vectorizer = CountVectorizer(token_pattern=r"(?u)\b\w\w+\b|!|\?|\"|\'").fit(text)
new_incident_vector = vectorize(new_incident_text, vectorizer)
existing_incidents_df['vector'] = existing_incidents_df[MERGED_TEXT_FIELD].apply(lambda x: vectorize(x, vectorizer))
existing_incidents_df['similarity'] = existing_incidents_df['vector'].apply(
lambda x: cosine_sim(x, new_incident_vector))
if FROM_POLICY == FROM_POLICY_DOMAIN:
mask = (existing_incidents_df[FROM_DOMAIN_FIELD] != '') & \
(existing_incidents_df[FROM_DOMAIN_FIELD] == new_incident[FROM_DOMAIN_FIELD])
existing_incidents_df = existing_incidents_df[mask]
elif FROM_POLICY == FROM_POLICY_EXACT:
mask = (existing_incidents_df[FROM_FIELD] != '') & \
(existing_incidents_df[FROM_FIELD] == new_incident[FROM_FIELD])
existing_incidents_df = existing_incidents_df[mask]
existing_incidents_df['distance'] = existing_incidents_df['similarity'].apply(lambda x: 1 - x)
tie_breaker_col = 'id'
try:
existing_incidents_df['int_id'] = existing_incidents_df['id'].astype(int)
tie_breaker_col = 'int_id'
except Exception:
pass
existing_incidents_df.sort_values(by=['distance', 'created', tie_breaker_col], inplace=True)
return existing_incidents_df.head(max_incidents_to_return)
def return_entry(message, duplicate_incidents_df=None, new_incident=None):
if duplicate_incidents_df is None:
duplicate_incident = {}
all_duplicate_incidents = []
full_incidents = []
else:
most_similar_incident = duplicate_incidents_df.iloc[0]
duplicate_incident = format_incident_context(most_similar_incident)
all_duplicate_incidents = [format_incident_context(row) for _, row in duplicate_incidents_df.iterrows()]
new_incident['created'] = new_incident['created'].astype(str)
duplicate_incidents_df['created'] = duplicate_incidents_df['created'].astype(str)
duplicate_incidents_df.drop('vector', axis=1, inplace=True)
full_incidents = new_incident.to_dict(orient='records') + duplicate_incidents_df.to_dict(orient='records')
outputs = {
'duplicateIncident': duplicate_incident,
'isDuplicateIncidentFound': duplicate_incidents_df is not None,
'allDuplicateIncidents': all_duplicate_incidents
}
return_outputs(message, outputs, raw_response=json.dumps(full_incidents))
def format_incident_context(df_row):
duplicate_incident = {
'rawId': df_row['id'],
'id': df_row['id'],
'name': df_row.get('name'),
'similarity': df_row.get('similarity'),
}
return duplicate_incident
def close_new_incident_and_link_to_existing(new_incident, duplicate_incidents_df):
mask = duplicate_incidents_df['similarity'] >= SIMILARITY_THRESHOLD
duplicate_incidents_df = duplicate_incidents_df[mask]
most_similar_incident = duplicate_incidents_df.iloc[0]
max_similarity = duplicate_incidents_df.iloc[0]['similarity']
min_similarity = duplicate_incidents_df.iloc[-1]['similarity']
formatted_incident, headers = format_incident_hr(duplicate_incidents_df)
incident = 'incidents' if len(duplicate_incidents_df) > 1 else 'incident'
if max_similarity > min_similarity:
title = "Duplicate {} found with similarity {:.1f}%-{:.1f}%".format(incident, min_similarity * 100,
max_similarity * 100)
else:
title = "Duplicate {} found with similarity {:.1f}%".format(incident, max_similarity * 100)
message = tableToMarkdown(title,
formatted_incident, headers)
if demisto.args().get('closeAsDuplicate', 'true') == 'true':
res = demisto.executeCommand("CloseInvestigationAsDuplicate", {
'duplicateId': most_similar_incident['id']})
if is_error(res):
return_error(res)
message += 'This incident (#{}) will be closed and linked to #{}.'.format(new_incident.iloc[0]['id'],
most_similar_incident['id'])
return_entry(message, duplicate_incidents_df, new_incident)
def create_new_incident():
return_entry('This incident is not a duplicate of an existing incident.')
def format_incident_hr(duplicate_incidents_df):
incidents_list = duplicate_incidents_df.to_dict('records')
json_lists = []
status_map = {'0': 'Pending', '1': 'Active', '2': 'Closed', '3': 'Archive'}
for incident in incidents_list:
json_lists.append({'Id': "[%s](#/Details/%s)" % (incident['id'], incident['id']),
'Name': incident['name'],
'Status': status_map[str(incident.get('status'))],
'Time': str(incident['created']),
'Email From': incident.get(demisto.args().get(FROM_FIELD)),
'Text Similarity': "{:.1f}%".format(incident['similarity'] * 100),
})
headers = ['Id', 'Name', 'Status', 'Time', 'Email From', 'Text Similarity']
return json_lists, headers
def create_new_incident_low_similarity(duplicate_incidents_df):
message = '## This incident is not a duplicate of an existing incident.\n'
similarity = duplicate_incidents_df.iloc[0]['similarity']
if similarity > SIMILARITY_THRESHOLD - CLOSE_TO_SIMILAR_DISTANCE:
mask = duplicate_incidents_df['similarity'] >= SIMILARITY_THRESHOLD - CLOSE_TO_SIMILAR_DISTANCE
duplicate_incidents_df = duplicate_incidents_df[mask]
formatted_incident, headers = format_incident_hr(duplicate_incidents_df)
message += tableToMarkdown("Most similar incidents found", formatted_incident, headers=headers)
message += 'The threshold for considering 2 incidents as duplicate is a similarity ' \
'of {:.1f}%.\n'.format(SIMILARITY_THRESHOLD * 100)
message += 'Therefore these 2 incidents will not be considered as duplicate and the current incident ' \
'will remain active.\n'
return_entry(message)
def create_new_incident_no_text_fields():
text_fields = [EMAIL_BODY_FIELD, EMAIL_HTML_FIELD, EMAIL_SUBJECT_FIELD]
message = 'No text fields were found within this incident: {}.\n'.format(','.join(text_fields))
message += 'Incident will remain active.'
return_entry(message)
def create_new_incident_too_short():
return_entry('Incident text after preprocessing is too short for deduplication. Incident will remain active.')
def main():
global EMAIL_BODY_FIELD, EMAIL_SUBJECT_FIELD, EMAIL_HTML_FIELD, FROM_FIELD, MIN_TEXT_LENGTH, FROM_POLICY
input_args = demisto.args()
EMAIL_BODY_FIELD = input_args.get('emailBody', EMAIL_BODY_FIELD)
EMAIL_SUBJECT_FIELD = input_args.get('emailSubject', EMAIL_SUBJECT_FIELD)
EMAIL_HTML_FIELD = input_args.get('emailBodyHTML', EMAIL_HTML_FIELD)
FROM_FIELD = input_args.get('emailFrom', FROM_FIELD)
FROM_POLICY = input_args.get('fromPolicy', FROM_POLICY)
max_incidents_to_return = input_args.get('maxIncidentsToReturn', '20')
try:
max_incidents_to_return = int(max_incidents_to_return)
except Exception:
return_error('Illegal value of argument "maxIncidentsToReturn": {}. '
'Value should be an integer'.format(max_incidents_to_return))
new_incident = demisto.incidents()[0]
type_field = input_args.get('incidentTypeFieldName', 'type')
existing_incidents = get_existing_incidents(input_args, new_incident.get(type_field, IGNORE_INCIDENT_TYPE_VALUE))
demisto.debug('found {} incidents by query'.format(len(existing_incidents)))
if len(existing_incidents) == 0:
create_new_incident()
return
if not incident_has_text_fields(new_incident):
create_new_incident_no_text_fields()
return
new_incident_df = preprocess_incidents_df([new_incident])
if len(new_incident_df) == 0: # len(new_incident_df)==0 means new incident is too short
create_new_incident_too_short()
return
existing_incidents_df = preprocess_incidents_df(existing_incidents)
existing_incidents_df = filter_out_same_incident(existing_incidents_df, new_incident)
existing_incidents_df = filter_newer_incidents(existing_incidents_df, new_incident)
if len(existing_incidents_df) == 0:
create_new_incident()
return
new_incident_preprocessed = new_incident_df.iloc[0].to_dict()
duplicate_incidents_df = find_duplicate_incidents(new_incident_preprocessed,
existing_incidents_df, max_incidents_to_return)
if len(duplicate_incidents_df) == 0:
create_new_incident()
return
if duplicate_incidents_df.iloc[0]['similarity'] < SIMILARITY_THRESHOLD:
create_new_incident_low_similarity(duplicate_incidents_df)
else:
return close_new_incident_and_link_to_existing(new_incident_df, duplicate_incidents_df)
if __name__ in ['__main__', '__builtin__', 'builtins']:
main()
|
{
"content_hash": "0ff74a96b7b6b77e023e25ab12fa81b6",
"timestamp": "",
"source": "github",
"line_count": 394,
"max_line_length": 120,
"avg_line_length": 46.46700507614213,
"alnum_prop": 0.6668669434127158,
"repo_name": "demisto/content",
"id": "51c992ff6df6fa788d482461745ca43ce11df5b9",
"size": "18308",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Packs/Phishing/Scripts/FindDuplicateEmailIncidents/FindDuplicateEmailIncidents.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2146"
},
{
"name": "HTML",
"bytes": "205901"
},
{
"name": "JavaScript",
"bytes": "1584075"
},
{
"name": "PowerShell",
"bytes": "442288"
},
{
"name": "Python",
"bytes": "47881712"
},
{
"name": "Rich Text Format",
"bytes": "480911"
},
{
"name": "Shell",
"bytes": "108066"
},
{
"name": "YARA",
"bytes": "1185"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) 2011, Michael Joseph Walsh.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. All advertising materials mentioning features or use of this software
must display the following acknowledgement:
This product includes software developed by the author.
4. Neither the name of the author nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
'''
Created on Aug 18, 2011
@author: Michael Joseph Walsh
'''
import logging, time
import thread, random
from threading import Lock
from intellect.examples.bahBahBlackSheep.BagOfWool import BagOfWool
def grow_wool(sheep):
while True:
time.sleep(random.randint(2, 5))
logging.getLogger("example").debug("{0}: Grew a bag of wool.".format(sheep.name))
sheep.bags_of_wool.append(BagOfWool())
if len(sheep.bags_of_wool) == 3:
logging.getLogger("example").debug("{0}: Waiting around for retirement.".format(sheep.name))
break
class BlackSheep():
'''
Used to signify a black sheep
'''
number = 0
def __init__(self):
'''
BlackSheep Initializer
'''
self.bags_of_wool = []
BlackSheep.number = BlackSheep.number + 1
self.name = "Sheep #{0}".format(BlackSheep.number)
logging.getLogger("example").debug("Creating {0}.".format(self.name))
self.lock = Lock()
thread.start_new_thread(grow_wool, (self,))
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def bags_of_wool(self):
return self._bags_of_wool
@bags_of_wool.setter
def bags_of_wool(self, value):
self.lock.acquire()
self._bags_of_wool = value
self.lock.release()
if __name__ == '__main__':
sheep = BlackSheep()
while True:
time.sleep(5)
print len(sheep.bags_of_wool)
if len(sheep.bags_of_wool) == 3:
break
|
{
"content_hash": "c8a497f69562c0e9a515675cdd32919e",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 104,
"avg_line_length": 31.466019417475728,
"alnum_prop": 0.6957729095958037,
"repo_name": "r0k3/Intellect",
"id": "6ce5a66c091aff0a819b3b1da2c9b1f0bcaf23d7",
"size": "3241",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "intellect/examples/bahBahBlackSheep/BlackSheep.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "GAP",
"bytes": "23349"
},
{
"name": "Python",
"bytes": "479983"
}
],
"symlink_target": ""
}
|
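The BlackSheep module above imports BagOfWool from a sibling module that is not reproduced in this dump. As a reading aid, here is a minimal sketch of what such a class could look like; only the name and import path come from the file above, while the body is an assumption and the real implementation in the Intellect examples may differ.

# Hypothetical stand-in for intellect.examples.bahBahBlackSheep.BagOfWool;
# the actual class in the repository may carry more state or behaviour.
class BagOfWool(object):
    '''A single bag of wool grown by a BlackSheep.'''

    def __repr__(self):
        return "BagOfWool()"

Any simple object would do here, since BlackSheep only appends instances to a list and counts them.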
import kol.Error as Error
from kol.database import ItemDatabase
from kol.manager import PatternManager
from kol.request.GenericRequest import GenericRequest
class WokRequest(GenericRequest):
    '''
    Cooks an item in the Wok of Ages (guild.php, action=wokcook).
    '''
def __init__(self, session, itemid1, numMake=1):
super(WokRequest, self).__init__(session)
self.url = session.serverURL + "guild.php"
self.requestData['pwd'] = session.pwd
self.requestData['action'] = 'wokcook'
self.requestData['quantity'] = numMake
self.requestData['whichitem'] = itemid1
def parseResponse(self):
noWokAccess = PatternManager.getOrCompilePattern('noWokAccess')
itemsDontMakeFoodPattern = PatternManager.getOrCompilePattern('dontHaveItemsForWok')
dontHaveSkillPattern = PatternManager.getOrCompilePattern('dontHaveSkillForWok')
dontHaveAdventuresPattern = PatternManager.getOrCompilePattern('dontHaveAdventuresForWok')
# Check for errors.
if noWokAccess.search(self.responseText):
raise Error.Error("Unable to use the Wok of Ages. I can't get to the Wok!", Error.RECIPE_NOT_FOUND)
elif dontHaveSkillPattern.search(self.responseText):
raise Error.Error("Unable to use the Wok of Ages. I am not skilled enough.", Error.SKILL_NOT_FOUND)
elif itemsDontMakeFoodPattern.search(self.responseText):
raise Error.Error("Unable to use the Wok of Ages. Invalid ingredients.", Error.ITEM_NOT_FOUND)
elif dontHaveAdventuresPattern.search(self.responseText):
raise Error.Error("Unable to use the Wok of Agles. I don't have enough adventures.", Error.NOT_ENOUGH_ADVENTURES)
# Find the items attached to the message.
singleItemPattern = PatternManager.getOrCompilePattern('acquireSingleItem')
match = singleItemPattern.search(self.responseText)
if match:
descId = int(match.group(1))
item = ItemDatabase.getOrDiscoverItemFromDescId(descId, self.session)
item["quantity"] = 1
else:
multiItemPattern = PatternManager.getOrCompilePattern('acquireMultipleItems')
match = multiItemPattern.search(self.responseText)
if match:
descId = int(match.group(1))
item = ItemDatabase.getOrDiscoverItemFromDescId(descId, self.session)
quantity = int(match.group(2).replace(',', ''))
item["quantity"] = quantity
else:
raise Error.Error("Unknown error.", Error.REQUEST_GENERIC)
self.responseData["wok"] = item
|
{
"content_hash": "45db082e1521daec640977b918d94a2b",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 125,
"avg_line_length": 51.54,
"alnum_prop": 0.6806363989134653,
"repo_name": "ijzer/cwbot-ndy",
"id": "9970f567cfda5f4631e24d56a5c66b9b41defa68",
"size": "2577",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "kol/request/WokRequest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "2986880"
}
],
"symlink_target": ""
}
|
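For orientation, a hedged sketch of how WokRequest might be driven from calling code follows. The import path and the Error.Error exception come from the file above; the doRequest() helper, the logged-in session it would need, and the concrete item id are assumptions about the surrounding library, not facts taken from this file.

# Hypothetical usage sketch; doRequest() and the item id are assumptions.
import kol.Error as Error
from kol.request.WokRequest import WokRequest

def cook_in_wok(session, item_id, quantity=1):
    "Try to cook an item in the Wok of Ages and return the resulting item dict."
    request = WokRequest(session, item_id, numMake=quantity)
    try:
        response = request.doRequest()   # assumed GenericRequest helper
        return response["wok"]           # set by parseResponse() above
    except Error.Error as inst:
        # Every failure branch in parseResponse() raises kol.Error.Error.
        print("Wok request failed: {0}".format(inst))
        return None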
"""
Extend the twx namespace
"""
import sys

if sys.version_info > (3, 1, 0):
    from pkgutil import extend_path
    # Merge twx/ directories from every installed distribution into __path__.
    __path__ = extend_path(__path__, __name__)
|
{
"content_hash": "f7976f5f6394aca5706d4d5b36f9b96c",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 46,
"avg_line_length": 21.125,
"alnum_prop": 0.5798816568047337,
"repo_name": "datamachine/twx.mtproto",
"id": "9fad3cc80fd96ad158e83dea16da8e6e37e5d4c9",
"size": "169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twx/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "128472"
}
],
"symlink_target": ""
}
|
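The twx/__init__.py above exists so that twx can act as a namespace package on Python 3 (per the sys.version_info check): pkgutil.extend_path merges every installed twx/ directory into __path__, letting separate distributions each ship their own twx.* subpackage. A small illustrative snippet of the consumer side follows; twx.botapi is named purely as a hypothetical sibling distribution.

# Illustrative only; assumes the distributions are installed.
import twx.mtproto            # subpackage from the repository above
# import twx.botapi           # hypothetical sibling sharing the twx namespace

# extend_path has merged every twx/ directory found on sys.path:
print(twx.__path__)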